Readout mitigator classes (#6485)

* started BaseReadoutMitigator class

* formatting base_readout_mitigator_class

* add CompleteReadoutMitigator class

* update qiskit.result objects

* remove unnecessary methods from qiskit.result objects

* remove empty line

* remove unnecessary methods from base_readout_mitigator

* quasi_probabilities function returns QuasiDistribution object

* changed diagonal type to Collable

* changed diagonal type to Callable

* updates following review

* style

* add init file

* unify API

* style

* fix lint errors

* add docs

* Initial mitigator tests

* Fixes to test mitigators

* Linting

* style of the test

* move mitigation out of quantum_info

* small fix in test

* add stddev upper bound based on gamma

* remove @staticmethod

* start tensored mitigation class

* temporary move methods to base class

* remove comment

* add new methods

* style

* add quasi_probabilities method in tensored mitigation

* add from_backend method

* Small fix to how the backend is called

* Adding script for Aer-based data generation for mitigator tests

* Rewriting the mitigator tests to be independent of Aer

* Additional tests and small bugfix

* Marginalization test working

* Some code restructure and bug fixes

* Different handling of stddev

* Added from_backend test (not working yet)

* from_backend bugfix

* Moving function to utils

* Documentation

* Linting

* fix import Aer in generate_data

* style

* fix lint errors

* more lint errors

* Test for expectation value

* Changes to ideal data in tests; all tests are now working

* More diagonal tests

* change stddev type to QuasiDistribution

* add release notes

* style

* some review comments

* refactor: complete --> correlated

* refactor: tensor --> local

* add qubits to init in correlated mitigation

* add qubits to init and from_backend in local mitigation

* use einsum directly on probs_vec in tensored mitigation

* update result Counts

* remove import

* refactor qubits --> self._qubits

* fix lint

* more refactor qubits --> self._qubits

* handle stddev_upper_bound

* handle docs

* add a test for expval stddev

* style for test

* fix lint

* Return quasi distributions with stddev

* Added test to detect current qubits-overwrite bug

* Changes to the handling of qubits in method (tests now work)

* Mitigator stddev test

* Break down counts_probability_vector

* Linting

* add docstrings

* minor changes in documentation following review

* Small fixes and change to the way the qubits parameter is handled

* Mitigators now correctly handle qubit subsets

* Linting

* fix lint errors

* minor fixes

* Small fixes and error handling test

* minor fix in the test

* move the mitigation folder to qiskit.result.mitigation

* move the mitigation tests to test.python.result

* update import in mitigation files

* Attempting to fix cyclic dependency error

* update import

* update import

* Attempting to fix cyclic dependency error

* style

* updates following review comments

* remove generate data functions

* add more details in classes docstrings

* add more details in release notes

* move test_data to test_mitigators

* Update release note

Co-authored-by: Gadi Aleksandrowicz <gadia@il.ibm.com>
Co-authored-by: gadial <gadial@gmail.com>
Co-authored-by: Christopher Wood <cjwood@us.ibm.com>
Shelly Garion 2021-12-02 22:53:31 +02:00 committed by GitHub
parent e77acc6aaf
commit ae55844b2b
10 changed files with 1393 additions and 1 deletion

View File

@@ -34,6 +34,14 @@ Distributions
ProbDistribution
QuasiDistribution
Mitigation
==========
.. autosummary::
:toctree: ../stubs/
CorrelatedReadoutMitigator
LocalReadoutMitigator
"""
from .result import Result
@@ -43,3 +51,5 @@ from .counts import Counts
from .distributions.probability import ProbDistribution
from .distributions.quasi import QuasiDistribution
from .mitigation.correlated_readout_mitigator import CorrelatedReadoutMitigator
from .mitigation.local_readout_mitigator import LocalReadoutMitigator

View File

@@ -183,3 +183,7 @@ class Counts(dict):
def _remove_space_underscore(bitstring):
"""Removes all spaces and underscores from bitstring"""
return int(bitstring.replace(" ", "").replace("_", ""), 2)
def shots(self):
"""Return the number of shots"""
return sum(self.values())

View File

@@ -28,7 +28,7 @@ class QuasiDistribution(dict):
_bitstring_regex = re.compile(r"^[01]+$")
def __init__(self, data, shots=None):
def __init__(self, data, shots=None, stddev_upper_bound=None):
"""Builds a quasiprobability distribution object.
Parameters:
@@ -42,12 +42,14 @@ class QuasiDistribution(dict):
* An integer
shots (int): Number of shots the distribution was derived from.
stddev_upper_bound (float): An upper bound for the standard deviation
Raises:
TypeError: If the input keys are not a string or int
ValueError: If the string format of the keys is incorrect
"""
self.shots = shots
self._stddev_upper_bound = stddev_upper_bound
if data:
first_key = next(iter(data.keys()))
if isinstance(first_key, int):
@@ -128,3 +130,8 @@ class QuasiDistribution(dict):
format ``"0x1a"``
"""
return {hex(key): value for key, value in self.items()}
@property
def stddev_upper_bound(self):
"""Return an upper bound on standard deviation of expval estimator."""
return self._stddev_upper_bound
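
A quick illustration of the new kwarg and property added in this hunk; the numbers are invented for the example and a Qiskit version containing this commit is assumed::

    from qiskit.result import QuasiDistribution

    # Mitigated quasi-probabilities may dip slightly below zero; the optional
    # stddev_upper_bound records the error bound computed by the mitigator.
    quasi = QuasiDistribution({"00": 1.02, "01": -0.02}, shots=1024, stddev_upper_bound=0.03)
    print(quasi.shots)               # 1024
    print(quasi.stddev_upper_bound)  # 0.03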

View File

@@ -0,0 +1,13 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Readout error mitigation."""

View File

@@ -0,0 +1,79 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Base class for readout error mitigation.
"""
from abc import ABC, abstractmethod
from typing import Optional, List, Iterable, Tuple, Union, Callable
import numpy as np
from ..distributions.quasi import QuasiDistribution
from ..counts import Counts
class BaseReadoutMitigator(ABC):
"""Base readout error mitigator class."""
@abstractmethod
def quasi_probabilities(
self,
data: Counts,
qubits: Iterable[int] = None,
clbits: Optional[List[int]] = None,
shots: Optional[int] = None,
) -> QuasiDistribution:
"""Convert counts to a dictionary of quasi-probabilities
Args:
data: Counts to be mitigated.
qubits: the physical qubits measured to obtain the count bitstrings.
If None these are assumed to be qubits [0, ..., N-1]
for N-bit counts.
clbits: Optional, marginalize counts to just these bits.
shots: Optional, the total number of shots, if None shots will
be calculated as the sum of all counts.
Returns:
QuasiDistribution: A dictionary of quasi-probabilities whose keys are the
measured standard basis states (length-N bitstrings) and whose values are
the mitigated quasi-probability estimates.
"""
@abstractmethod
def expectation_value(
self,
data: Counts,
diagonal: Union[Callable, dict, str, np.ndarray],
qubits: Iterable[int] = None,
clbits: Optional[List[int]] = None,
shots: Optional[int] = None,
) -> Tuple[float, float]:
"""Calculate the expectation value of a diagonal Hermitian operator.
Args:
data: Counts object to be mitigated.
diagonal: the diagonal operator. This may either be specified
as a string containing I,Z,0,1 characters, or as a
real valued 1D array_like object supplying the full diagonal,
or as a dictionary, or as a Callable.
qubits: the physical qubits measured to obtain the count bitstrings.
If None these are assumed to be qubits [0, ..., N-1]
for N-bit counts.
clbits: Optional, marginalize counts to just these bits.
shots: Optional, the total number of shots, if None shots will
be calculated as the sum of all counts.
Returns:
The mean and an upper bound of the standard deviation of operator
expectation value calculated from the current counts.
"""

View File

@@ -0,0 +1,260 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Readout mitigator class based on the A-matrix inversion method
"""
from typing import Optional, List, Tuple, Iterable, Callable, Union
import numpy as np
from qiskit.exceptions import QiskitError
from ..distributions.quasi import QuasiDistribution
from ..counts import Counts
from .base_readout_mitigator import BaseReadoutMitigator
from .utils import counts_probability_vector, z_diagonal, str2diag
class CorrelatedReadoutMitigator(BaseReadoutMitigator):
"""N-qubit readout error mitigator.
Mitigates :meth:`expectation_value` and :meth:`quasi_probabilities`.
The assignment matrix should be calibrated using qiskit-experiments.
This mitigation method should be used in case the readout errors of the qubits
are assumed to be correlated. The mitigation matrix of *N* qubits is of size
:math:`2^N \times 2^N`, so the mitigation complexity is :math:`O(4^N)`.
"""
def __init__(self, amat: np.ndarray, qubits: Optional[Iterable[int]] = None):
"""Initialize a CorrelatedReadoutMitigator
Args:
amat: readout error assignment matrix.
qubits: Optional, the measured physical qubits for mitigation.
Raises:
QiskitError: matrix size does not agree with number of qubits
"""
if np.any(amat < 0) or not np.allclose(np.sum(amat, axis=0), 1):
raise QiskitError("Assignment matrix columns must be valid probability distributions")
amat = np.asarray(amat, dtype=float)
matrix_qubits_num = int(np.log2(amat.shape[0]))
if qubits is None:
self._num_qubits = matrix_qubits_num
self._qubits = range(self._num_qubits)
else:
if len(qubits) != matrix_qubits_num:
raise QiskitError(
"The number of given qubits ({}) is different than the number of "
"qubits inferred from the matrices ({})".format(len(qubits), matrix_qubits_num)
)
self._qubits = qubits
self._num_qubits = len(self._qubits)
self._qubit_index = dict(zip(self._qubits, range(self._num_qubits)))
self._assignment_mat = amat
self._mitigation_mats = {}
def expectation_value(
self,
data: Counts,
diagonal: Union[Callable, dict, str, np.ndarray] = None,
qubits: Iterable[int] = None,
clbits: Optional[List[int]] = None,
shots: Optional[int] = None,
) -> Tuple[float, float]:
r"""Compute the mitigated expectation value of a diagonal observable.
This computes the mitigated estimator of
:math:`\langle O \rangle = \mbox{Tr}[\rho O]` of a diagonal observable
:math:`O = \sum_{x\in\{0, 1\}^n} O(x)|x\rangle\!\langle x|`.
Args:
data: Counts object
diagonal: Optional, the vector of diagonal values for summing the
expectation value. If ``None`` the default value is
:math:`[1, -1]^{\otimes n}`.
qubits: Optional, the measured physical qubits the count
bitstrings correspond to. If None qubits are assumed to be
:math:`[0, ..., n-1]`.
clbits: Optional, if not None marginalize counts to the specified bits.
shots: the number of shots.
Returns:
(float, float): the expectation value and an upper bound of the standard deviation.
Additional Information:
The diagonal observable :math:`O` is input using the ``diagonal`` kwarg as
a list or Numpy array :math:`[O(0), ..., O(2^n -1)]`. If no diagonal is specified
the diagonal of the Pauli operator
:math:`O = \mbox{diag}(Z^{\otimes n}) = [1, -1]^{\otimes n}` is used.
The ``clbits`` kwarg is used to marginalize the input counts dictionary
over the specified bit-values, and the ``qubits`` kwarg is used to specify
which physical qubits these bit-values correspond to as
``circuit.measure(qubits, clbits)``.
"""
if qubits is None:
qubits = self._qubits
probs_vec, shots = counts_probability_vector(
data, qubit_index=self._qubit_index, clbits=clbits, qubits=qubits
)
# Get qubit mitigation matrix and mitigate probs
mit_mat = self.mitigation_matrix(qubits)
# Get operator coeffs
if diagonal is None:
diagonal = z_diagonal(2 ** self._num_qubits)
elif isinstance(diagonal, str):
diagonal = str2diag(diagonal)
# Apply transpose of mitigation matrix
coeffs = mit_mat.T.dot(diagonal)
expval = coeffs.dot(probs_vec)
stddev_upper_bound = self.stddev_upper_bound(shots)
return (expval, stddev_upper_bound)
def quasi_probabilities(
self,
data: Counts,
qubits: Optional[List[int]] = None,
clbits: Optional[List[int]] = None,
shots: Optional[int] = None,
) -> QuasiDistribution:
"""Compute mitigated quasi probabilities value.
Args:
data: counts object
qubits: qubits the count bitstrings correspond to.
clbits: Optional, marginalize counts to just these bits.
shots: the number of shots.
Returns:
QuasiDistribution: A dictionary of quasi-probabilities whose keys are the
measured standard basis states (length-N bitstrings) and whose values are
the mitigated quasi-probability estimates.
"""
if qubits is None:
qubits = self._qubits
probs_vec, shots = counts_probability_vector(
data, qubit_index=self._qubit_index, clbits=clbits, qubits=qubits
)
# Get qubit mitigation matrix and mitigate probs
mit_mat = self.mitigation_matrix(qubits)
# Apply the mitigation matrix to the probability vector
probs_vec = mit_mat.dot(probs_vec)
probs_dict = {index: prob for index, prob in enumerate(probs_vec)}
quasi_dist = QuasiDistribution(
probs_dict, stddev_upper_bound=self.stddev_upper_bound(shots)
)
return quasi_dist
def mitigation_matrix(self, qubits: List[int] = None) -> np.ndarray:
r"""Return the readout mitigation matrix for the specified qubits.
The mitigation matrix :math:`A^{-1}` is defined as the inverse of the
:meth:`assignment_matrix` :math:`A`.
Args:
qubits: Optional, qubits being measured.
Returns:
np.ndarray: the measurement error mitigation matrix :math:`A^{-1}`.
"""
if qubits is None:
qubits = self._qubits
qubits = tuple(sorted(qubits))
# Check for cached mitigation matrix
# if not present compute
if qubits not in self._mitigation_mats:
marginal_matrix = self.assignment_matrix(qubits)
try:
mit_mat = np.linalg.inv(marginal_matrix)
except np.linalg.LinAlgError:
# Use pseudo-inverse if matrix is singular
mit_mat = np.linalg.pinv(marginal_matrix)
self._mitigation_mats[qubits] = mit_mat
return self._mitigation_mats[qubits]
def assignment_matrix(self, qubits: List[int] = None) -> np.ndarray:
r"""Return the readout assignment matrix for specified qubits.
The assignment matrix is the stochastic matrix :math:`A` which assigns
a noisy readout probability distribution to an ideal input
readout distribution: :math:`P(i|j) = \langle i | A | j \rangle`.
Args:
qubits: Optional, qubits being measured.
Returns:
np.ndarray: the assignment matrix A.
"""
if qubits is None:
qubits = self._qubits
if qubits == self._num_qubits:
return self._assignment_mat
if isinstance(qubits, int):
qubits = [qubits]
qubit_indices = [self._qubit_index[qubit] for qubit in qubits]
# Compute marginal matrix
axis = tuple(
self._num_qubits - 1 - i for i in set(range(self._num_qubits)).difference(qubit_indices)
)
num_qubits = len(qubits)
new_amat = np.zeros(2 * [2 ** num_qubits], dtype=float)
for i, col in enumerate(self._assignment_mat.T[self._keep_indexes(qubit_indices)]):
new_amat[i] = (
np.reshape(col, self._num_qubits * [2]).sum(axis=axis).reshape([2 ** num_qubits])
)
new_amat = new_amat.T
return new_amat
@staticmethod
def _keep_indexes(qubits):
indexes = [0]
for i in sorted(qubits):
indexes += [idx + (1 << i) for idx in indexes]
return indexes
def _compute_gamma(self):
"""Compute gamma for N-qubit mitigation"""
mitmat = self.mitigation_matrix(qubits=self._qubits)
return np.max(np.sum(np.abs(mitmat), axis=0))
def stddev_upper_bound(self, shots: int):
"""Return an upper bound on standard deviation of expval estimator.
Args:
shots: Number of shots used for expectation value measurement.
Returns:
float: the standard deviation upper bound.
"""
gamma = self._compute_gamma()
return gamma / np.sqrt(shots)
@property
def qubits(self) -> Tuple[int]:
"""The device qubits for this mitigator"""
return self._qubits
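
For reference, a short usage sketch of the class above. The 2-qubit assignment matrix and counts are invented for illustration; its columns must be valid probability distributions, as enforced in ``__init__``::

    import numpy as np
    from qiskit.result import Counts, CorrelatedReadoutMitigator

    # Hypothetical 2-qubit assignment matrix A: column j is the observed readout
    # distribution when the ideal outcome is j (each column sums to 1).
    amat = np.array(
        [
            [0.95, 0.03, 0.04, 0.00],
            [0.02, 0.94, 0.00, 0.05],
            [0.03, 0.00, 0.93, 0.04],
            [0.00, 0.03, 0.03, 0.91],
        ]
    )
    counts = Counts({"00": 480, "01": 20, "10": 25, "11": 475})

    mitigator = CorrelatedReadoutMitigator(amat)
    quasis = mitigator.quasi_probabilities(counts)
    expval, stddev_bound = mitigator.expectation_value(counts, diagonal="ZZ")
    print(quasis.stddev_upper_bound)
    print(expval, stddev_bound)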

View File

@@ -0,0 +1,309 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Readout mitigator class based on the 1-qubit local tensored mitigation method
"""
from typing import Optional, List, Tuple, Iterable, Callable, Union
import numpy as np
from qiskit.exceptions import QiskitError
from ..distributions.quasi import QuasiDistribution
from ..counts import Counts
from .base_readout_mitigator import BaseReadoutMitigator
from .utils import counts_probability_vector, z_diagonal, str2diag
class LocalReadoutMitigator(BaseReadoutMitigator):
"""1-qubit tensor product readout error mitigator.
Mitigates :meth:`expectation_value` and :meth:`quasi_probabilities`.
The mitigator should either be calibrated using qiskit-experiments,
or calculated directly from the backend properties.
This mitigation method should be used in case the readout errors of the qubits
are assumed to be uncorrelated. For *N* qubits there are *N* mitigation matrices,
each of size :math:`2 \times 2`, and the mitigation complexity is :math:`O(2^N)`,
so it is more efficient than the :class:`CorrelatedReadoutMitigator` class.
"""
def __init__(
self,
amats: Optional[List[np.ndarray]] = None,
qubits: Optional[Iterable[int]] = None,
backend=None,
):
"""Initialize a LocalReadoutMitigator
Args:
amats: Optional, list of single-qubit readout error assignment matrices.
qubits: Optional, the measured physical qubits for mitigation.
backend: Optional, a backend whose properties provide the readout error information.
Raises:
QiskitError: matrices sizes do not agree with number of qubits
"""
if amats is None:
amats = self._from_backend(backend, qubits)
else:
amats = [np.asarray(amat, dtype=float) for amat in amats]
for amat in amats:
if np.any(amat < 0) or not np.allclose(np.sum(amat, axis=0), 1):
raise QiskitError(
"Assignment matrix columns must be valid probability distributions"
)
if qubits is None:
self._num_qubits = len(amats)
self._qubits = range(self._num_qubits)
else:
if len(qubits) != len(amats):
raise QiskitError(
"The number of given qubits ({}) is different than the number of qubits "
"inferred from the matrices ({})".format(len(qubits), len(amats))
)
self._qubits = qubits
self._num_qubits = len(self._qubits)
self._qubit_index = dict(zip(self._qubits, range(self._num_qubits)))
self._assignment_mats = amats
self._mitigation_mats = np.zeros([self._num_qubits, 2, 2], dtype=float)
self._gammas = np.zeros(self._num_qubits, dtype=float)
for i in range(self._num_qubits):
mat = self._assignment_mats[i]
# Compute Gamma values
error0 = mat[1, 0]
error1 = mat[0, 1]
self._gammas[i] = (1 + abs(error0 - error1)) / (1 - error0 - error1)
# Compute inverse mitigation matrix
try:
ainv = np.linalg.inv(mat)
except np.linalg.LinAlgError:
ainv = np.linalg.pinv(mat)
self._mitigation_mats[i] = ainv
def expectation_value(
self,
data: Counts,
diagonal: Union[Callable, dict, str, np.ndarray] = None,
qubits: Iterable[int] = None,
clbits: Optional[List[int]] = None,
shots: Optional[int] = None,
) -> Tuple[float, float]:
r"""Compute the mitigated expectation value of a diagonal observable.
This computes the mitigated estimator of
:math:`\langle O \rangle = \mbox{Tr}[\rho O]` of a diagonal observable
:math:`O = \sum_{x\in\{0, 1\}^n} O(x)|x\rangle\!\langle x|`.
Args:
data: Counts object
diagonal: Optional, the vector of diagonal values for summing the
expectation value. If ``None`` the default value is
:math:`[1, -1]^{\otimes n}`.
qubits: Optional, the measured physical qubits the count
bitstrings correspond to. If None qubits are assumed to be
:math:`[0, ..., n-1]`.
clbits: Optional, if not None marginalize counts to the specified bits.
shots: the number of shots.
Returns:
(float, float): the expectation value and an upper bound of the standard deviation.
Additional Information:
The diagonal observable :math:`O` is input using the ``diagonal`` kwarg as
a list or Numpy array :math:`[O(0), ..., O(2^n -1)]`. If no diagonal is specified
the diagonal of the Pauli operator
:math:`O = \mbox{diag}(Z^{\otimes n}) = [1, -1]^{\otimes n}` is used.
The ``clbits`` kwarg is used to marginalize the input counts dictionary
over the specified bit-values, and the ``qubits`` kwarg is used to specify
which physical qubits these bit-values correspond to as
``circuit.measure(qubits, clbits)``.
"""
if qubits is None:
qubits = self._qubits
num_qubits = len(qubits)
probs_vec, shots = counts_probability_vector(
data, qubit_index=self._qubit_index, clbits=clbits, qubits=qubits
)
# Get qubit mitigation matrix and mitigate probs
qubit_indices = [self._qubit_index[qubit] for qubit in qubits]
ainvs = self._mitigation_mats[qubit_indices]
# Get operator coeffs
if diagonal is None:
diagonal = z_diagonal(2 ** num_qubits)
elif isinstance(diagonal, str):
diagonal = str2diag(diagonal)
# Apply transpose of mitigation matrix
coeffs = np.reshape(diagonal, num_qubits * [2])
einsum_args = [coeffs, list(range(num_qubits))]
for i, ainv in enumerate(reversed(ainvs)):
einsum_args += [ainv.T, [num_qubits + i, i]]
einsum_args += [list(range(num_qubits, 2 * num_qubits))]
coeffs = np.einsum(*einsum_args).ravel()
expval = coeffs.dot(probs_vec)
stddev_upper_bound = self.stddev_upper_bound(shots, qubits)
return (expval, stddev_upper_bound)
def quasi_probabilities(
self,
data: Counts,
qubits: Optional[List[int]] = None,
clbits: Optional[List[int]] = None,
shots: Optional[int] = None,
) -> QuasiDistribution:
"""Compute mitigated quasi probabilities value.
Args:
data: counts object
qubits: qubits the count bitstrings correspond to.
clbits: Optional, marginalize counts to just these bits.
shots: the number of shots.
Returns:
QuasiDistribution: A dictionary of quasi-probabilities whose keys are the
measured standard basis states (length-N bitstrings) and whose values are
the mitigated quasi-probability estimates.
Raises:
QiskitError: if qubit and clbit kwargs are not valid.
"""
if qubits is None:
qubits = self._qubits
num_qubits = len(qubits)
probs_vec, shots = counts_probability_vector(
data, qubit_index=self._qubit_index, clbits=clbits, qubits=qubits
)
# Get qubit mitigation matrix and mitigate probs
qubit_indices = [self._qubit_index[qubit] for qubit in qubits]
ainvs = self._mitigation_mats[qubit_indices]
# Apply the mitigation matrices to the probability tensor
prob_tens = np.reshape(probs_vec, num_qubits * [2])
einsum_args = [prob_tens, list(range(num_qubits))]
for i, ainv in enumerate(reversed(ainvs)):
einsum_args += [ainv, [num_qubits + i, i]]
einsum_args += [list(range(num_qubits, 2 * num_qubits))]
probs_vec = np.einsum(*einsum_args).ravel()
probs_dict = {index: prob for index, prob in enumerate(probs_vec)}
quasi_dist = QuasiDistribution(
probs_dict, stddev_upper_bound=self.stddev_upper_bound(shots, qubits)
)
return quasi_dist
def mitigation_matrix(self, qubits: Optional[Union[List[int], int]] = None) -> np.ndarray:
r"""Return the measurement mitigation matrix for the specified qubits.
The mitigation matrix :math:`A^{-1}` is defined as the inverse of the
:meth:`assignment_matrix` :math:`A`.
Args:
qubits: Optional, qubits being measured for operator expval.
If a single int is given, it is assumed to be the index
of the qubit in ``self._qubits``.
Returns:
np.ndarray: the measurement error mitigation matrix :math:`A^{-1}`.
"""
if qubits is None:
qubits = self._qubits
if isinstance(qubits, int):
qubits = [self._qubits[qubits]]
mat = self._mitigation_mats[qubits[0]]
for i in qubits[1:]:
mat = np.kron(self._mitigation_mats[i], mat)
return mat
def assignment_matrix(self, qubits: List[int] = None) -> np.ndarray:
r"""Return the measurement assignment matrix for specified qubits.
The assignment matrix is the stochastic matrix :math:`A` which assigns
a noisy measurement probability distribution to an ideal input
measurement distribution: :math:`P(i|j) = \langle i | A | j \rangle`.
Args:
qubits: Optional, qubits being measured for operator expval.
Returns:
np.ndarray: the assignment matrix A.
"""
if qubits is None:
qubits = self._qubits
if isinstance(qubits, int):
qubits = [qubits]
qubit_indices = [self._qubit_index[qubit] for qubit in qubits]
mat = self._assignment_mats[qubit_indices[0]]
for i in qubit_indices[1:]:
mat = np.kron(self._assignment_mats[i], mat)
return mat
def _compute_gamma(self, qubits=None):
"""Compute gamma for N-qubit mitigation"""
if qubits is None:
gammas = self._gammas
else:
qubit_indices = [self._qubit_index[qubit] for qubit in qubits]
gammas = self._gammas[qubit_indices]
return np.product(gammas)
def stddev_upper_bound(self, shots: int, qubits: List[int] = None):
"""Return an upper bound on standard deviation of expval estimator.
Args:
shots: Number of shots used for expectation value measurement.
qubits: qubits being measured for operator expval.
Returns:
float: the standard deviation upper bound.
"""
gamma = self._compute_gamma(qubits=qubits)
return gamma / np.sqrt(shots)
def _from_backend(self, backend, qubits):
"""Calculates amats from backend properties readout_error"""
backend_qubits = backend.properties().qubits
if qubits is not None:
if any(qubit >= len(backend_qubits) for qubit in qubits):
raise QiskitError("The chosen backend does not contain the specified qubits.")
reduced_backend_qubits = [backend_qubits[i] for i in qubits]
backend_qubits = reduced_backend_qubits
num_qubits = len(backend_qubits)
amats = np.zeros([num_qubits, 2, 2], dtype=float)
for qubit_idx, qubit_prop in enumerate(backend_qubits):
for prop in qubit_prop:
if prop.name == "prob_meas0_prep1":
(amats[qubit_idx])[0, 1] = prop.value
(amats[qubit_idx])[1, 1] = 1 - prop.value
if prop.name == "prob_meas1_prep0":
(amats[qubit_idx])[1, 0] = prop.value
(amats[qubit_idx])[0, 0] = 1 - prop.value
return amats
@property
def qubits(self) -> Tuple[int]:
"""The device qubits for this mitigator"""
return self._qubits
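
A corresponding sketch for the local (tensor-product) mitigator above, using two invented single-qubit assignment matrices; ``nearest_probability_distribution`` and ``binary_probabilities`` are existing :class:`QuasiDistribution` helpers also used in the tests below::

    import numpy as np
    from qiskit.result import Counts, LocalReadoutMitigator

    # Hypothetical per-qubit assignment matrices A_i (each column sums to 1).
    amats = [
        np.array([[0.98, 0.05], [0.02, 0.95]]),
        np.array([[0.97, 0.04], [0.03, 0.96]]),
    ]
    counts = Counts({"00": 4900, "01": 90, "10": 110, "11": 4900})

    mitigator = LocalReadoutMitigator(amats)
    expval, stddev_bound = mitigator.expectation_value(counts, "ZZ")
    quasis = mitigator.quasi_probabilities(counts)
    print(expval, stddev_bound)
    print(quasis.nearest_probability_distribution().binary_probabilities())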

View File

@@ -0,0 +1,160 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Readout mitigation data handling utils
"""
import logging
from typing import Optional, List, Tuple, Dict
import numpy as np
from qiskit.exceptions import QiskitError
from ..utils import marginal_counts
from ..counts import Counts
logger = logging.getLogger(__name__)
def z_diagonal(dim, dtype=float):
r"""Return the diagonal for the operator :math:`Z^\otimes n`"""
parity = np.zeros(dim, dtype=dtype)
for i in range(dim):
parity[i] = bin(i)[2:].count("1")
return (-1) ** np.mod(parity, 2)
def expval_with_stddev(coeffs: np.ndarray, probs: np.ndarray, shots: int) -> Tuple[float, float]:
"""Compute expectation value and standard deviation.
Args:
coeffs: array of diagonal operator coefficients.
probs: array of measurement probabilities.
shots: total number of shots to obtain probabilities.
Returns:
tuple: (expval, stddev) expectation value and standard deviation.
"""
# Compute expval
expval = coeffs.dot(probs)
# Compute variance
sq_expval = (coeffs ** 2).dot(probs)
variance = (sq_expval - expval ** 2) / shots
# Compute standard deviation
if variance < 0 and not np.isclose(variance, 0):
logger.warning(
"Encountered a negative variance in expectation value calculation."
"(%f). Setting standard deviation of result to 0.",
variance,
)
calc_stddev = np.sqrt(variance) if variance > 0 else 0.0
return [expval, calc_stddev]
def stddev(probs, shots):
"""Calculate stddev dict"""
ret = {}
for key, prob in probs.items():
std_err = np.sqrt(prob * (1 - prob) / shots)
ret[key] = std_err
return ret
def str2diag(string):
"""Transform diagonal from a string to a numpy array"""
chars = {
"I": np.array([1, 1], dtype=float),
"Z": np.array([1, -1], dtype=float),
"0": np.array([1, 0], dtype=float),
"1": np.array([0, 1], dtype=float),
}
ret = np.array([1], dtype=float)
for i in string:
if i not in chars:
raise QiskitError(f"Invalid diagonal string character {i}")
ret = np.kron(chars[i], ret)
return ret
def counts_to_vector(counts: Counts, num_qubits: int) -> Tuple[np.ndarray, int]:
"""Transforms Counts to a probability vector"""
vec = np.zeros(2 ** num_qubits, dtype=float)
shots = 0
for key, val in counts.items():
shots += val
vec[int(key, 2)] = val
vec /= shots
return vec, shots
def remap_qubits(
vec: np.ndarray, num_qubits: int, qubits: Optional[List[int]] = None
) -> np.ndarray:
"""Remapping the qubits"""
if qubits is not None:
if len(qubits) != num_qubits:
raise QiskitError("Num qubits does not match vector length.")
axes = [num_qubits - 1 - i for i in reversed(np.argsort(qubits))]
vec = np.reshape(vec, num_qubits * [2]).transpose(axes).reshape(vec.shape)
return vec
def marganalize_counts(
counts: Counts,
qubit_index: Dict[int, int],
qubits: Optional[List[int]] = None,
clbits: Optional[List[int]] = None,
) -> np.ndarray:
"""Marginalization of the Counts. Verify that number of clbits equals to the number of qubits."""
if clbits is not None:
qubits_len = len(qubits) if not qubits is None else 0
clbits_len = len(clbits) if not clbits is None else 0
if clbits_len not in (0, qubits_len):
raise QiskitError(
"Num qubits ({}) does not match number of clbits ({}).".format(
qubits_len, clbits_len
)
)
counts = marginal_counts(counts, clbits)
if clbits is None and qubits is not None:
clbits = [qubit_index[qubit] for qubit in qubits]
counts = marginal_counts(counts, clbits)
return counts
def counts_probability_vector(
counts: Counts,
qubit_index: Dict[int, int],
qubits: Optional[List[int]] = None,
clbits: Optional[List[int]] = None,
) -> Tuple[np.ndarray, int]:
"""Compute a probability vector for all count outcomes.
Args:
counts: counts object
qubit_index: For each qubit, its index in the mitigator qubits list
qubits: qubits the count bitstrings correspond to.
clbits: Optional, marginalize counts to just these bits.
Raises:
QiskitError: if qubits and clbits kwargs are not valid.
Returns:
Tuple[np.ndarray, int]: the probability vector for all count outcomes,
and the total number of shots.
"""
counts = marganalize_counts(counts, qubit_index, qubits, clbits)
if qubits is not None:
num_qubits = len(qubits)
else:
num_qubits = len(qubit_index.keys())
vec, shots = counts_to_vector(counts, num_qubits)
vec = remap_qubits(vec, num_qubits, qubits)
return vec, shots
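
A few of these helpers in isolation, to show the conventions they assume; the counts are invented, and ``qubit_index`` maps each measured qubit label to its position in the mitigator's qubit list::

    from qiskit.result import Counts
    from qiskit.result.mitigation.utils import (
        counts_probability_vector,
        expval_with_stddev,
        str2diag,
        z_diagonal,
    )

    counts = Counts({"00": 510, "01": 490})
    probs, shots = counts_probability_vector(counts, qubit_index={0: 0, 1: 1})

    print(z_diagonal(4))    # [ 1. -1. -1.  1.]
    print(str2diag("ZZ"))   # [ 1. -1. -1.  1.]
    print(expval_with_stddev(str2diag("ZZ"), probs, shots))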

View File

@@ -0,0 +1,55 @@
---
features:
- |
Adds a :class:`~qiskit.result.BaseReadoutMitigator` abstract base class
for implementing classical measurement error mitigators. These objects
are intended for mitigating measurement errors in
:class:`~qiskit.result.Counts` objects returned from execution of circuits
on backends with measurement errors.
Readout mitigator classes have two main methods:
* :meth:`expectation_value` which computes a mitigated expectation
value and standard error of a diagonal operator from a noisy
:class:`~qiskit.result.Counts` object.
* :meth:`quasi_probabilities` that computes an error mitigated
:class:`~qiskit.result.QuasiDistribution`, including standard error,
from a noisy counts object.
Note that current :mod:`qiskit.algorithms` and the
:class:`~qiskit.utils.QuantumInstance` class still use the legacy mitigators
migrated from qiskit-ignis in :mod:`qiskit.utils.mitigation`. It is planned
to upgrade code to use the new mitigator classes and deprecate the legacy
mitigation code in a future release.
- |
Adds a :class:`~qiskit.result.LocalReadoutMitigator` class for
performing measurement readout error mitigation of local measurement
errors. Local measurement errors are those that are described by a
tensor-product of single-qubit measurement errors.
This class can be initialized with a list of *N* single-qubit measurement
error assignment matrices or from a backend using the readout error
information in the backend properties.
Mitigation is implemented using local assignment-matrix inversion which has
complexity of :math:`O(2^N)` for *N*-qubit mitigation of
:class:`~qiskit.result.QuasiDistribution` and expectation values.
- |
Adds a :class:`~qiskit.result.CorrelatedReadoutMitigator` class for
performing measurement readout error mitigation of correlated measurement
errors. This class can be initialized with a single :math:`2^N \times 2^N`
measurement error assignment matrix that describes the error probabilities.
Mitigation is implemented via inversion of the assignment matrix, which has
mitigation complexity of :math:`O(4^N)` for computing
:class:`~qiskit.result.QuasiDistribution` and expectation values.
- |
Adds :meth:`~qiskit.result.QuasiDistribution.stddev_upper_bound`
property and init kwarg to :class:`~qiskit.result.QuasiDistribution` for
storing standard error in quasi probability estimates. This is used by
:class:`~qiskit.result.BaseReadoutMitigator` classes to store the
standard error in mitigated quasi probabilities.
- |
Adds :meth:`~qiskit.result.Counts.shots` method to
:class:`qiskit.result.Counts` to return the sum of all outcomes in
the counts.
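
Taken together, the release notes above correspond to a workflow along these lines. This is a sketch only; it assumes a fake backend whose properties report the ``prob_meas0_prep1`` / ``prob_meas1_prep0`` readout error terms, as the tests below do with ``FakeYorktown``, and the counts are invented::

    from qiskit.result import Counts, LocalReadoutMitigator
    from qiskit.test.mock import FakeYorktown

    # Build a local mitigator directly from the backend's reported readout errors.
    mitigator = LocalReadoutMitigator(backend=FakeYorktown())

    # Invented 5-qubit counts standing in for a real execution result.
    counts = Counts({"00000": 520, "11111": 480, "00001": 12, "10000": 8})
    print(counts.shots())  # new Counts.shots() helper -> 1020

    quasis = mitigator.quasi_probabilities(counts)
    print(quasis.stddev_upper_bound)  # stored on the returned QuasiDistribution

    expval, stddev_bound = mitigator.expectation_value(counts, "ZZZZZ")
    print(expval, stddev_bound)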

View File

@@ -0,0 +1,495 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for error mitigation routines."""
import unittest
import numpy as np
from numpy import array
from ddt import ddt, data, unpack
from qiskit import QiskitError
from qiskit.test import QiskitTestCase
from qiskit.result import Counts
from qiskit.result import CorrelatedReadoutMitigator
from qiskit.result import LocalReadoutMitigator
from qiskit.result.mitigation.utils import (
z_diagonal,
counts_probability_vector,
str2diag,
expval_with_stddev,
stddev,
)
from qiskit.test.mock import FakeYorktown
from qiskit.quantum_info.operators.predicates import matrix_equal
@ddt
class TestReadoutMitigation(QiskitTestCase):
"""Tests for correlated and local readout mitigation."""
test_data = {
"test_1": {
"local_method_matrices": [
array([[0.996525, 0.002], [0.003475, 0.998]]),
array([[0.991175, 0.00415], [0.008825, 0.99585]]),
array([[0.9886, 0.00565], [0.0114, 0.99435]]),
],
"correlated_method_matrix": array(
[
[
9.771e-01,
1.800e-03,
4.600e-03,
0.000e00,
5.600e-03,
0.000e00,
0.000e00,
0.000e00,
],
[
3.200e-03,
9.799e-01,
0.000e00,
3.400e-03,
0.000e00,
5.800e-03,
0.000e00,
1.000e-04,
],
[
8.000e-03,
0.000e00,
9.791e-01,
2.400e-03,
1.000e-04,
0.000e00,
5.700e-03,
0.000e00,
],
[
0.000e00,
8.300e-03,
3.200e-03,
9.834e-01,
0.000e00,
0.000e00,
0.000e00,
5.300e-03,
],
[
1.170e-02,
0.000e00,
0.000e00,
0.000e00,
9.810e-01,
2.500e-03,
5.000e-03,
0.000e00,
],
[
0.000e00,
9.900e-03,
0.000e00,
0.000e00,
3.900e-03,
9.823e-01,
0.000e00,
3.500e-03,
],
[
0.000e00,
0.000e00,
1.310e-02,
0.000e00,
9.400e-03,
1.000e-04,
9.857e-01,
1.200e-03,
],
[
0.000e00,
1.000e-04,
0.000e00,
1.080e-02,
0.000e00,
9.300e-03,
3.600e-03,
9.899e-01,
],
]
),
"num_qubits": 3,
"shots": 10000,
"circuits": {
"ghz_3_qubits": {
"counts_ideal": {"111": 5000, "000": 5000},
"counts_noise": {
"111": 4955,
"000": 4886,
"001": 16,
"100": 46,
"010": 36,
"101": 23,
"011": 29,
"110": 9,
},
},
"first_qubit_h_3_qubits": {
"counts_ideal": {"000": 5000, "001": 5000},
"counts_noise": {
"000": 4844,
"001": 4962,
"100": 56,
"101": 65,
"011": 37,
"010": 35,
"110": 1,
},
},
},
}
}
def compare_results(self, res1, res2):
"""Compare the results between two runs"""
res1_total_shots = sum(res1.values())
res2_total_shots = sum(res2.values())
keys = set(res1.keys()).union(set(res2.keys()))
total = 0
for key in keys:
val1 = res1.get(key, 0) / res1_total_shots
val2 = res2.get(key, 0) / res2_total_shots
total += abs(val1 - val2) ** 2
return total
@data([test_data["test_1"]])
@unpack
def test_mitigation_improvement(self, circuits_data):
"""Test whether readout mitigation led to more accurate results"""
CRM = CorrelatedReadoutMitigator(circuits_data["correlated_method_matrix"])
LRM = LocalReadoutMitigator(circuits_data["local_method_matrices"])
mitigators = [CRM, LRM]
for circuit_name, circuit_data in circuits_data["circuits"].items():
counts_ideal = Counts(circuit_data["counts_ideal"])
counts_noise = Counts(circuit_data["counts_noise"])
probs_noise = {
key: value / circuits_data["shots"] for key, value in counts_noise.items()
}
unmitigated_error = self.compare_results(counts_ideal, counts_noise)
# TODO: verify mitigated stddev is larger
unmitigated_stddev = stddev(probs_noise, circuits_data["shots"])
for mitigator in mitigators:
mitigated_quasi_probs = mitigator.quasi_probabilities(counts_noise)
mitigated_probs = (
mitigated_quasi_probs.nearest_probability_distribution().binary_probabilities()
)
mitigated_error = self.compare_results(counts_ideal, mitigated_probs)
self.assertTrue(
mitigated_error < unmitigated_error * 0.8,
"Mitigator {} did not improve circuit {} measurements".format(
mitigator, circuit_name
),
)
mitigated_stddev_upper_bound = mitigated_quasi_probs._stddev_upper_bound
max_unmitigated_stddev = max(unmitigated_stddev.values())
self.assertTrue(
mitigated_stddev_upper_bound >= max_unmitigated_stddev,
"Mitigator {} on circuit {} gave stddev upper bound {} "
"while unmitigated stddev maximum is {}".format(
mitigator,
circuit_name,
mitigated_stddev_upper_bound,
max_unmitigated_stddev,
),
)
@data([test_data["test_1"]])
@unpack
def test_expectation_improvement(self, circuits_data):
"""Test whether readout mitigation led to more accurate results
and that its standard deviation is increased"""
CRM = CorrelatedReadoutMitigator(circuits_data["correlated_method_matrix"])
LRM = LocalReadoutMitigator(circuits_data["local_method_matrices"])
num_qubits = circuits_data["num_qubits"]
diagonals = []
diagonals.append(z_diagonal(2 ** num_qubits))
diagonals.append("IZ0")
diagonals.append("ZZZ")
diagonals.append("101")
diagonals.append("IZI")
mitigators = [CRM, LRM]
qubit_index = {i: i for i in range(num_qubits)}
for circuit_name, circuit_data in circuits_data["circuits"].items():
counts_ideal = Counts(circuit_data["counts_ideal"])
counts_noise = Counts(circuit_data["counts_noise"])
probs_ideal, _ = counts_probability_vector(counts_ideal, qubit_index=qubit_index)
probs_noise, _ = counts_probability_vector(counts_noise, qubit_index=qubit_index)
for diagonal in diagonals:
if isinstance(diagonal, str):
diagonal = str2diag(diagonal)
unmitigated_expectation, unmitigated_stddev = expval_with_stddev(
diagonal, probs_noise, shots=counts_noise.shots()
)
ideal_expectation = np.dot(probs_ideal, diagonal)
unmitigated_error = np.abs(ideal_expectation - unmitigated_expectation)
for mitigator in mitigators:
mitigated_expectation, mitigated_stddev = mitigator.expectation_value(
counts_noise, diagonal
)
mitigated_error = np.abs(ideal_expectation - mitigated_expectation)
self.assertTrue(
mitigated_error < unmitigated_error,
"Mitigator {} did not improve circuit {} measurements".format(
mitigator, circuit_name
),
)
self.assertTrue(
mitigated_stddev >= unmitigated_stddev,
"Mitigator {} did not increase circuit {} the standard deviation".format(
mitigator, circuit_name
),
)
@data([test_data["test_1"]])
@unpack
def test_clbits_parameter(self, circuits_data):
"""Test whether the clbits parameter is handled correctly"""
# counts_ideal is {'000': 5000, '001': 5000}
counts_ideal_12 = Counts({"00": 10000})
counts_ideal_02 = Counts({"00": 5000, "01": 5000})
counts_noise = Counts(
{"000": 4844, "001": 4962, "100": 56, "101": 65, "011": 37, "010": 35, "110": 1}
)
CRM = CorrelatedReadoutMitigator(circuits_data["correlated_method_matrix"])
LRM = LocalReadoutMitigator(circuits_data["local_method_matrices"])
mitigators = [CRM, LRM]
for mitigator in mitigators:
mitigated_probs_12 = (
mitigator.quasi_probabilities(counts_noise, qubits=[1, 2], clbits=[1, 2])
.nearest_probability_distribution()
.binary_probabilities()
)
mitigated_error = self.compare_results(counts_ideal_12, mitigated_probs_12)
self.assertTrue(
mitigated_error < 0.001,
"Mitigator {} did not correctly marganalize for qubits 1,2".format(mitigator),
)
mitigated_probs_02 = (
mitigator.quasi_probabilities(counts_noise, qubits=[0, 2], clbits=[0, 2])
.nearest_probability_distribution()
.binary_probabilities()
)
mitigated_error = self.compare_results(counts_ideal_02, mitigated_probs_02)
self.assertTrue(
mitigated_error < 0.001,
"Mitigator {} did not correctly marganalize for qubits 0,2".format(mitigator),
)
@data([test_data["test_1"]])
@unpack
def test_qubits_parameter(self, circuits_data):
"""Test whether the qubits parameter is handled correctly"""
counts_ideal_012 = Counts({"000": 5000, "001": 5000})
counts_ideal_210 = Counts({"000": 5000, "100": 5000})
counts_ideal_102 = Counts({"000": 5000, "010": 5000})
counts_noise = Counts(
{"000": 4844, "001": 4962, "100": 56, "101": 65, "011": 37, "010": 35, "110": 1}
)
CRM = CorrelatedReadoutMitigator(circuits_data["correlated_method_matrix"])
LRM = LocalReadoutMitigator(circuits_data["local_method_matrices"])
mitigators = [CRM, LRM]
for mitigator in mitigators:
mitigated_probs_012 = (
mitigator.quasi_probabilities(counts_noise, qubits=[0, 1, 2])
.nearest_probability_distribution()
.binary_probabilities()
)
mitigated_error = self.compare_results(counts_ideal_012, mitigated_probs_012)
self.assertTrue(
mitigated_error < 0.001,
"Mitigator {} did not correctly handle qubit order 0, 1, 2".format(mitigator),
)
mitigated_probs_210 = (
mitigator.quasi_probabilities(counts_noise, qubits=[2, 1, 0])
.nearest_probability_distribution()
.binary_probabilities()
)
mitigated_error = self.compare_results(counts_ideal_210, mitigated_probs_210)
self.assertTrue(
mitigated_error < 0.001,
"Mitigator {} did not correctly handle qubit order 2, 1, 0".format(mitigator),
)
mitigated_probs_102 = (
mitigator.quasi_probabilities(counts_noise, qubits=[1, 0, 2])
.nearest_probability_distribution()
.binary_probabilities()
)
mitigated_error = self.compare_results(counts_ideal_102, mitigated_probs_102)
self.assertTrue(
mitigated_error < 0.001,
"Mitigator {} did not correctly handle qubit order 1, 0, 2".format(mitigator),
)
@data([test_data["test_1"]])
@unpack
def test_repeated_qubits_parameter(self, circuits_data):
"""Tests the order of mitigated qubits."""
counts_ideal_012 = Counts({"000": 5000, "001": 5000})
counts_ideal_210 = Counts({"000": 5000, "100": 5000})
counts_noise = Counts(
{"000": 4844, "001": 4962, "100": 56, "101": 65, "011": 37, "010": 35, "110": 1}
)
CRM = CorrelatedReadoutMitigator(
circuits_data["correlated_method_matrix"], qubits=[0, 1, 2]
)
LRM = LocalReadoutMitigator(circuits_data["local_method_matrices"], qubits=[0, 1, 2])
mitigators = [CRM, LRM]
for mitigator in mitigators:
mitigated_probs_210 = (
mitigator.quasi_probabilities(counts_noise, qubits=[2, 1, 0])
.nearest_probability_distribution()
.binary_probabilities()
)
mitigated_error = self.compare_results(counts_ideal_210, mitigated_probs_210)
self.assertTrue(
mitigated_error < 0.001,
"Mitigator {} did not correctly handle qubit order 2,1,0".format(mitigator),
)
# checking qubit order 2,1,0 should not "overwrite" the default 0,1,2
mitigated_probs_012 = (
mitigator.quasi_probabilities(counts_noise)
.nearest_probability_distribution()
.binary_probabilities()
)
mitigated_error = self.compare_results(counts_ideal_012, mitigated_probs_012)
self.assertTrue(
mitigated_error < 0.001,
"Mitigator {} did not correctly handle qubit order 0,1,2 (the expected default)".format(
mitigator
),
)
@data([test_data["test_1"]])
@unpack
def test_qubits_subset_parameter(self, circuits_data):
"""Tests mitigation on a subset of the initial set of qubits."""
counts_ideal_2 = Counts({"0": 5000, "1": 5000})
counts_ideal_6 = Counts({"0": 10000})
counts_noise = Counts(
{"000": 4844, "001": 4962, "100": 56, "101": 65, "011": 37, "010": 35, "110": 1}
)
CRM = CorrelatedReadoutMitigator(
circuits_data["correlated_method_matrix"], qubits=[2, 4, 6]
)
LRM = LocalReadoutMitigator(circuits_data["local_method_matrices"], qubits=[2, 4, 6])
mitigators = [CRM, LRM]
for mitigator in mitigators:
mitigated_probs_2 = (
mitigator.quasi_probabilities(counts_noise, qubits=[2])
.nearest_probability_distribution()
.binary_probabilities()
)
mitigated_error = self.compare_results(counts_ideal_2, mitigated_probs_2)
self.assertTrue(
mitigated_error < 0.001,
"Mitigator {} did not correctly handle qubit subset".format(mitigator),
)
mitigated_probs_6 = (
mitigator.quasi_probabilities(counts_noise, qubits=[6])
.nearest_probability_distribution()
.binary_probabilities()
)
mitigated_error = self.compare_results(counts_ideal_6, mitigated_probs_6)
self.assertTrue(
mitigated_error < 0.001,
"Mitigator {} did not correctly handle qubit subset".format(mitigator),
)
diagonal = str2diag("ZZ")
ideal_expectation = 0
mitigated_expectation, _ = mitigator.expectation_value(
counts_noise, diagonal, qubits=[2, 6]
)
mitigated_error = np.abs(ideal_expectation - mitigated_expectation)
self.assertTrue(
mitigated_error < 0.1,
"Mitigator {} did not improve circuit expectation".format(mitigator),
)
def test_from_backend(self):
"""Test whether a local mitigator can be created directly from backend properties"""
backend = FakeYorktown()
num_qubits = len(backend.properties().qubits)
rng = np.random.default_rng(42)
probs = rng.random((num_qubits, 2))
for qubit_idx, qubit_prop in enumerate(backend.properties().qubits):
for prop in qubit_prop:
if prop.name == "prob_meas1_prep0":
prop.value = probs[qubit_idx][0]
if prop.name == "prob_meas0_prep1":
prop.value = probs[qubit_idx][1]
LRM_from_backend = LocalReadoutMitigator(backend=backend)
mats = []
for qubit_idx in range(num_qubits):
mat = np.array(
[
[1 - probs[qubit_idx][0], probs[qubit_idx][1]],
[probs[qubit_idx][0], 1 - probs[qubit_idx][1]],
]
)
mats.append(mat)
LRM_from_matrices = LocalReadoutMitigator(amats=mats)
self.assertTrue(
matrix_equal(
LRM_from_backend.assignment_matrix(), LRM_from_matrices.assignment_matrix()
)
)
def test_error_handling(self):
"""Test that the assignment matrices are valid."""
bad_matrix_A = np.array([[-0.3, 1], [1.3, 0]])  # negative entries
bad_matrix_B = np.array([[0.2, 1], [0.7, 0]]) # columns not summing to 1
good_matrix_A = np.array([[0.2, 1], [0.8, 0]])
for bad_matrix in [bad_matrix_A, bad_matrix_B]:
with self.assertRaises(QiskitError) as cm:
CorrelatedReadoutMitigator(bad_matrix)
self.assertEqual(
cm.exception.message,
"Assignment matrix columns must be valid probability distributions",
)
with self.assertRaises(QiskitError) as cm:
amats = [good_matrix_A, bad_matrix_A]
LocalReadoutMitigator(amats)
self.assertEqual(
cm.exception.message,
"Assignment matrix columns must be valid probability distributions",
)
with self.assertRaises(QiskitError) as cm:
amats = [bad_matrix_B, good_matrix_A]
LocalReadoutMitigator(amats)
self.assertEqual(
cm.exception.message,
"Assignment matrix columns must be valid probability distributions",
)
if __name__ == "__main__":
unittest.main()