mirror of https://github.com/Qiskit/qiskit.git
Add settings property to optimizer classes in ``qiskit.algorithms.optimizers`` (#6545)
* serialize (most) optimizers + test
* reduce duplicated code
* support any iterator in serialization by converting the learning rates to arrays. This also means we don't need an additional module for iterators
* test iterators being arrays
* serialize the rest of the optimizers
* add reno
* fix lint
* add missed return type
* lint
* move from_dict to Optimizer only
* add more tests: on invalid names, and on initializing QNSPSA without "fidelity"
* rename to_dict to settings and move all the construction logic to Optimizer.from_dict
* fix tests & lint
* try fixing tests
* lint!
* remove name from settings
* include callback & lse_solver in settings
* forgot to run black
* update docstring & reno
* fix lint
* add safeguard to GSLS settings
* don't deprecate setting for now
* implement settings for nlopts
* don't make settings abstract for backward compatibility

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
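In practice the round trip this commit enables looks as follows; a minimal sketch using SPSA, whose ``settings`` implementation is part of this diff (the parameter values here are arbitrary):

    from qiskit.algorithms.optimizers import SPSA

    optimizer = SPSA(maxiter=100, learning_rate=0.01, perturbation=0.01)
    settings = optimizer.settings  # plain dict of the constructor arguments
    reconstructed = SPSA(**settings)  # rebuilds an equivalent optimizer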
This commit is contained in:
parent cea50ed6f3
commit 0d9cf38a8c
@@ -12,7 +12,7 @@

 """The Adam and AMSGRAD optimizers."""

-from typing import Optional, Callable, Tuple, List
+from typing import Any, Optional, Callable, Dict, Tuple, List
 import os

 import csv
@@ -120,6 +120,20 @@ class ADAM(Optimizer):
                 writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
                 writer.writeheader()

+    @property
+    def settings(self) -> Dict[str, Any]:
+        return {
+            "maxiter": self._maxiter,
+            "tol": self._tol,
+            "lr": self._lr,
+            "beta_1": self._beta_1,
+            "beta_2": self._beta_2,
+            "noise_factor": self._noise_factor,
+            "eps": self._eps,
+            "amsgrad": self._amsgrad,
+            "snapshot_dir": self._snapshot_dir,
+        }
+
     def get_support_level(self):
         """Return support level dictionary"""
         return {

@@ -13,7 +13,7 @@
 """Analytical Quantum Gradient Descent (AQGD) optimizer."""

 import logging
-from typing import Callable, Tuple, List, Dict, Union
+from typing import Callable, Tuple, List, Dict, Union, Any

 import numpy as np
 from qiskit.utils.validation import validate_range_exclusive_max
@@ -116,6 +116,17 @@ class AQGD(Optimizer):
             "initial_point": OptimizerSupportLevel.required,
         }

+    @property
+    def settings(self) -> Dict[str, Any]:
+        return {
+            "maxiter": self._maxiter,
+            "eta": self._eta,
+            "momentum": self._momenta_coeff,
+            "param_tol": self._param_tol,
+            "tol": self._tol,
+            "averaging": self._averaging,
+        }
+
     def _compute_objective_fn_and_gradient(
         self, params: List[float], obj: Callable
     ) -> Tuple[float, np.array]:

@@ -12,6 +12,7 @@

 """Bound Optimization BY Quadratic Approximation (BOBYQA) optimizer."""

+from typing import Any, Dict

 import numpy as np
 from qiskit.exceptions import MissingOptionalLibraryError
@@ -62,6 +63,10 @@ class BOBYQA(Optimizer):
             "initial_point": OptimizerSupportLevel.required,
         }

+    @property
+    def settings(self) -> Dict[str, Any]:
+        return {"maxiter": self._maxiter}
+
     def optimize(
         self,
         num_vars,

@@ -12,7 +12,7 @@

 """A standard gradient descent optimizer."""

-from typing import Iterator, Optional, Union, Callable
+from typing import Iterator, Optional, Union, Callable, Dict, Any
 from functools import partial

 import numpy as np
@@ -132,6 +132,23 @@ class GradientDescent(Optimizer):
         self.tol = tol
         self.callback = callback

+    @property
+    def settings(self) -> Dict[str, Any]:
+        # if learning rate or perturbation are custom iterators expand them
+        if callable(self.learning_rate):
+            iterator = self.learning_rate()
+            learning_rate = np.array([next(iterator) for _ in range(self.maxiter)])
+        else:
+            learning_rate = self.learning_rate
+
+        return {
+            "maxiter": self.maxiter,
+            "tol": self.tol,
+            "learning_rate": learning_rate,
+            "perturbation": self.perturbation,
+            "callback": self.callback,
+        }
+
     def _minimize(self, loss, grad, initial_point):
         # set learning rate
         if isinstance(self.learning_rate, float):

@@ -12,7 +12,7 @@

 """Line search with Gaussian-smoothed samples on a sphere."""

-from typing import Dict, Optional, Tuple, List, Callable
+from typing import Dict, Optional, Tuple, List, Callable, Any
 import numpy as np

 from qiskit.utils import algorithm_globals
@@ -97,6 +97,10 @@ class GSLS(Optimizer):
             "initial_point": OptimizerSupportLevel.required,
         }

+    @property
+    def settings(self) -> Dict[str, Any]:
+        return {key: self._options.get(key, None) for key in self._OPTIONS}
+
     def optimize(
         self,
         num_vars: int,

@@ -12,6 +12,8 @@

 """IMplicit FILtering (IMFIL) optimizer."""

+from typing import Any, Dict
+
 from qiskit.exceptions import MissingOptionalLibraryError
 from .optimizer import Optimizer, OptimizerSupportLevel
@@ -62,6 +64,12 @@ class IMFIL(Optimizer):
             "initial_point": OptimizerSupportLevel.required,
         }

+    @property
+    def settings(self) -> Dict[str, Any]:
+        return {
+            "maxiter": self._maxiter,
+        }
+
     def optimize(
         self,
         num_vars,

@@ -96,6 +96,10 @@ class NLoptOptimizer(Optimizer):
             "initial_point": OptimizerSupportLevel.required,
         }

+    @property
+    def settings(self):
+        return {"max_evals": self._options.get("max_evals", 1000)}
+
     def optimize(
         self,
         num_vars,

@@ -12,6 +12,8 @@

 """Optimizer interface"""

+from typing import Dict, Any
+
 from enum import IntEnum
 import logging
 from abc import ABC, abstractmethod
@@ -147,6 +149,23 @@ class Optimizer(ABC):
             ret += f"{params}"
         return ret

+    @property
+    def settings(self) -> Dict[str, Any]:
+        """The optimizer settings in a dictionary format.
+
+        The settings can for instance be used for JSON-serialization (if all settings are
+        serializable, which e.g. doesn't hold per default for callables), such that the
+        optimizer object can be reconstructed as
+
+        .. code-block::
+
+            settings = optimizer.settings
+            # JSON serialize and send to another server
+            optimizer = OptimizerClass(**settings)
+
+        """
+        raise NotImplementedError("The settings method is not implemented per default.")
+
     @abstractmethod
     def optimize(
         self,

@@ -10,9 +10,9 @@
 # copyright notice, and modified files need to carry a notice indicating
 # that they have been altered from the originals.

-"""A generalized SPSA optimizer including support for Hessians."""
+"""The QN-SPSA optimizer."""

-from typing import Iterator, Optional, Union, Callable, Dict
+from typing import Any, Iterator, Optional, Union, Callable, Dict

 import numpy as np
 from qiskit.providers import Backend
@@ -189,6 +189,26 @@ class QNSPSA(SPSA):

         return gradient_estimate, hessian_estimate

+    @property
+    def settings(self) -> Dict[str, Any]:
+        """The optimizer settings in a dictionary format.
+
+        .. note::
+
+            The ``fidelity`` property cannot be serialized and will not be contained
+            in the dictionary. To construct a ``QNSPSA`` object from a dictionary you
+            have to add it manually with the key ``"fidelity"``.
+
+        """
+        # re-use serialization from SPSA
+        settings = super().settings
+
+        # remove SPSA-specific arguments not in QNSPSA
+        settings.pop("trust_region")
+        settings.pop("second_order")
+
+        return settings
+
     @staticmethod
     def get_fidelity(
         circuit: QuantumCircuit,

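The note in the ``QNSPSA.settings`` docstring above has a practical consequence for the round trip; a sketch modeled on the ``test_qnspsa`` case later in this diff:

    from qiskit.circuit.library import RealAmplitudes
    from qiskit.algorithms.optimizers import QNSPSA

    ansatz = RealAmplitudes(1)
    fidelity = QNSPSA.get_fidelity(ansatz)

    optimizer = QNSPSA(fidelity, maxiter=100)
    settings = optimizer.settings  # contains no "fidelity" key
    settings["fidelity"] = fidelity  # must be added back by hand
    reconstructed = QNSPSA(**settings)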
@@ -85,6 +85,19 @@ class SciPyOptimizer(Optimizer):
             "initial_point": self._initial_point_support_level,
         }

+    @property
+    def settings(self) -> Dict[str, Any]:
+        settings = {
+            "max_evals_grouped": self._max_evals_grouped,
+            "options": self._options,
+            **self._kwargs,
+        }
+        # the subclasses don't need the "method" key as the class type specifies the method
+        if self.__class__ == SciPyOptimizer:
+            settings["method"] = self._method
+
+        return settings
+
     def optimize(
         self,
         num_vars,

@@ -12,6 +12,8 @@

 """Stable Noisy Optimization by Branch and FIT algorithm (SNOBFIT) optimizer."""

+from typing import Any, Dict
+
 import numpy as np
 from qiskit.exceptions import MissingOptionalLibraryError
 from .optimizer import Optimizer, OptimizerSupportLevel
@@ -84,6 +86,15 @@ class SNOBFIT(Optimizer):
             "initial_point": OptimizerSupportLevel.required,
         }

+    @property
+    def settings(self) -> Dict[str, Any]:
+        return {
+            "maxiter": self._maxiter,
+            "maxfail": self._maxfail,
+            "maxmp": self._maxmp,
+            "verbose": self._verbose,
+        }
+
     def optimize(
         self,
         num_vars,

@@ -131,8 +131,8 @@ class SPSA(Optimizer):
         blocking: bool = False,
         allowed_increase: Optional[float] = None,
         trust_region: bool = False,
-        learning_rate: Optional[Union[float, Callable[[], Iterator]]] = None,
-        perturbation: Optional[Union[float, Callable[[], Iterator]]] = None,
+        learning_rate: Optional[Union[float, np.array, Callable[[], Iterator]]] = None,
+        perturbation: Optional[Union[float, np.array, Callable[[], Iterator]]] = None,
         last_avg: int = 1,
         resamplings: Union[int, Dict[int, int]] = 1,
         perturbation_dims: Optional[int] = None,
@@ -156,12 +156,12 @@ class SPSA(Optimizer):
             trust_region: If ``True``, restricts the norm of the update step to be :math:`\leq 1`.
             learning_rate: The update step is the learning rate is multiplied with the gradient.
                 If the learning rate is a float, it remains constant over the course of the
-                optimization. It can also be a callable returning an iterator which yields the
-                learning rates for each optimization step.
+                optimization. If a NumPy array, the :math:`i`-th element is the learning rate for
+                the :math:`i`-th iteration. It can also be a callable returning an iterator which
+                yields the learning rates for each optimization step.
                 If ``learning_rate`` is set ``perturbation`` must also be provided.
             perturbation: Specifies the magnitude of the perturbation for the finite difference
-                approximation of the gradients. Can be either a float or a generator yielding
-                the perturbation magnitudes per step.
+                approximation of the gradients. See ``learning_rate`` for the supported types.
                 If ``perturbation`` is set ``learning_rate`` must also be provided.
             last_avg: Return the average of the ``last_avg`` parameters instead of just the
                 last parameter values.
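The accepted forms of ``learning_rate`` (and ``perturbation``) described in the docstring above, as a short sketch; the array form is new in this commit and is exercised by ``test_learning_rate_perturbation_as_arrays`` later in this diff (the callable form is sketched at the ``settings`` hunk further down):

    import numpy as np
    from qiskit.algorithms.optimizers import SPSA

    # constant float
    spsa = SPSA(maxiter=100, learning_rate=0.01, perturbation=0.01)

    # per-iteration arrays; each must have at least ``maxiter`` entries
    spsa = SPSA(
        maxiter=100,
        learning_rate=np.linspace(1, 0, num=100, endpoint=False) ** 2,
        perturbation=0.01 * np.ones(100),
    )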
@@ -190,6 +190,10 @@ class SPSA(Optimizer):
             callback: A callback function passed information in each iteration step. The
                 information is, in this order: the number of function evaluations, the parameters,
                 the function value, the stepsize, whether the step was accepted.
+
+        Raises:
+            ValueError: If ``learning_rate`` or ``perturbation`` is an array with less elements
+                than the number of iterations.
         """
         super().__init__()
@@ -198,15 +202,14 @@ class SPSA(Optimizer):
         self.trust_region = trust_region
         self.callback = callback

-        if isinstance(learning_rate, float):
-            self.learning_rate = lambda: constant(learning_rate)
-        else:
-            self.learning_rate = learning_rate
+        # if learning rate and perturbation are arrays, check they are sufficiently long
+        for attr, name in zip([learning_rate, perturbation], ["learning_rate", "perturbation"]):
+            if isinstance(attr, (list, np.ndarray)):
+                if len(attr) < maxiter:
+                    raise ValueError(f"Length of {name} is smaller than maxiter ({maxiter}).")

-        if isinstance(perturbation, float):
-            self.perturbation = lambda: constant(perturbation)
-        else:
-            self.perturbation = perturbation
+        self.learning_rate = learning_rate
+        self.perturbation = perturbation

         # SPSA specific arguments
         self.blocking = blocking
@@ -219,9 +222,6 @@ class SPSA(Optimizer):
         if regularization is None:
             regularization = 0.01

-        if lse_solver is None:
-            lse_solver = np.linalg.solve
-
         self.second_order = second_order
         self.hessian_delay = hessian_delay
         self.lse_solver = lse_solver
@@ -311,6 +311,38 @@ class SPSA(Optimizer):
         losses = [loss(initial_point) for _ in range(avg)]
         return np.std(losses)

+    @property
+    def settings(self):
+        # if learning rate or perturbation are custom iterators expand them
+        if callable(self.learning_rate):
+            iterator = self.learning_rate()
+            learning_rate = np.array([next(iterator) for _ in range(self.maxiter)])
+        else:
+            learning_rate = self.learning_rate
+
+        if callable(self.perturbation):
+            iterator = self.perturbation()
+            perturbation = np.array([next(iterator) for _ in range(self.maxiter)])
+        else:
+            perturbation = self.perturbation
+
+        return {
+            "maxiter": self.maxiter,
+            "learning_rate": learning_rate,
+            "perturbation": perturbation,
+            "trust_region": self.trust_region,
+            "blocking": self.blocking,
+            "allowed_increase": self.allowed_increase,
+            "resamplings": self.resamplings,
+            "perturbation_dims": self.perturbation_dims,
+            "second_order": self.second_order,
+            "hessian_delay": self.hessian_delay,
+            "regularization": self.regularization,
+            "lse_solver": self.lse_solver,
+            "initial_hessian": self.initial_hessian,
+            "callback": self.callback,
+        }
+
     def _point_sample(self, loss, x, eps, delta1, delta2):
        """A single sample of the gradient at position ``x`` in direction ``delta``."""
         # points to evaluate
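A consequence of the expansion in the ``settings`` property above: a callable learning rate or perturbation is materialized into a ``maxiter``-long array in the settings dictionary, as ``test_spsa_custom_iterators`` further down verifies. A minimal sketch:

    import numpy as np
    from qiskit.algorithms.optimizers import SPSA

    def powerlaw():
        n = 0
        while True:
            yield 0.99 ** n
            n += 1

    spsa = SPSA(maxiter=200, learning_rate=powerlaw, perturbation=powerlaw)
    settings = spsa.settings
    assert isinstance(settings["learning_rate"], np.ndarray)
    assert len(settings["learning_rate"]) == 200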
@@ -368,7 +400,7 @@ class SPSA(Optimizer):

         return gradient_estimate / num_samples, hessian_estimate / num_samples

-    def _compute_update(self, loss, x, k, eps):
+    def _compute_update(self, loss, x, k, eps, lse_solver):
         # compute the perturbations
         if isinstance(self.resamplings, dict):
             num_samples = self.resamplings.get(k, 1)
@@ -387,7 +419,7 @@ class SPSA(Optimizer):
             spd_hessian = _make_spd(smoothed, self.regularization)

             # solve for the gradient update
-            gradient = np.real(self.lse_solver(spd_hessian, gradient))
+            gradient = np.real(lse_solver(spd_hessian, gradient))

         return gradient
@@ -395,16 +427,17 @@ class SPSA(Optimizer):
         # ensure learning rate and perturbation are correctly set: either none or both
         # this happens only here because for the calibration the loss function is required
         if self.learning_rate is None and self.perturbation is None:
-            get_learning_rate, get_perturbation = self.calibrate(loss, initial_point)
-            # get iterator
-            eta = get_learning_rate()
-            eps = get_perturbation()
-        elif self.learning_rate is None or self.perturbation is None:
-            raise ValueError("If one of learning rate or perturbation is set, both must be set.")
+            get_eta, get_eps = self.calibrate(loss, initial_point)
         else:
-            # get iterator
-            eta = self.learning_rate()
-            eps = self.perturbation()
+            get_eta, get_eps = _validate_pert_and_learningrate(
+                self.perturbation, self.learning_rate
+            )
+        eta, eps = get_eta(), get_eps()
+
+        if self.lse_solver is None:
+            lse_solver = np.linalg.solve
+        else:
+            lse_solver = self.lse_solver

         # prepare some initials
         x = np.asarray(initial_point)
@@ -433,7 +466,7 @@ class SPSA(Optimizer):
         for k in range(1, self.maxiter + 1):
             iteration_start = time()
             # compute update
-            update = self._compute_update(loss, x, k, next(eps))
+            update = self._compute_update(loss, x, k, next(eps), lse_solver)

             # trust region
             if self.trust_region:
@@ -583,3 +616,36 @@ def _make_spd(matrix, bias=0.01):
     identity = np.identity(matrix.shape[0])
     psd = scipy.linalg.sqrtm(matrix.dot(matrix))
     return psd + bias * identity
+
+
+def _validate_pert_and_learningrate(perturbation, learning_rate):
+    if learning_rate is None or perturbation is None:
+        raise ValueError("If one of learning rate or perturbation is set, both must be set.")
+
+    if isinstance(perturbation, float):
+
+        def get_eps():
+            return constant(perturbation)
+
+    elif isinstance(perturbation, (list, np.ndarray)):
+
+        def get_eps():
+            return iter(perturbation)
+
+    else:
+        get_eps = perturbation
+
+    if isinstance(learning_rate, float):
+
+        def get_eta():
+            return constant(learning_rate)
+
+    elif isinstance(learning_rate, (list, np.ndarray)):
+
+        def get_eta():
+            return iter(learning_rate)
+
+    else:
+        get_eta = learning_rate
+
+    return get_eta, get_eps

@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add a :meth:`~qiskit.algorithms.optimizers.Optimizer.settings` property to the optimizers to
+    return the optimizer settings in form of a dictionary. This can e.g. be used to JSON-serialize
+    the optimizers -- if you use an appropriate encoder for the dictionary values.

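The "appropriate encoder" the release note mentions matters because settings dictionaries can contain NumPy arrays, which the standard ``json`` module rejects. A sketch of one way to handle this (``NumpyArrayEncoder`` is a hypothetical helper, not part of this commit):

    import json
    import numpy as np
    from qiskit.algorithms.optimizers import SPSA

    class NumpyArrayEncoder(json.JSONEncoder):
        """Hypothetical encoder that converts NumPy arrays to plain lists."""

        def default(self, o):
            if isinstance(o, np.ndarray):
                return o.tolist()
            return super().default(o)

    optimizer = SPSA(maxiter=10, learning_rate=0.01, perturbation=0.01)
    as_json = json.dumps(optimizer.settings, cls=NumpyArrayEncoder)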
@@ -14,11 +14,16 @@

 import unittest
 from test.python.algorithms import QiskitAlgorithmsTestCase

+from ddt import ddt, data, unpack
 import numpy as np
 from scipy.optimize import rosen, rosen_der

 from qiskit.algorithms.optimizers import (
     ADAM,
+    AQGD,
+    BOBYQA,
+    IMFIL,
     CG,
     COBYLA,
+    GSLS,
@@ -29,11 +34,20 @@ from qiskit.algorithms.optimizers import (
     POWELL,
     SLSQP,
     SPSA,
+    QNSPSA,
     TNC,
+    SciPyOptimizer,
 )
+from qiskit.circuit.library import RealAmplitudes
 from qiskit.utils import algorithm_globals

+try:
+    import skquant.opt as skq  # pylint: disable=unused-import
+
+    _HAS_SKQUANT = True
+except ImportError:
+    _HAS_SKQUANT = False
+

 class TestOptimizers(QiskitAlgorithmsTestCase):
     """Test Optimizers"""
@@ -155,5 +169,188 @@ class TestOptimizers(QiskitAlgorithmsTestCase):
         self.assertTrue(values)  # Check the list is nonempty.


+@ddt
+class TestOptimizerSerialization(QiskitAlgorithmsTestCase):
+    """Tests concerning the serialization of optimizers."""
+
+    @data(
+        ("BFGS", {"maxiter": 100, "eps": np.array([0.1])}),
+        ("CG", {"maxiter": 200, "gtol": 1e-8}),
+        ("COBYLA", {"maxiter": 10}),
+        ("L_BFGS_B", {"maxiter": 30}),
+        ("NELDER_MEAD", {"maxiter": 0}),
+        ("NFT", {"maxiter": 100}),
+        ("P_BFGS", {"maxiter": 5}),
+        ("POWELL", {"maxiter": 1}),
+        ("SLSQP", {"maxiter": 400}),
+        ("TNC", {"maxiter": 20}),
+        ("dogleg", {"maxiter": 100}),
+        ("trust-constr", {"maxiter": 10}),
+        ("trust-ncg", {"maxiter": 100}),
+        ("trust-exact", {"maxiter": 120}),
+        ("trust-krylov", {"maxiter": 150}),
+    )
+    @unpack
+    def test_scipy(self, method, options):
+        """Test the SciPyOptimizer is serializable."""
+
+        optimizer = SciPyOptimizer(method, options=options)
+        serialized = optimizer.settings
+        from_dict = SciPyOptimizer(**serialized)
+
+        self.assertEqual(from_dict._method, method.lower())
+        self.assertEqual(from_dict._options, options)
+
+    def test_adam(self):
+        """Test ADAM is serializable."""
+
+        adam = ADAM(maxiter=100, amsgrad=True)
+        settings = adam.settings
+
+        self.assertEqual(settings["maxiter"], 100)
+        self.assertTrue(settings["amsgrad"])
+
+    def test_aqgd(self):
+        """Test AQGD is serializable."""
+
+        opt = AQGD(maxiter=[200, 100], eta=[0.2, 0.1], momentum=[0.25, 0.1])
+        settings = opt.settings
+
+        self.assertListEqual(settings["maxiter"], [200, 100])
+        self.assertListEqual(settings["eta"], [0.2, 0.1])
+        self.assertListEqual(settings["momentum"], [0.25, 0.1])
+
+    @unittest.skipIf(not _HAS_SKQUANT, "Install scikit-quant to run this test.")
+    def test_bobyqa(self):
+        """Test BOBYQA is serializable."""
+
+        opt = BOBYQA(maxiter=200)
+        settings = opt.settings
+
+        self.assertEqual(settings["maxiter"], 200)
+
+    @unittest.skipIf(not _HAS_SKQUANT, "Install scikit-quant to run this test.")
+    def test_imfil(self):
+        """Test IMFIL is serializable."""
+
+        opt = IMFIL(maxiter=200)
+        settings = opt.settings
+
+        self.assertEqual(settings["maxiter"], 200)
+
+    def test_gradient_descent(self):
+        """Test GradientDescent is serializable."""
+
+        opt = GradientDescent(maxiter=10, learning_rate=0.01)
+        settings = opt.settings
+
+        self.assertEqual(settings["maxiter"], 10)
+        self.assertEqual(settings["learning_rate"], 0.01)
+
+    def test_gsls(self):
+        """Test GSLS is serializable."""
+
+        opt = GSLS(maxiter=100, sampling_radius=1e-3)
+        settings = opt.settings
+
+        self.assertEqual(settings["maxiter"], 100)
+        self.assertEqual(settings["sampling_radius"], 1e-3)
+
+    def test_spsa(self):
+        """Test SPSA optimizer is serializable."""
+        options = {
+            "maxiter": 100,
+            "blocking": True,
+            "allowed_increase": 0.1,
+            "second_order": True,
+            "learning_rate": 0.02,
+            "perturbation": 0.05,
+            "regularization": 0.1,
+            "resamplings": 2,
+            "perturbation_dims": 5,
+            "trust_region": False,
+            "initial_hessian": None,
+            "lse_solver": None,
+            "hessian_delay": 0,
+            "callback": None,
+        }
+        spsa = SPSA(**options)
+
+        self.assertDictEqual(spsa.settings, options)
+
+    def test_spsa_custom_iterators(self):
+        """Test serialization works with custom iterators for learning rate and perturbation."""
+        rate = 0.99
+
+        def powerlaw():
+            n = 0
+            while True:
+                yield rate ** n
+                n += 1
+
+        def steps():
+            n = 1
+            divide_after = 20
+            epsilon = 0.5
+            while True:
+                yield epsilon
+                n += 1
+                if n % divide_after == 0:
+                    epsilon /= 2
+
+        learning_rate = powerlaw()
+        expected_learning_rate = np.array([next(learning_rate) for _ in range(200)])
+
+        perturbation = steps()
+        expected_perturbation = np.array([next(perturbation) for _ in range(200)])
+
+        spsa = SPSA(maxiter=200, learning_rate=powerlaw, perturbation=steps)
+        settings = spsa.settings
+
+        self.assertTrue(np.allclose(settings["learning_rate"], expected_learning_rate))
+        self.assertTrue(np.allclose(settings["perturbation"], expected_perturbation))
+
+    def test_qnspsa(self):
+        """Test QN-SPSA optimizer is serializable."""
+        ansatz = RealAmplitudes(1)
+        fidelity = QNSPSA.get_fidelity(ansatz)
+        options = {
+            "fidelity": fidelity,
+            "maxiter": 100,
+            "blocking": True,
+            "allowed_increase": 0.1,
+            "learning_rate": 0.02,
+            "perturbation": 0.05,
+            "regularization": 0.1,
+            "resamplings": 2,
+            "perturbation_dims": 5,
+            "lse_solver": None,
+            "initial_hessian": None,
+            "callback": None,
+            "hessian_delay": 0,
+        }
+        spsa = QNSPSA(**options)
+
+        settings = spsa.settings
+        expected = options.copy()
+        expected.pop("fidelity")  # fidelity cannot be serialized
+
+        with self.subTest(msg="check constructed dictionary"):
+            self.assertDictEqual(settings, expected)
+
+        # no idea why pylint complains about unexpected args (like "second_order") which are
+        # definitely not in the settings dict
+        # pylint: disable=unexpected-keyword-arg
+        with self.subTest(msg="fidelity missing"):
+            # fidelity cannot be serialized, so it must be added back in
+            with self.assertRaises(TypeError):
+                _ = QNSPSA(**settings)
+
+        settings["fidelity"] = fidelity
+        reconstructed = QNSPSA(**settings)
+        with self.subTest(msg="test reconstructed optimizer"):
+            self.assertDictEqual(reconstructed.settings, expected)
+
+
 if __name__ == "__main__":
     unittest.main()

@@ -118,6 +118,20 @@ class TestSPSA(QiskitAlgorithmsTestCase):

         self.assertAlmostEqual(np.linalg.norm(result), 2, places=2)

+    def test_learning_rate_perturbation_as_arrays(self):
+        """Test the learning rate and perturbation can be arrays."""
+
+        learning_rate = np.linspace(1, 0, num=100, endpoint=False) ** 2
+        perturbation = 0.01 * np.ones(100)
+
+        def objective(x):
+            return (np.linalg.norm(x) - 2) ** 2
+
+        spsa = SPSA(learning_rate=learning_rate, perturbation=perturbation)
+        result, _, _ = spsa.optimize(1, objective, initial_point=np.array([0.5, 0.5]))
+
+        self.assertAlmostEqual(np.linalg.norm(result), 2, places=2)
+
     def test_callback(self):
         """Test using the callback."""

@@ -687,7 +687,7 @@ class TestGradients(QiskitOpflowTestCase):

         qc = RealAmplitudes(2, reps=1)
         grad_op = ListOp([StateFn(qc)], combo_fn=combo_fn, grad_combo_fn=grad_combo_fn)
-        grad = Gradient(grad_method=method).convert(grad_op, qc.ordered_parameters)
+        grad = Gradient(grad_method=method).convert(grad_op)
         value_dict = dict(zip(qc.ordered_parameters, np.random.rand(len(qc.ordered_parameters))))
         correct_values = [
             [(-0.16666259133549044 + 0j)],