Pulse sim initial refactor (#649)

Co-authored-by: vvilpas <vvilpas@gmail.com>
Co-authored-by: Victor Villar <59838221+vvilpas@users.noreply.github.com>
Daniel Puzzuoli 2020-04-17 19:24:11 -04:00 committed by GitHub
parent 7816fa0102
commit 91840b4db1
85 changed files with 2716 additions and 7381 deletions

View File

@ -103,7 +103,7 @@ stage_osx: &stage_osx
- pip install dist/qiskit_aer*whl
script:
- stestr run --slowest
###############################################################################
# Stage-related definitions
###############################################################################

View File

@ -269,8 +269,7 @@ set(AER_LIBRARIES
if(SKBUILD) # Terra Addon build
add_muparserx_lib()
add_subdirectory(qiskit/providers/aer/pulse/cy)
add_subdirectory(qiskit/providers/aer/pulse/qutip_lite/cy)
add_subdirectory(qiskit/providers/aer/pulse/qutip_extra_lite/cy)
add_subdirectory(qiskit/providers/aer/backends/wrappers)
add_subdirectory(src/open_pulse)
else() # Standalone build

View File

@ -8,8 +8,7 @@ graft src
graft contrib
include qiskit/providers/aer/backends/wrappers/CMakeLists.txt
include qiskit/providers/aer/backends/wrappers/bindings.cc
include qiskit/providers/aer/pulse/cy/CMakeLists.txt
include qiskit/providers/aer/pulse/qutip_lite/cy/CMakeLists.txt
include qiskit/providers/aer/pulse/qutip_extra_lite/cy/CMakeLists.txt
include qiskit/providers/aer/VERSION.txt
include CMakeLists.txt
include cmake/*.cmake

View File

@ -25,8 +25,7 @@ from qiskit.providers.models import BackendConfiguration, PulseDefaults
from .aerbackend import AerBackend
from ..aerjob import AerJob
from ..version import __version__
from ..pulse.qobj.digest import digest_pulse_obj
from ..pulse.solver.opsolve import opsolve
from ..pulse.controllers.pulse_controller import pulse_controller
logger = logging.getLogger(__name__)
@ -140,9 +139,8 @@ class PulseSimulator(AerBackend):
start = time.time()
if validate:
self._validate(qobj, backend_options, noise_model=None)
# Send to solver
openpulse_system = digest_pulse_obj(qobj, system_model, backend_options)
results = opsolve(openpulse_system)
# Send problem specification to pulse_controller and get results
results = pulse_controller(qobj, system_model, backend_options)
end = time.time()
return self._format_results(job_id, results, end - start, qobj.qobj_id)
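
For orientation, a minimal sketch of the refactored entry point (not part of this diff; qobj, system_model and backend_options are assumed to come from the usual assemble / PulseSystemModel workflow):

# Hedged sketch: the PulseSimulator now hands the whole problem specification to
# pulse_controller, which digests the qobj, runs the experiments, and returns the
# formatted per-experiment results in a single call.
from qiskit.providers.aer.pulse.controllers.pulse_controller import pulse_controller

def run_pulse_simulation(qobj, system_model, backend_options=None):
    # entries in backend_options override the corresponding qobj/system_model settings
    return pulse_controller(qobj, system_model, backend_options)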

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -47,10 +47,10 @@ of systems.
# pylint: disable=import-error
import distutils.sysconfig # noqa
import numpy as np
from .qutip_lite.cy import pyxbuilder as pbldr
from .qutip_extra_lite.cy import pyxbuilder as pbldr
from .duffing_model_generators import duffing_system_model
from .pulse_system_model import PulseSystemModel
from .system_models.duffing_model_generators import duffing_system_model
from .system_models.pulse_system_model import PulseSystemModel
# Remove -Wstrict-prototypes from cflags
CFG_VARS = distutils.sysconfig.get_config_vars()

View File

@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Controllers are general simulation routines that orchestrate different types of
computations that are part of the same "simulation".
May need a notion of "instruction", so that the controllers know what to call.
"""

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -11,222 +11,139 @@
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name, missing-return-type-doc
# pylint: disable=invalid-name, import-error
"""A module of routines for digesting a PULSE qobj
into something we can actually use.
"""Interpretation and storage of PulseQobj information for pulse simulation
"""
from warnings import warn
from collections import OrderedDict
import numpy as np
from qiskit.providers.aer.aererror import AerError
from .op_system import OPSystem
from .opparse import NoiseParser
from .operators import qubit_occ_oper_dressed
from ..solver.options import OPoptions
# pylint: disable=no-name-in-module,import-error
from ..cy.utils import oplist_to_array
from . import op_qobj as op
# pylint: disable=no-name-in-module
from ..de_solvers.pulse_utils import oplist_to_array
def digest_pulse_obj(qobj, system_model, backend_options=None):
"""Convert specification of a simulation in the pulse language into the format accepted
by the simulator.
Args:
qobj (PulseQobj): experiment specification
system_model (PulseSystemModel): object representing system model
backend_options (dict): dictionary of simulation options
Returns:
out (OPSystem): object understandable by the pulse simulator
Raises:
ValueError: When necessary parameters are missing
Exception: For invalid ode options
class DigestedPulseQobj:
"""Container class for information extracted from PulseQobj
"""
out = OPSystem()
def __init__(self):
# ####################################
# Some "Simulation description"
# ####################################
# parameters related to memory/measurements
self.shots = None
self.meas_level = None
self.meas_return = None
self.memory_slots = None
self.memory = None
self.n_registers = None
# ####################################
# Signal portion
# ####################################
# these contain a particular undocumented data structure
self.pulse_array = None
self.pulse_indices = None
self.pulse_to_int = None
self.qubit_lo_freq = None
# #############################################
# Mix of both signal and simulation description
# #############################################
# These should be turned into an internal "simulation events"
# structure
# "experiments" contains a combination of signal information and
# other experiment descriptions, which should be separated
self.experiments = None
def digest_pulse_qobj(qobj, channels, dt, qubit_list, backend_options=None):
""" Given a PulseQobj (and other parameters), returns a DigestedPulseQobj
containing relevant extracted information
Parameters:
qobj (qobj): the PulseQobj
channels (OrderedDict): channel dictionary
dt (float): pulse sample width
qubit_list (list): list of qubits to include
backend_options (dict): dict with options that can override all other parameters
Returns:
DigestedPulseQobj: digested pulse qobj
Raises:
ValueError: for missing parameters
AerError: for unsupported features or invalid qobj
TypeError: for arguments of invalid type
"""
if backend_options is None:
backend_options = {}
digested_qobj = DigestedPulseQobj()
qobj_dict = qobj.to_dict()
qobj_config = qobj_dict['config']
if backend_options is None:
backend_options = {}
# raises errors for unsupported features
_unsupported_errors(qobj_dict)
# override anything in qobj_config that is present in backend_options
for key in backend_options.keys():
qobj_config[key] = backend_options[key]
noise_model = backend_options.get('noise_model', None)
# post warnings for unsupported features
_unsupported_warnings(qobj_dict, noise_model)
# ###############################
# ### Parse qobj_config settings
# ###############################
if 'memory_slots' not in qobj_config:
raise ValueError('Number of memory_slots must be specified in Qobj config')
out.global_data['shots'] = int(qobj_config.get('shots', 1024))
out.global_data['meas_level'] = int(qobj_config.get('meas_level', 2))
out.global_data['meas_return'] = qobj_config.get('meas_return', 'avg')
out.global_data['memory_slots'] = qobj_config.get('memory_slots', 0)
out.global_data['memory'] = qobj_config.get('memory', False)
out.global_data['n_registers'] = qobj_config.get('n_registers', 0)
# Handle qubit_lo_freq
# First, try to draw from the PulseQobj. If the PulseQobj was assembled using the
# PulseSimulator as the backend, and no qubit_lo_freq was specified, it will default to
# [np.inf]
qubit_lo_freq = None
# set memory and measurement details
digested_qobj.shots = int(qobj_config.get('shots', 1024))
digested_qobj.meas_level = int(qobj_config.get('meas_level', 2))
digested_qobj.meas_return = qobj_config.get('meas_return', 'avg')
digested_qobj.memory_slots = qobj_config.get('memory_slots', 0)
digested_qobj.memory = qobj_config.get('memory', False)
digested_qobj.n_registers = qobj_config.get('n_registers', 0)
# set qubit_lo_freq as given in qobj
if 'qubit_lo_freq' in qobj_config and qobj_config['qubit_lo_freq'] != [np.inf]:
# qobj frequencies are divided by 1e9, so multiply back
qubit_lo_freq = [freq * 1e9 for freq in qobj_config['qubit_lo_freq']]
digested_qobj.qubit_lo_freq = [freq * 1e9 for freq in qobj_config['qubit_lo_freq']]
# if it wasn't specified in the PulseQobj, draw from system_model
if qubit_lo_freq is None:
qubit_lo_freq = system_model._qubit_freq_est
# if still None draw from the Hamiltonian
if qubit_lo_freq is None:
qubit_lo_freq = system_model.hamiltonian.get_qubit_lo_from_drift()
warn('Warning: qubit_lo_freq was not specified in PulseQobj or in PulseSystemModel, ' +
'so it is being automatically determined from the drift Hamiltonian.')
# Build pulse arrays ***************************************************************
# build pulse arrays from qobj
pulses, pulses_idx, pulse_dict = build_pulse_arrays(qobj_dict['experiments'],
qobj_config['pulse_library'])
out.global_data['pulse_array'] = pulses
out.global_data['pulse_indices'] = pulses_idx
out.pulse_to_int = pulse_dict
digested_qobj.pulse_array = pulses
digested_qobj.pulse_indices = pulses_idx
digested_qobj.pulse_to_int = pulse_dict
# ###############################
# ### Extract model parameters
# ###############################
# Get qubit list and number
qubit_list = system_model.subsystem_list
if qubit_list is None:
raise ValueError('Model must have a qubit list to simulate.')
n_qubits = len(qubit_list)
# get Hamiltonian
if system_model.hamiltonian is None:
raise ValueError('Model must have a Hamiltonian to simulate.')
ham_model = system_model.hamiltonian
# For now we dump this into OpSystem, though that should be refactored
out.system = ham_model._system
out.vars = ham_model._variables
out.channels = ham_model._channels
out.h_diag = ham_model._h_diag
out.evals = ham_model._evals
out.estates = ham_model._estates
dim_qub = ham_model._subsystem_dims
dim_osc = {}
out.freqs = system_model.calculate_channel_frequencies(qubit_lo_freq=qubit_lo_freq)
# convert estates into a Qutip qobj
estates = [op.state(state) for state in ham_model._estates.T[:]]
out.initial_state = estates[0]
out.global_data['vars'] = list(out.vars.values())
# Need this info for evaluating the hamiltonian vars in the c++ solver
out.global_data['vars_names'] = list(out.vars.keys())
out.global_data['freqs'] = list(out.freqs.values())
# Get dt
if system_model.dt is None:
raise ValueError('Qobj must have a dt value to simulate.')
out.dt = system_model.dt
# Parse noise
noise_dict = noise_model or {}
if noise_dict:
noise = NoiseParser(noise_dict=noise_dict,
dim_osc=dim_osc, dim_qub=dim_qub)
noise.parse()
out.noise = noise.compiled
if any(out.noise):
out.can_sample = False
out.global_data['c_num'] = len(out.noise)
else:
out.noise = None
# ###############################
# ### Parse backend_options
# ###############################
if 'seed' in backend_options:
out.global_data['seed'] = int(backend_options.get('seed'))
else:
out.global_data['seed'] = None
out.global_data['q_level_meas'] = int(backend_options.get('q_level_meas', 1))
# solver options
allowed_ode_options = ['atol', 'rtol', 'nsteps', 'max_step',
'num_cpus', 'norm_tol', 'norm_steps',
'rhs_reuse', 'rhs_filename']
ode_options = backend_options.get('ode_options', {})
for key in ode_options:
if key not in allowed_ode_options:
raise Exception('Invalid ode_option: {}'.format(key))
out.ode_options = OPoptions(**ode_options)
# Set the ODE solver max step to be the half the
# width of the smallest pulse
min_width = np.iinfo(np.int32).max
for key, val in out.pulse_to_int.items():
if key != 'pv':
stop = out.global_data['pulse_indices'][val + 1]
start = out.global_data['pulse_indices'][val]
min_width = min(min_width, stop - start)
out.ode_options.max_step = min_width / 2 * out.dt
# ###############################
# ### Convert experiments to data structures.
# ###############################
out.global_data['measurement_ops'] = [None] * n_qubits
experiments = []
for exp in qobj_dict['experiments']:
exp_struct = experiment_to_structs(exp,
out.channels,
out.global_data['pulse_indices'],
out.pulse_to_int,
out.dt, qubit_list)
channels,
pulses_idx,
pulse_dict,
dt,
qubit_list)
experiments.append(exp_struct)
# Add in measurement operators
# Not sure if this will work for multiple measurements
# Note: the extraction of multiple measurements works, but the simulator itself
# implicitly assumes there is only one measurement at the end
if any(exp_struct['acquire']):
for acq in exp_struct['acquire']:
for jj in acq[1]:
if jj > qubit_list[-1]:
continue
if not out.global_data['measurement_ops'][jj]:
out.global_data['measurement_ops'][jj] = \
qubit_occ_oper_dressed(jj,
estates,
h_osc=dim_osc,
h_qub=dim_qub,
level=out.global_data['q_level_meas']
)
digested_qobj.experiments = experiments
out.experiments.append(exp_struct)
if not exp_struct['can_sample']:
out.can_sample = False
# This is a temporary flag while stabilizing cpp func ODE solver
out.use_cpp_ode_func = qobj_config.get('use_cpp_ode_func', True)
return out
return digested_qobj
def _unsupported_warnings(qobj_dict, noise_model):
""" Warns the user about untested/unsupported features.
def _unsupported_errors(qobj_dict):
""" Raises errors for untested/unsupported features.
Parameters:
qobj_dict (dict): qobj in dictionary form
noise_model (dict): backend_options for simulation
Returns:
Raises:
AerError: for unsupported features
@ -234,8 +151,6 @@ def _unsupported_warnings(qobj_dict, noise_model):
# Warnings that don't stop execution
warning_str = '{} are an untested feature, and therefore may not behave as expected.'
if noise_model is not None:
warn(warning_str.format('Noise models'))
if _contains_pv_instruction(qobj_dict['experiments']):
raise AerError(warning_str.format('PersistentValue instructions'))
@ -320,22 +235,40 @@ def build_pulse_arrays(experiments, pulse_library):
for _, pulse in enumerate(pulse_library):
stop = pulses_idx[ind - 1] + len(pulse['samples'])
pulses_idx[ind] = stop
start = pulses_idx[ind - 1]
if isinstance(pulse['samples'], np.ndarray):
pulses[start:stop] = pulse['samples']
else:
oplist_to_array(pulse['samples'], pulses, start)
oplist_to_array(format_pulse_samples(pulse['samples']), pulses, pulses_idx[ind - 1])
ind += 1
for pv in pv_pulses:
stop = pulses_idx[ind - 1] + 1
pulses_idx[ind] = stop
oplist_to_array([pv[0]], pulses, pulses_idx[ind - 1])
oplist_to_array(format_pulse_samples([pv[0]]), pulses, pulses_idx[ind - 1])
ind += 1
return pulses, pulses_idx, pulse_dict
def format_pulse_samples(pulse_samples):
"""Converts input into a list of complex numbers, where each complex numbers is
given as a list of length 2. If it is already of this format, it simply returns it.
This function assumes the input is either an ndarray, a list of numpy complex number types,
or a list already in the desired format.
Args:
pulse_samples (list): An ndarray of complex numbers or a list
Returns:
list: list of the required format
"""
new_samples = list(pulse_samples)
if not np.iscomplexobj(new_samples[0]):
return new_samples
return [[samp.real, samp.imag] for samp in new_samples]
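
A small usage sketch of format_pulse_samples (illustrative values only):

import numpy as np

# a complex ndarray is converted to [re, im] pairs
format_pulse_samples(np.array([0.1 + 0.2j, 0.3 - 0.4j]))
# -> [[0.1, 0.2], [0.3, -0.4]]

# input already given as [re, im] pairs is returned unchanged
format_pulse_samples([[0.1, 0.2], [0.3, -0.4]])
# -> [[0.1, 0.2], [0.3, -0.4]]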
def experiment_to_structs(experiment, ham_chans, pulse_inds,
pulse_to_int, dt, qubit_list=None):
"""Converts an experiment to a better formatted structure

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -11,53 +11,103 @@
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-name-in-module, import-error, invalid-name
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
# pylint: disable=no-name-in-module, import-error, unused-variable, invalid-name
"""Monte carlo wave function solver."""
"""
Controller for Monte Carlo state-vector solver method.
"""
from math import log
import logging
import time
import numpy as np
from scipy.integrate import ode
from scipy.linalg.blas import get_blas_funcs
from qiskit.providers.aer.pulse.solver.zvode import qiskit_zvode
from qiskit.providers.aer.pulse.cy.measure import occ_probabilities, write_shots_memory
from ..qutip_lite.cy.spmatfuncs import cy_expect_psi_csr, spmv_csr
from qiskit.tools.parallel import parallel_map, CPU_COUNT
from ..de_solvers.pulse_de_solver import construct_pulse_zvode_solver
from ..de_solvers.pulse_utils import (cy_expect_psi_csr, occ_probabilities,
write_shots_memory, spmv_csr)
dznrm2 = get_blas_funcs("znrm2", dtype=np.float64)
def monte_carlo(seed, exp, op_system):
def run_monte_carlo_experiments(op_system):
""" Runs monte carlo experiments for a given op_system
Parameters:
op_system (PulseSimDescription): container for simulation information
Returns:
tuple: two lists with experiment results
Raises:
Exception: if initial state is of incorrect format
"""
Monte Carlo algorithm returning state-vector or expectation values
at times tlist for a single trajectory.
if not op_system.initial_state.isket:
raise Exception("Initial state must be a state vector.")
# set num_cpus to the value given in settings if none in Options
if not op_system.ode_options.num_cpus:
op_system.ode_options.num_cpus = CPU_COUNT
# setup seeds array
seed = op_system.global_data.get('seed', np.random.randint(np.iinfo(np.int32).max - 1))
prng = np.random.RandomState(seed)
for exp in op_system.experiments:
exp['seed'] = prng.randint(np.iinfo(np.int32).max - 1)
map_kwargs = {'num_processes': op_system.ode_options.num_cpus}
exp_results = []
exp_times = []
for exp in op_system.experiments:
start = time.time()
rng = np.random.RandomState(exp['seed'])
seeds = rng.randint(np.iinfo(np.int32).max - 1,
size=op_system.global_data['shots'])
exp_res = parallel_map(monte_carlo_evolution,
seeds,
task_args=(exp, op_system,),
**map_kwargs)
# exp_results is a list for each shot
# so transform back to an array of shots
exp_res2 = []
for exp_shot in exp_res:
exp_res2.append(exp_shot[0].tolist())
end = time.time()
exp_times.append(end - start)
exp_results.append(np.array(exp_res2))
return exp_results, exp_times
def monte_carlo_evolution(seed, exp, op_system):
""" Performs a single monte carlo run for the given op_system, experiment, and seed
Parameters:
seed (int): seed for random number generation
exp (dict): dictionary containing experiment description
op_system (PulseSimDescription): container for information required for simulation
Returns:
array: results of experiment
Raises:
Exception: if ODE solving has errors
"""
global_data = op_system.global_data
ode_options = op_system.ode_options
cy_rhs_func = global_data['rhs_func']
rng = np.random.RandomState(seed)
tlist = exp['tlist']
snapshots = []
# Init memory
memory = np.zeros((1, global_data['memory_slots']), dtype=np.uint8)
# Init register
register = np.zeros(global_data['n_registers'], dtype=np.uint8)
# Get number of acquire, snapshots, and conditionals
# Get number of acquire
num_acq = len(exp['acquire'])
acq_idx = 0
num_snap = len(exp['snapshot'])
snap_idx = 0
num_cond = len(exp['cond'])
cond_idx = 0
collapse_times = []
collapse_operators = []
@ -65,41 +115,12 @@ def monte_carlo(seed, exp, op_system):
# first rand is collapse norm, second is which operator
rand_vals = rng.rand(2)
ODE = ode(cy_rhs_func)
if op_system.use_cpp_ode_func:
# Don't know how to use OrderedDict type on Cython, so transforming it to dict
channels = dict(op_system.channels)
ODE.set_f_params(global_data, exp, op_system.system, channels, register)
else:
_inst = 'ODE.set_f_params(%s)' % global_data['string']
logging.debug("Monte Carlo: %s\n\n", _inst)
code = compile(_inst, '<string>', 'exec')
# pylint: disable=exec-used
exec(code)
# initialize ODE solver for RHS
ODE._integrator = qiskit_zvode(method=ode_options.method,
order=ode_options.order,
atol=ode_options.atol,
rtol=ode_options.rtol,
nsteps=ode_options.nsteps,
first_step=ode_options.first_step,
min_step=ode_options.min_step,
max_step=ode_options.max_step
)
# Forces complex ODE solving
if not any(ODE._y):
ODE.t = 0.0
ODE._y = np.array([0.0], complex)
ODE._integrator.reset(len(ODE._y), ODE.jac is not None)
ODE.set_initial_value(global_data['initial_state'], 0)
# make array for collapse operator inds
cinds = np.arange(global_data['c_num'])
n_dp = np.zeros(global_data['c_num'], dtype=float)
ODE = construct_pulse_zvode_solver(exp, op_system)
# RUN ODE UNTIL EACH TIME IN TLIST
for stop_time in tlist:
# ODE WHILE LOOP FOR INTEGRATE UP TO TIME TLIST[k]
@ -155,7 +176,7 @@ def monte_carlo(seed, exp, op_system):
n_dp[i] = cy_expect_psi_csr(global_data['n_ops_data'][i],
global_data['n_ops_ind'][i],
global_data['n_ops_ptr'][i],
ODE._y, 1)
ODE._y, True)
# determine which operator does collapse and store it
_p = np.cumsum(n_dp / np.sum(n_dp))

View File

@ -0,0 +1,414 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Entry/exit point for pulse simulation specified through PulseSimulator backend
"""
from warnings import warn
import numpy as np
from ..system_models.string_model_parser.string_model_parser import NoiseParser
from ..qutip_extra_lite import qobj_generators as qobj_gen
from .digest_pulse_qobj import digest_pulse_qobj
from ..de_solvers.pulse_de_options import OPoptions
from .unitary_controller import run_unitary_experiments
from .mc_controller import run_monte_carlo_experiments
def pulse_controller(qobj, system_model, backend_options):
""" Interprets PulseQobj input, runs simulations, and returns results
Parameters:
qobj (qobj): pulse qobj containing a list of pulse schedules
system_model (PulseSystemModel): contains system model information
backend_options (dict): dict of options, which overrides other parameters
Returns:
list: simulation results
Raises:
ValueError: if input is of incorrect format
Exception: for invalid ODE options
"""
pulse_sim_desc = PulseSimDescription()
if backend_options is None:
backend_options = {}
noise_model = backend_options.get('noise_model', None)
# post warnings for unsupported features
_unsupported_warnings(noise_model)
# ###############################
# ### Extract model parameters
# ###############################
# Get qubit list and number
qubit_list = system_model.subsystem_list
if qubit_list is None:
raise ValueError('Model must have a qubit list to simulate.')
n_qubits = len(qubit_list)
# get Hamiltonian
if system_model.hamiltonian is None:
raise ValueError('Model must have a Hamiltonian to simulate.')
ham_model = system_model.hamiltonian
# For now we dump this into OpSystem, though that should be refactored
pulse_sim_desc.system = ham_model._system
pulse_sim_desc.vars = ham_model._variables
pulse_sim_desc.channels = ham_model._channels
pulse_sim_desc.h_diag = ham_model._h_diag
pulse_sim_desc.evals = ham_model._evals
pulse_sim_desc.estates = ham_model._estates
dim_qub = ham_model._subsystem_dims
dim_osc = {}
# convert estates into a Qutip qobj
estates = [qobj_gen.state(state) for state in ham_model._estates.T[:]]
pulse_sim_desc.initial_state = estates[0]
pulse_sim_desc.global_data['vars'] = list(pulse_sim_desc.vars.values())
# Need this info for evaluating the hamiltonian vars in the c++ solver
pulse_sim_desc.global_data['vars_names'] = list(pulse_sim_desc.vars.keys())
# Get dt
if system_model.dt is None:
raise ValueError('Qobj must have a dt value to simulate.')
pulse_sim_desc.dt = system_model.dt
# Parse noise
if noise_model:
noise = NoiseParser(noise_dict=noise_model, dim_osc=dim_osc, dim_qub=dim_qub)
noise.parse()
pulse_sim_desc.noise = noise.compiled
if any(pulse_sim_desc.noise):
pulse_sim_desc.can_sample = False
# ###############################
# ### Parse qobj_config settings
# ###############################
digested_qobj = digest_pulse_qobj(qobj,
pulse_sim_desc.channels,
pulse_sim_desc.dt,
qubit_list,
backend_options)
# does this even need to be extracted here, or can the relevant info just be passed to the
# relevant functions?
pulse_sim_desc.global_data['shots'] = digested_qobj.shots
pulse_sim_desc.global_data['meas_level'] = digested_qobj.meas_level
pulse_sim_desc.global_data['meas_return'] = digested_qobj.meas_return
pulse_sim_desc.global_data['memory_slots'] = digested_qobj.memory_slots
pulse_sim_desc.global_data['memory'] = digested_qobj.memory
pulse_sim_desc.global_data['n_registers'] = digested_qobj.n_registers
pulse_sim_desc.global_data['pulse_array'] = digested_qobj.pulse_array
pulse_sim_desc.global_data['pulse_indices'] = digested_qobj.pulse_indices
pulse_sim_desc.pulse_to_int = digested_qobj.pulse_to_int
pulse_sim_desc.experiments = digested_qobj.experiments
# Handle qubit_lo_freq
qubit_lo_freq = digested_qobj.qubit_lo_freq
# if it wasn't specified in the PulseQobj, draw from system_model
if qubit_lo_freq is None:
qubit_lo_freq = system_model._qubit_freq_est
# if still None draw from the Hamiltonian
if qubit_lo_freq is None:
qubit_lo_freq = system_model.hamiltonian.get_qubit_lo_from_drift()
warn('Warning: qubit_lo_freq was not specified in PulseQobj or in PulseSystemModel, ' +
'so it is being automatically determined from the drift Hamiltonian.')
pulse_sim_desc.freqs = system_model.calculate_channel_frequencies(qubit_lo_freq=qubit_lo_freq)
pulse_sim_desc.global_data['freqs'] = list(pulse_sim_desc.freqs.values())
# ###############################
# ### Parse backend_options
# # solver-specific information should be extracted in the solver
# ###############################
pulse_sim_desc.global_data['seed'] = (int(backend_options['seed']) if 'seed' in backend_options
else None)
pulse_sim_desc.global_data['q_level_meas'] = int(backend_options.get('q_level_meas', 1))
# solver options
allowed_ode_options = ['atol', 'rtol', 'nsteps', 'max_step',
'num_cpus', 'norm_tol', 'norm_steps',
'rhs_reuse', 'rhs_filename']
ode_options = backend_options.get('ode_options', {})
for key in ode_options:
if key not in allowed_ode_options:
raise Exception('Invalid ode_option: {}'.format(key))
pulse_sim_desc.ode_options = OPoptions(**ode_options)
# Set the ODE solver max step to be the half the
# width of the smallest pulse
min_width = np.iinfo(np.int32).max
for key, val in pulse_sim_desc.pulse_to_int.items():
if key != 'pv':
stop = pulse_sim_desc.global_data['pulse_indices'][val + 1]
start = pulse_sim_desc.global_data['pulse_indices'][val]
min_width = min(min_width, stop - start)
pulse_sim_desc.ode_options.max_step = min_width / 2 * pulse_sim_desc.dt
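# Illustrative check of the max_step heuristic above (hypothetical numbers): if the
# shortest pulse spans stop - start = 16 samples and dt = 0.2222, then
# ode_options.max_step = 16 / 2 * 0.2222 ~= 1.78, in the same time units as dt.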
# ########################################
# Determination of measurement operators.
# ########################################
pulse_sim_desc.global_data['measurement_ops'] = [None] * n_qubits
for exp in pulse_sim_desc.experiments:
# Add in measurement operators
# Not sure if this will work for multiple measurements
# Note: the extraction of multiple measurements works, but the simulator itself
# implicitly assumes there is only one measurement at the end
if any(exp['acquire']):
for acq in exp['acquire']:
for jj in acq[1]:
if jj > qubit_list[-1]:
continue
if not pulse_sim_desc.global_data['measurement_ops'][jj]:
q_level_meas = pulse_sim_desc.global_data['q_level_meas']
pulse_sim_desc.global_data['measurement_ops'][jj] = \
qobj_gen.qubit_occ_oper_dressed(jj,
estates,
h_osc=dim_osc,
h_qub=dim_qub,
level=q_level_meas
)
if not exp['can_sample']:
pulse_sim_desc.can_sample = False
op_data_config(pulse_sim_desc)
run_experiments = (run_unitary_experiments if pulse_sim_desc.can_sample
else run_monte_carlo_experiments)
exp_results, exp_times = run_experiments(pulse_sim_desc)
return format_exp_results(exp_results, exp_times, pulse_sim_desc)
def op_data_config(op_system):
""" Preps the data for the opsolver.
This should eventually be replaced by functions that construct different types of DEs
in standard formats
Everything is stored in the passed op_system.
Args:
op_system (OPSystem): An openpulse system.
"""
num_h_terms = len(op_system.system)
H = [hpart[0] for hpart in op_system.system]
op_system.global_data['num_h_terms'] = num_h_terms
# take care of collapse operators, if any
op_system.global_data['c_num'] = 0
if op_system.noise:
op_system.global_data['c_num'] = len(op_system.noise)
op_system.global_data['num_h_terms'] += 1
op_system.global_data['c_ops_data'] = []
op_system.global_data['c_ops_ind'] = []
op_system.global_data['c_ops_ptr'] = []
op_system.global_data['n_ops_data'] = []
op_system.global_data['n_ops_ind'] = []
op_system.global_data['n_ops_ptr'] = []
op_system.global_data['h_diag_elems'] = op_system.h_diag
# if there are any collapse operators
H_noise = 0
for kk in range(op_system.global_data['c_num']):
c_op = op_system.noise[kk]
n_op = c_op.dag() * c_op
# collapse ops
op_system.global_data['c_ops_data'].append(c_op.data.data)
op_system.global_data['c_ops_ind'].append(c_op.data.indices)
op_system.global_data['c_ops_ptr'].append(c_op.data.indptr)
# norm ops
op_system.global_data['n_ops_data'].append(n_op.data.data)
op_system.global_data['n_ops_ind'].append(n_op.data.indices)
op_system.global_data['n_ops_ptr'].append(n_op.data.indptr)
# Norm ops added to time-independent part of
# Hamiltonian to decrease norm
H_noise -= 0.5j * n_op
if H_noise:
H = H + [H_noise]
# construct data sets
op_system.global_data['h_ops_data'] = [-1.0j * hpart.data.data
for hpart in H]
op_system.global_data['h_ops_ind'] = [hpart.data.indices for hpart in H]
op_system.global_data['h_ops_ptr'] = [hpart.data.indptr for hpart in H]
# Convert inital state to flat array in global_data
op_system.global_data['initial_state'] = \
op_system.initial_state.full().ravel()
def format_exp_results(exp_results, exp_times, op_system):
""" format simulation results
Parameters:
exp_results (list): simulation results
exp_times (list): simulation times
op_system (PulseSimDescription): object containing all simulation information
Returns:
list: formatted simulation results
"""
# format the data into the proper output
all_results = []
for idx_exp, exp in enumerate(op_system.experiments):
m_lev = op_system.global_data['meas_level']
m_ret = op_system.global_data['meas_return']
# populate the results dictionary
results = {'seed_simulator': exp['seed'],
'shots': op_system.global_data['shots'],
'status': 'DONE',
'success': True,
'time_taken': exp_times[idx_exp],
'header': exp['header'],
'meas_level': m_lev,
'meas_return': m_ret,
'data': {}}
if op_system.can_sample:
memory = exp_results[idx_exp][0]
results['data']['statevector'] = []
for coef in exp_results[idx_exp][1]:
results['data']['statevector'].append([np.real(coef),
np.imag(coef)])
results['header']['ode_t'] = exp_results[idx_exp][2]
else:
memory = exp_results[idx_exp]
# meas_level 2 returns the shots
if m_lev == 2:
# convert the memory **array** into an integer
# e.g. [1,0] -> 2
int_mem = memory.dot(np.power(2.0,
np.arange(memory.shape[1]))).astype(int)
# if the memory flag is set return each shot
if op_system.global_data['memory']:
hex_mem = [hex(val) for val in int_mem]
results['data']['memory'] = hex_mem
# Get hex counts dict
unique = np.unique(int_mem, return_counts=True)
hex_dict = {}
for kk in range(unique[0].shape[0]):
key = hex(unique[0][kk])
hex_dict[key] = unique[1][kk]
results['data']['counts'] = hex_dict
# meas_level 1 returns the <n>
elif m_lev == 1:
if m_ret == 'avg':
memory = [np.mean(memory, 0)]
# convert into the right [real, imag] pair form for json
# this should be cython?
results['data']['memory'] = []
for mem_shot in memory:
results['data']['memory'].append([])
for mem_slot in mem_shot:
results['data']['memory'][-1].append(
[np.real(mem_slot), np.imag(mem_slot)])
if m_ret == 'avg':
results['data']['memory'] = results['data']['memory'][0]
all_results.append(results)
return all_results
def _unsupported_warnings(noise_model):
""" Warns the user about untested/unsupported features.
Parameters:
noise_model (dict): backend_options for simulation
Returns:
Raises:
AerError: for unsupported features
"""
# Warnings that don't stop execution
warning_str = '{} are an untested feature, and therefore may not behave as expected.'
if noise_model is not None:
warn(warning_str.format('Noise models'))
class PulseSimDescription():
""" Object for holding any/all information required for simulation.
Needs to be refactored into different pieces.
"""
def __init__(self):
# The system Hamiltonian in numerical format
self.system = None
# The noise (if any) in numerical format
self.noise = None
# System variables
self.vars = None
# The initial state of the system
self.initial_state = None
# Channels in the Hamiltonian string
# these tell the order in which the channels
# are evaluated in the RHS solver.
self.channels = None
# options of the ODE solver
self.ode_options = None
# time between pulse sample points.
self.dt = None
# Array containing all pulse samples
self.pulse_array = None
# Array of indices indicating where a pulse starts in the self.pulse_array
self.pulse_indices = None
# A dict that translates pulse names to integers for use in self.pulse_indices
self.pulse_to_int = None
# Holds the parsed experiments
self.experiments = []
# Can experiments be simulated once then sampled
self.can_sample = True
# holds global data
self.global_data = {}
# holds frequencies for the channels
self.freqs = {}
# diagonal elements of the hamiltonian
self.h_diag = None
# eigenvalues of the time-independent hamiltonian
self.evals = None
# eigenstates of the time-independent hamiltonian
self.estates = None

View File

@ -0,0 +1,139 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-name-in-module, import-error, invalid-name
"""
Controller for solving unitary evolution of a state-vector.
"""
import time
import numpy as np
from scipy.linalg.blas import get_blas_funcs
from qiskit.tools.parallel import parallel_map, CPU_COUNT
from ..de_solvers.pulse_de_solver import construct_pulse_zvode_solver
# Imports from qutip_extra_lite
from ..de_solvers.pulse_utils import occ_probabilities, write_shots_memory
dznrm2 = get_blas_funcs("znrm2", dtype=np.float64)
def run_unitary_experiments(op_system):
""" Runs unitary experiments for a given op_system
Parameters:
op_system (PulseSimDescription): container for simulation information
Returns:
tuple: two lists with experiment results
Raises:
Exception: if initial state is of incorrect format
"""
if not op_system.initial_state.isket:
raise Exception("Initial state must be a state vector.")
# set num_cpus to the value given in settings if none in Options
if not op_system.ode_options.num_cpus:
op_system.ode_options.num_cpus = CPU_COUNT
# setup seeds array
seed = op_system.global_data.get('seed', np.random.randint(np.iinfo(np.int32).max - 1))
prng = np.random.RandomState(seed)
for exp in op_system.experiments:
exp['seed'] = prng.randint(np.iinfo(np.int32).max - 1)
map_kwargs = {'num_processes': op_system.ode_options.num_cpus}
# set up full simulation, i.e. combining different (ideally modular) computational
# resources into one function
def full_simulation(exp, op_system):
psi, ode_t = unitary_evolution(exp, op_system)
# ###############
# do measurement
# ###############
rng = np.random.RandomState(exp['seed'])
shots = op_system.global_data['shots']
# Init memory
memory = np.zeros((shots, op_system.global_data['memory_slots']),
dtype=np.uint8)
qubits = []
memory_slots = []
tlist = exp['tlist']
for acq in exp['acquire']:
if acq[0] == tlist[-1]:
qubits += list(acq[1])
memory_slots += list(acq[2])
qubits = np.array(qubits, dtype='uint32')
memory_slots = np.array(memory_slots, dtype='uint32')
probs = occ_probabilities(qubits, psi, op_system.global_data['measurement_ops'])
rand_vals = rng.rand(memory_slots.shape[0] * shots)
write_shots_memory(memory, memory_slots, probs, rand_vals)
return [memory, psi, ode_t]
# run simulation on each experiment in parallel
start = time.time()
exp_results = parallel_map(full_simulation,
op_system.experiments,
task_args=(op_system,),
**map_kwargs
)
end = time.time()
exp_times = (np.ones(len(op_system.experiments)) *
(end - start) / len(op_system.experiments))
return exp_results, exp_times
def unitary_evolution(exp, op_system):
"""
Calculates evolution when there is no noise,
or any measurements that are not at the end
of the experiment.
Args:
exp (dict): Dictionary of experimental pulse and fc
op_system (OPSystem): Global OpenPulse system settings
Returns:
array: Memory of shots.
Raises:
Exception: Error in ODE solver.
"""
ODE = construct_pulse_zvode_solver(exp, op_system)
tlist = exp['tlist']
for t in tlist[1:]:
ODE.integrate(t, step=0)
if ODE.successful():
psi = ODE.y / dznrm2(ODE.y)
else:
err_msg = 'ZVODE exited with status: %s' % ODE.get_return_code()
raise Exception(err_msg)
# apply final rotation to come out of rotating frame
psi_rot = np.exp(-1j * op_system.global_data['h_diag_elems'] * ODE.t)
psi *= psi_rot
return psi, ODE.t

View File

@ -1,25 +0,0 @@
# -*- coding: utf-8 -*-
#!python
#cython: language_level = 3
#distutils: language = c++
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
cdef complex chan_value(double t,
unsigned int chan_num,
double freq_ch,
double[::1] chan_pulse_times,
complex[::1] pulse_array,
unsigned int[::1] pulse_ints,
double[::1] fc_array,
unsigned char[::1] register)

View File

@ -1,107 +0,0 @@
# -*- coding: utf-8 -*-
#!python
#cython: language_level = 3
#distutils: language = c++
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
cimport cython
cimport cython
from libc.math cimport floor, M_PI
cdef extern from "<complex>" namespace "std" nogil:
double complex exp(double complex x)
@cython.cdivision(True)
cdef inline int get_arr_idx(double t, double start, double stop, int len_arr):
"""Computes the array index value for sampling a pulse in pulse_array.
Args:
t (double): The current simulation time.
start (double): Start time of pulse in question.
stop (double): Stop time of pulse.
len_arr (int): Length of the pulse sample array.
Returns:
int: The array index value.
"""
return <int>floor(((t-start)/(stop-start)*len_arr))
@cython.boundscheck(False)
cdef complex chan_value(double t,
unsigned int chan_num,
double freq_ch,
double[::1] chan_pulse_times,
complex[::1] pulse_array,
unsigned int[::1] pulse_ints,
double[::1] fc_array,
unsigned char[::1] register):
"""Computes the value of a given channel at time `t`.
Args:
t (double): Current time.
chan_num (int): The int that labels the channel.
chan_pulse_times (int array): Array containing
start_time, stop_time, pulse_int, conditional for
each pulse on the channel.
pulse_array (complex array): The array containing all the
pulse data in the passed pulse qobj.
pulse_ints (int array): Array that tells you where to start
indexing pulse_array for a given pulse labeled by
chan_pulse_times[4*kk+2].
fc_array (double array): Frame-change times, phases, and conditionals for the channel.
freq_ch (double): Channel frequency.
register (int8 array): Classical register used for conditional instructions.
"""
cdef size_t kk
cdef double start_time, stop_time, phase=0
cdef int start_idx, stop_idx, offset_idx, temp_int, cond
cdef complex out = 0
# This is because each entry has four values:
# start_time, stop_time, pulse_int, conditional
cdef unsigned int num_times = chan_pulse_times.shape[0] // 4
for kk in range(num_times):
# the time is overlapped with the kkth pulse
start_time = chan_pulse_times[4*kk]
stop_time = chan_pulse_times[4*kk+1]
if start_time <= t < stop_time:
cond = <int>chan_pulse_times[4*kk+3]
if cond < 0 or register[cond]:
temp_int = <int>chan_pulse_times[4*kk+2]
start_idx = pulse_ints[temp_int]
stop_idx = pulse_ints[temp_int+1]
offset_idx = get_arr_idx(t, start_time, stop_time, stop_idx-start_idx)
out = pulse_array[start_idx+offset_idx]
break
# Compute the frame change up to time t
if out != 0:
num_times = fc_array.shape[0] // 3
for kk in range(num_times):
if t >= fc_array[3*kk]:
do_fc = 1
# Check if FC is conditioned on register
if fc_array[3*kk+2] >= 0:
# If condition is not satisfied, do not do the FC
if not register[<int>fc_array[3*kk+2]]:
do_fc = 0
if do_fc:
# Update the frame change value
phase += fc_array[3*kk+1]
else:
break
if phase != 0:
out *= exp(1j*phase)
out *= exp(-1j*2*M_PI*freq_ch*t)
return out

View File

@ -1,73 +0,0 @@
# -*- coding: utf-8 -*-
#!python
#cython: language_level = 3
#distutils: language = c++
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
cimport cython
import numpy as np
cimport numpy as np
from qiskit.providers.aer.pulse.qutip_lite.cy.spmatfuncs import cy_expect_psi_csr
@cython.boundscheck(False)
def occ_probabilities(unsigned int[::1] qubits, complex[::1] state, list meas_ops):
"""Computes the occupation probabilities of the specifed qubits for
the given state.
Args:
qubits (int array): Ints labelling which qubits are to be measured.
state (complex array): State vector.
meas_ops (list): List of measurement operators, one per qubit.
Returns:
ndarray: Occupation probabilities of the specified qubits.
"""
cdef unsigned int num_qubits = <unsigned int>qubits.shape[0]
cdef np.ndarray[double, ndim=1, mode="c"] probs = np.zeros(qubits.shape[0], dtype=float)
cdef size_t kk
cdef object oper
for kk in range(num_qubits):
oper = meas_ops[qubits[kk]]
probs[kk] = cy_expect_psi_csr(oper.data.data,
oper.data.indices,
oper.data.indptr,
state, 1)
return probs
@cython.boundscheck(False)
def write_shots_memory(unsigned char[:, ::1] mem,
unsigned int[::1] mem_slots,
double[::1] probs,
double[::1] rand_vals):
"""Converts probabilities back into shots
Args:
mem (int8 array): 2D array of memory of size (shots, memory_slots).
mem_slots (uint32 array): Array of memory slots to write to.
probs (double array): Probability of being in the excited state for each measured qubit.
rand_vals (double array): Random values used to convert the probabilities back into shots.
"""
cdef unsigned int nrows = mem.shape[0]
cdef unsigned int nprobs = probs.shape[0]
cdef size_t ii, jj
cdef unsigned char temp
for ii in range(nrows):
for jj in range(nprobs):
temp = <unsigned char>(probs[jj] > rand_vals[nprobs*ii+jj])
if temp:
mem[ii,mem_slots[jj]] = temp

View File

@ -1,44 +0,0 @@
# -*- coding: utf-8 -*-
#!python
#cython: language_level = 3
#distutils: language = c++
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
cimport cython
@cython.boundscheck(False)
def write_memory(unsigned char[:, ::1] mem,
unsigned int[::1] memory_slots,
double[::1] probs,
double[::1] rand_vals):
"""Writes the results of measurements to memory
in place.
Args:
mem (int8 array): 2D array of memory of size (shots*memory_slots).
memory_slots (uint32 array): Array of ints for memory_slots to write too.
probs (double array): Probability of being in excited state for
each qubit in `qubits`.
rand_vals (double array): Random values of len = len(qubits)*shots
"""
cdef unsigned int nrows = mem.shape[0]
cdef unsigned int nprobs = probs.shape[0]
cdef size_t ii, jj
cdef unsigned char temp
for ii in range(nrows):
for jj in range(nprobs):
temp = <unsigned char>(probs[jj] > rand_vals[nprobs*ii+jj])
if temp:
mem[ii,memory_slots[jj]] = temp

View File

@ -1,45 +0,0 @@
# -*- coding: utf-8 -*-
#!python
#cython: language_level = 3
#distutils: language = c++
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
cimport cython
@cython.boundscheck(False)
@cython.wraparound(False)
def oplist_to_array(list A, complex[::1] B,
int start_idx=0):
"""Takes a list of complex numbers represented by a list
of pairs of floats, and inserts them into a complex NumPy
array at a given starting index.
Parameters:
A (list): A nested-list of [re, im] pairs.
B (ndarray): Array for storing complex numbers from list A.
start_idx (int): The starting index at which to insert elements.
Notes:
This is ~5x faster than doing it in Python.
"""
cdef size_t kk
cdef unsigned int lenA = len(A)
cdef list temp
if (start_idx+lenA) > B.shape[0]:
raise Exception('Input list does not fit into array if start_idx is {}.'.format(start_idx))
for kk in range(lenA):
temp = A[kk]
B[start_idx+kk] = temp[0]+1j*temp[1]
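
A brief usage sketch of the oplist_to_array interface (which, per this diff, now lives in de_solvers.pulse_utils; values are illustrative):

import numpy as np

buf = np.zeros(4, dtype=complex)
# write the [re, im] pairs into buf starting at index 1
oplist_to_array([[0.1, 0.2], [0.3, -0.4]], buf, 1)
# buf -> array([0. +0.j, 0.1+0.2j, 0.3-0.4j, 0. +0.j])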

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -12,7 +12,5 @@
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Holds OpenPulse solver settings"""
# Code generation counter
CGEN_NUM = 0
"""DE solvers that interface with the different DE models and signals
"""

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -12,7 +12,7 @@
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""OpenPulse options"""
"""qutip solver options"""
class OPoptions():

View File

@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
# pylint: disable=no-value-for-parameter, invalid-name, import-error
"""Pulse DE solver for problems in qutip format."""
import numpy as np
from scipy.integrate import ode
from scipy.integrate._ode import zvode
# pylint: disable=no-name-in-module
from .pulse_utils import td_ode_rhs_static
def construct_pulse_zvode_solver(exp, op_system):
""" Constructs a scipy ODE solver for a given exp and op_system
Parameters:
exp (dict): dict containing the experiment description
op_system (PulseSimDescription): container for simulation information
Returns:
ode: scipy ode
"""
# extract relevant data from op_system
global_data = op_system.global_data
ode_options = op_system.ode_options
channels = dict(op_system.channels)
# Init register
register = np.ones(global_data['n_registers'], dtype=np.uint8)
ODE = ode(td_ode_rhs_static)
ODE.set_f_params(global_data, exp, op_system.system, channels, register)
ODE._integrator = qiskit_zvode(method=ode_options.method,
order=ode_options.order,
atol=ode_options.atol,
rtol=ode_options.rtol,
nsteps=ode_options.nsteps,
first_step=ode_options.first_step,
min_step=ode_options.min_step,
max_step=ode_options.max_step
)
# Forces complex ODE solving
if not ODE._y:
ODE.t = 0.0
ODE._y = np.array([0.0], complex)
ODE._integrator.reset(len(ODE._y), ODE.jac is not None)
# Since all experiments are defined to start at zero time.
ODE.set_initial_value(global_data['initial_state'], 0)
return ODE
class qiskit_zvode(zvode):
"""Modifies the stepper for ZVODE so that
it always stops at a given time in tlist;
by default, it overshoots the time.
"""
def step(self, *args):
itask = self.call_args[2]
self.rwork[0] = args[4]
self.call_args[2] = 5
r = self.run(*args)
self.call_args[2] = itask
return r
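
A hedged reading of the step() override above, based on standard ZVODE semantics (not stated in the diff itself):

# call_args[2] holds the ZVODE ITASK flag and rwork[0] the critical time TCRIT.
# ITASK=5 means "take one step, but do not pass TCRIT", so pointing rwork[0] at the
# requested stop time (args[4]) makes step() land exactly on that time instead of
# overshooting; the original ITASK is restored afterwards.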

View File

@ -1,61 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"The OpenPulse simulator system class"
class OPSystem():
""" A Class that holds all the information
needed to simulate a given PULSE qobj
"""
def __init__(self):
# The system Hamiltonian in numerical format
self.system = None
# The noise (if any) in numerical format
self.noise = None
# System variables
self.vars = None
# The initial state of the system
self.initial_state = None
# Channels in the Hamiltonian string
# these tell the order in which the channels
# are evaluated in the RHS solver.
self.channels = None
# options of the ODE solver
self.ode_options = None
# time between pulse sample points.
self.dt = None
# Array containing all pulse samples
self.pulse_array = None
# Array of indices indicating where a pulse starts in the self.pulse_array
self.pulse_indices = None
# A dict that translates pulse names to integers for use in self.pulse_indices
self.pulse_to_int = None
# Holds the parsed experiments
self.experiments = []
# Can experiments be simulated once then sampled
self.can_sample = True
# holds global data
self.global_data = {}
# holds frequencies for the channels
self.freqs = {}
# diagonal elements of the hamiltonian
self.h_diag = None
# eigenvalues of the time-independent hamiltonian
self.evals = None
# Use C++ version of the function to pass to the ODE solver or the Cython one
self.use_cpp_ode_func = False
# eigenstates of the time-independent hamiltonian
self.estates = None

View File

@ -1,260 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Module for creating quantum operators."""
import numpy as np
import scipy.linalg as la
from ..qobj import op_qobj as op
def gen_oper(opname, index, h_osc, h_qub, states=None):
"""Generate quantum operators.
Args:
opname (str): Name of the operator to be returned.
index (int): Index of operator.
h_osc (dict): Dimension of oscillator subspace
h_qub (dict): Dimension of qubit subspace
states (tuple): State indices of projection operator.
Returns:
Qobj: quantum operator for target qubit.
"""
opr_tmp = None
# get number of levels in Hilbert space
if opname in ['X', 'Y', 'Z', 'Sp', 'Sm', 'I', 'O', 'P']:
is_qubit = True
dim = h_qub.get(index, 2)
if opname in ['X', 'Y', 'Z'] and dim > 2:
if opname == 'X':
opr_tmp = op.get_oper('A', dim) + op.get_oper('C', dim)
elif opname == 'Y':
opr_tmp = (-1j * op.get_oper('A', dim) +
1j * op.get_oper('C', dim))
else:
opr_tmp = op.get_oper('I', dim) - 2 * op.get_oper('N', dim)
else:
is_qubit = False
dim = h_osc.get(index, 5)
if opname == 'P':
opr_tmp = op.get_oper(opname, dim, states)
else:
if opr_tmp is None:
opr_tmp = op.get_oper(opname, dim)
# reverse sort by index
rev_h_osc = sorted(h_osc.items(), key=lambda x: x[0])[::-1]
rev_h_qub = sorted(h_qub.items(), key=lambda x: x[0])[::-1]
# osc_n * … * osc_0 * qubit_n * … * qubit_0
opers = []
for ii, dd in rev_h_osc:
if ii == index and not is_qubit:
opers.append(opr_tmp)
else:
opers.append(op.qeye(dd))
for ii, dd in rev_h_qub:
if ii == index and is_qubit:
opers.append(opr_tmp)
else:
opers.append(op.qeye(dd))
return op.tensor(opers)
def qubit_occ_oper(target_qubit, h_osc, h_qub, level=0):
"""Builds the occupation number operator for a target qubit
in a qubit oscillator system, where the oscillators are the first
subsystems, and the qubits last.
Parameters
----------
target_qubit (int): Qubit for which operator is built.
h_osc (dict): Dict of number of levels in each oscillator.
h_qub (dict): Dict of number of levels in each qubit system.
level (int): Level of qubit system to be measured.
Returns:
Qobj: Occupation number operator for target qubit.
"""
# reverse sort by index
rev_h_osc = sorted(h_osc.items(), key=lambda x: x[0])[::-1]
rev_h_qub = sorted(h_qub.items(), key=lambda x: x[0])[::-1]
# osc_n * … * osc_0 * qubit_n * … * qubit_0
opers = []
for ii, dd in rev_h_osc:
opers.append(op.qeye(dd))
for ii, dd in rev_h_qub:
if ii == target_qubit:
opers.append(op.fock_dm(h_qub.get(target_qubit, 2), level))
else:
opers.append(op.qeye(dd))
return op.tensor(opers)
def qubit_occ_oper_dressed(target_qubit, estates, h_osc, h_qub, level=0):
"""Builds the occupation number operator for a target qubit
in a qubit oscillator system, where the oscillators are the first
subsystems, and the qubits last. This does it for a dressed system,
assuming estates has the same ordering
Args:
target_qubit (int): Qubit for which operator is built.
estates (list): eigenstates in the dressed frame
h_osc (dict): Dict of number of levels in each oscillator.
h_qub (dict): Dict of number of levels in each qubit system.
level (int): Level of qubit system to be measured.
Returns:
Qobj: Occupation number operator for target qubit.
"""
# reverse sort by index
rev_h_osc = sorted(h_osc.items(), key=lambda x: x[0])[::-1]
rev_h_qub = sorted(h_qub.items(), key=lambda x: x[0])[::-1]
# osc_n * … * osc_0 * qubit_n * … * qubit_0
states = []
proj_op = 0 * op.fock_dm(len(estates), 0)
for ii, dd in rev_h_osc:
states.append(op.basis(dd, 0))
for ii, dd in rev_h_qub:
if ii == target_qubit:
states.append(op.basis(dd, level))
else:
states.append(op.state(np.ones(dd)))
state = op.tensor(states)
for ii, estate in enumerate(estates):
if state[ii] == 1:
proj_op += estate * estate.dag()
return proj_op
def measure_outcomes(measured_qubits, state_vector, measure_ops,
seed=None):
"""Generate measurement outcomes for a given set of qubits and state vector.
Args:
measured_qubits (array_like): Qubits to be measured.
state_vector (ndarray): State vector.
measure_ops (list): List of measurement operators.
seed (int): Optional seed for RandomState for reproducibility.
Returns:
str: String of binaries representing measured qubit values.
"""
outcome_len = max(measured_qubits) + 1
# Create random generator with given seed (if any).
rng_gen = np.random.RandomState(seed)
rnds = rng_gen.rand(outcome_len)
outcomes = ''
for kk in range(outcome_len):
if kk in measured_qubits:
excited_prob = op.opr_prob(measure_ops[kk], state_vector)
if excited_prob >= rnds[kk]:
outcomes += '1'
else:
outcomes += '0'
else:
# We need a string for all the qubits up to the last
# one being measured for the mask operation to work.
# Here, unmeasured qubits default to the ground state.
outcomes += '0'
return outcomes
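def _example_measure_sampling():
    """Editor's illustrative sketch, not part of the original module: the
    sampling rule used above, shown on made-up excitation probabilities.
    A qubit reads '1' when its excited-state probability is at least the
    uniform random draw for that qubit.
    """
    rng_gen = np.random.RandomState(1234)
    excited_probs = [0.25, 0.9]   # hypothetical P(|1>) for qubits 0 and 1
    rnds = rng_gen.rand(len(excited_probs))
    return ''.join('1' if prob >= rnd else '0'
                   for prob, rnd in zip(excited_probs, rnds))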
def apply_projector(measured_qubits, results, h_qub, h_osc, state_vector):
"""Builds and applies the projection operator associated
with a given qubit measurement result onto a state vector.
Args:
measured_qubits (list): measured qubit indices.
results (list): results of qubit measurements.
h_qub (dict): Dict of number of levels in each qubit system.
h_osc (dict): Dict of number of levels in each oscillator.
state_vector (ndarray): State vector.
Returns:
Qobj: State vector after projector applied, and normalized.
"""
# reverse sort by index
rev_h_osc = sorted(h_osc.items(), key=lambda x: x[0])[::-1]
rev_h_qub = sorted(h_qub.items(), key=lambda x: x[0])[::-1]
# osc_n * … * osc_0 * qubit_n * … * qubit_0
opers = []
for ii, dd in rev_h_osc:
opers.append(op.qeye(dd))
for ii, dd in rev_h_qub:
if ii in measured_qubits:
opers.append(op.fock_dm(dd, results[ii]))
else:
opers.append(op.qeye(dd))
proj_oper = op.tensor(opers)
psi = op.opr_apply(proj_oper, state_vector)
psi /= la.norm(psi)
return psi
# pylint: disable=dangerous-default-value,comparison-with-callable
def init_fock_state(h_osc, h_qub, noise_dict={}):
""" Generate initial Fock state, in the number state
basis, for an oscillator in a thermal state defined
by the expectation value of number of particles.
Parameters:
h_osc (dict): Dimension of oscillator subspace
h_qub (dict): Dimension of qubit subspace
noise_dict (dict): Dictionary of thermal particles for each oscillator subspace
Returns:
Qobj: State vector
"""
# reverse sort by index
rev_h_osc = sorted(h_osc.items(), key=lambda x: x[0])[::-1]
rev_h_qub = sorted(h_qub.items(), key=lambda x: x[0])[::-1]
# osc_n * … * osc_0 * qubit_n * … * qubit_0
sub_state_vecs = []
for ii, dd in rev_h_osc:
n_thermal = noise_dict.get('oscillator', {}).get('n_th', {}).get(str(ii), 0)
if n_thermal == 0:
# no thermal particles
idx = 0
else:
# consider finite thermal excitation
levels = np.arange(dd)
beta = np.log(1.0 / n_thermal + 1.0)
diags = np.exp(-1.0 * beta * levels)
diags /= np.sum(diags)
cum_sum = np.cumsum(diags)
idx = np.where(np.random.random() < cum_sum)[0][0]
sub_state_vecs.append(op.basis(dd, idx))
for ii, dd in rev_h_qub:
sub_state_vecs.append(op.basis(dd, 0))
return op.tensor(sub_state_vecs)
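def _example_thermal_level_sampling(n_thermal=0.2, dim=5, seed=42):
    """Editor's illustrative sketch, not part of the original module: the
    inverse-transform sampling used above to draw an oscillator level from
    a thermal (Boltzmann) distribution with mean occupation ``n_thermal``.
    All argument values are assumptions chosen for the example.
    """
    levels = np.arange(dim)
    beta = np.log(1.0 / n_thermal + 1.0)   # effective inverse temperature
    diags = np.exp(-1.0 * beta * levels)
    diags /= np.sum(diags)                 # normalized Boltzmann weights
    cum_sum = np.cumsum(diags)
    rng_gen = np.random.RandomState(seed)
    # the first cumulative bin exceeding the uniform draw is the sampled level
    return int(np.where(rng_gen.rand() < cum_sum)[0][0])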

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -11,3 +11,6 @@
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Temporary folder containing trimmed down qutip objects and functions
"""

View File

@ -1,16 +1,10 @@
# Cython OP extensions
include(Linter)
include(cython_utils)
# We need to remove the -static flag, because Python Extension system only supports
# dynamic linked libraries, but we want to build a shared libraries with the least
# dependencies we can, so some of these dependencies are linked statically into our
# shared library.
string(REPLACE " -static " "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CYTHON_INSTALL_DIR "qiskit/providers/aer/pulse/cy/")
add_cython_module(channel_value)
add_cython_module(measure)
add_cython_module(memory)
add_cython_module(utils)
set(CYTHON_INSTALL_DIR "qiskit/providers/aer/pulse/qutip_extra_lite/cy")
add_cython_module(spmath)

View File

@ -0,0 +1,265 @@
#!python
#cython: language_level=3
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, The QuTiP Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from scipy.sparse import coo_matrix
from ..fastsparse import fast_csr_matrix
cimport numpy as np
cimport cython
from libcpp.algorithm cimport sort
from libcpp.vector cimport vector
from .sparse_structs cimport CSR_Matrix
np.import_array()
from libc.string cimport memset
cdef extern from "numpy/arrayobject.h" nogil:
void PyArray_ENABLEFLAGS(np.ndarray arr, int flags)
void PyDataMem_FREE(void * ptr)
void PyDataMem_RENEW(void * ptr, size_t size)
void PyDataMem_NEW(size_t size)
#Struct used for CSR indices sorting
cdef struct _data_ind_pair:
double complex data
int ind
ctypedef _data_ind_pair data_ind_pair
ctypedef int (*cfptr)(data_ind_pair, data_ind_pair)
cdef void raise_error_CSR(int E, CSR_Matrix * C = NULL):
if C != NULL and not C.numpy_lock:
free_CSR(C)
if E == -1:
raise MemoryError('Could not allocate memory.')
elif E == -2:
raise Exception('Error manipulating CSR_Matrix structure.')
elif E == -3:
raise Exception('CSR_Matrix is not initialized.')
elif E == -4:
raise Exception('NumPy already has lock on data.')
elif E == -5:
raise Exception('Cannot expand data structures past max_length.')
elif E == -6:
raise Exception('CSR_Matrix cannot be expanded.')
elif E == -7:
raise Exception('Data length cannot be larger than max_length')
else:
raise Exception('Error in Cython code.')
cdef inline int int_min(int a, int b) nogil:
return b if b < a else a
cdef inline int int_max(int a, int b) nogil:
return a if a > b else b
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void init_CSR(CSR_Matrix * mat, int nnz, int nrows, int ncols = 0,
int max_length = 0, int init_zeros = 1):
"""
Initialize CSR_Matrix struct. The matrix is assumed to be square with
shape nrows x nrows unless ncols is given (or mat.ncols is set manually).
Parameters
----------
mat : CSR_Matrix *
Pointer to struct.
nnz : int
Length of data and indices arrays. Also number of nonzero elements
nrows : int
Number of rows in matrix. Also gives length
of indptr array (nrows+1).
ncols : int (default = 0)
Number of cols in matrix. Default is ncols = nrows.
max_length : int (default = 0)
Maximum length of data and indices arrays. Used for resizing.
Default value of zero indicates no resizing.
"""
if max_length == 0:
max_length = nnz
if nnz > max_length:
raise_error_CSR(-7, mat)
if init_zeros:
mat.data = <double complex *>PyDataMem_NEW(nnz * sizeof(double complex))
memset(&mat.data[0],0,nnz * sizeof(double complex))
else:
mat.data = <double complex *>PyDataMem_NEW(nnz * sizeof(double complex))
if mat.data == NULL:
raise_error_CSR(-1, mat)
if init_zeros:
mat.indices = <int *>PyDataMem_NEW(nnz * sizeof(int))
mat.indptr = <int *>PyDataMem_NEW((nrows+1) * sizeof(int))
memset(&mat.indices[0],0,nnz * sizeof(int))
memset(&mat.indptr[0],0,(nrows+1) * sizeof(int))
else:
mat.indices = <int *>PyDataMem_NEW(nnz * sizeof(int))
mat.indptr = <int *>PyDataMem_NEW((nrows+1) * sizeof(int))
mat.nnz = nnz
mat.nrows = nrows
if ncols == 0:
mat.ncols = nrows
else:
mat.ncols = ncols
mat.is_set = 1
mat.max_length = max_length
mat.numpy_lock = 0
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void copy_CSR(CSR_Matrix * out, CSR_Matrix * mat):
"""
Copy a CSR_Matrix.
"""
cdef size_t kk
if not mat.is_set:
raise_error_CSR(-3)
elif out.is_set:
raise_error_CSR(-2)
init_CSR(out, mat.nnz, mat.nrows, mat.ncols, mat.max_length)
# We cannot use memcpy here since there are issues with
# doing so on Win with the GCC compiler
for kk in range(mat.nnz):
out.data[kk] = mat.data[kk]
out.indices[kk] = mat.indices[kk]
for kk in range(mat.nrows+1):
out.indptr[kk] = mat.indptr[kk]
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void free_CSR(CSR_Matrix * mat):
"""
Manually free CSR_Matrix data structures if
data is not locked by NumPy.
"""
if not mat.numpy_lock and mat.is_set:
if mat.data != NULL:
PyDataMem_FREE(mat.data)
if mat.indices != NULL:
PyDataMem_FREE(mat.indices)
if mat.indptr != NULL:
PyDataMem_FREE(mat.indptr)
mat.is_set = 0
else:
raise_error_CSR(-2)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void shorten_CSR(CSR_Matrix * mat, int N):
"""
Shortens the length of the CSR data and indices arrays.
"""
if (not mat.numpy_lock) and mat.is_set:
mat.data = <double complex *>PyDataMem_RENEW(mat.data, N * sizeof(double complex))
mat.indices = <int *>PyDataMem_RENEW(mat.indices, N * sizeof(int))
mat.nnz = N
else:
if mat.numpy_lock:
raise_error_CSR(-4, mat)
elif not mat.is_set:
raise_error_CSR(-3, mat)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef object CSR_to_scipy(CSR_Matrix * mat):
"""
Converts a CSR_Matrix struct to a SciPy csr_matrix class object.
The NumPy arrays are generated from the pointers, and the lifetime
of the pointer memory is tied to that of the NumPy array
(i.e. automatic garbage cleanup.)
Parameters
----------
mat : CSR_Matrix *
Pointer to CSR_Matrix.
"""
cdef np.npy_intp dat_len, ptr_len
cdef np.ndarray[complex, ndim=1] _data
cdef np.ndarray[int, ndim=1] _ind, _ptr
if (not mat.numpy_lock) and mat.is_set:
dat_len = mat.nnz
ptr_len = mat.nrows+1
_data = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_COMPLEX128, mat.data)
PyArray_ENABLEFLAGS(_data, np.NPY_OWNDATA)
_ind = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_INT32, mat.indices)
PyArray_ENABLEFLAGS(_ind, np.NPY_OWNDATA)
_ptr = np.PyArray_SimpleNewFromData(1, &ptr_len, np.NPY_INT32, mat.indptr)
PyArray_ENABLEFLAGS(_ptr, np.NPY_OWNDATA)
mat.numpy_lock = 1
return fast_csr_matrix((_data, _ind, _ptr), shape=(mat.nrows,mat.ncols))
else:
if mat.numpy_lock:
raise_error_CSR(-4)
elif not mat.is_set:
raise_error_CSR(-3)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef int ind_sort(data_ind_pair x, data_ind_pair y):
return x.ind < y.ind
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void sort_indices(CSR_Matrix * mat):
"""
Sorts the indices of a CSR_Matrix inplace.
"""
cdef size_t ii, jj
cdef vector[data_ind_pair] pairs
cdef cfptr cfptr_ = &ind_sort
cdef int row_start, row_end, length
for ii in range(mat.nrows):
row_start = mat.indptr[ii]
row_end = mat.indptr[ii+1]
length = row_end - row_start
pairs.resize(length)
for jj in range(length):
pairs[jj].data = mat.data[row_start+jj]
pairs[jj].ind = mat.indices[row_start+jj]
sort(pairs.begin(),pairs.end(),cfptr_)
for jj in range(length):
mat.data[row_start+jj] = pairs[jj].data
mat.indices[row_start+jj] = pairs[jj].ind
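def _example_sort_csr_row_indices():
    """Editor's illustrative sketch, not part of the original file: a NumPy
    equivalent of sort_indices above, reordering the (column, value) pairs
    within each CSR row. The 2x2 matrix is made up for the example.
    """
    # [[3, 1], [0, 2]] with the two entries of row 0 stored out of column order
    data = np.array([1.0 + 0j, 3.0 + 0j, 2.0 + 0j])
    indices = np.array([1, 0, 0], dtype=np.int32)
    indptr = np.array([0, 2, 3], dtype=np.int32)
    for row in range(len(indptr) - 1):
        start, end = indptr[row], indptr[row + 1]
        order = np.argsort(indices[start:end], kind='stable')
        indices[start:end] = indices[start:end][order]
        data[start:end] = data[start:end][order]
    return data, indices, indptr   # data -> [3, 1, 2], indices -> [0, 1, 0]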

View File

@ -44,16 +44,4 @@ cdef struct _csr_mat:
int max_length
int numpy_lock
cdef struct _coo_mat:
double complex * data
int * rows
int * cols
int nnz
int nrows
int ncols
int is_set
int max_length
int numpy_lock
ctypedef _csr_mat CSR_Matrix
ctypedef _coo_mat COO_Matrix
ctypedef _csr_mat CSR_Matrix

View File

@ -33,7 +33,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from qiskit.providers.aer.pulse.qutip_lite.fastsparse import fast_csr_matrix
from ..fastsparse import fast_csr_matrix
cimport numpy as cnp
from libc.math cimport abs, fabs, sqrt
from libcpp cimport bool

View File

@ -33,7 +33,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
from qiskit.providers.aer.pulse.qutip_lite.cy.sparse_structs cimport CSR_Matrix
from .sparse_structs cimport CSR_Matrix
cdef void fdense2D_to_CSR(complex[::1, :] mat, CSR_Matrix * out,
unsigned int nrows, unsigned int ncols)

View File

@ -33,7 +33,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from qiskit.providers.aer.pulse.qutip_lite.fastsparse import fast_csr_matrix
from ..fastsparse import fast_csr_matrix
cimport numpy as cnp
cimport cython
from libc.stdlib cimport div, malloc, free

View File

@ -1,18 +1,5 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
#!python
#cython: language_level=3
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
@ -45,3 +32,18 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
cimport numpy as cnp
cimport cython
from libcpp cimport bool
include "parameters.pxi"
cpdef cnp.ndarray[CTYPE_t, ndim=1, mode="c"] spmv_csr(complex[::1] data,
int[::1] ind, int[::1] ptr, complex[::1] vec)
cpdef cy_expect_psi_csr(complex[::1] data,
int[::1] ind,
int[::1] ptr,
complex[::1] vec,
bool isherm)

View File

@ -0,0 +1,100 @@
#!python
#cython: language_level=3
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
cimport numpy as cnp
cimport cython
cimport libc.math
from libcpp cimport bool
cdef extern from "src/zspmv.hpp" nogil:
void zspmvpy(double complex *data, int *ind, int *ptr, double complex *vec,
double complex a, double complex *out, int nrows)
include "complex_math.pxi"
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmv_csr(complex[::1] data,
int[::1] ind, int[::1] ptr, complex[::1] vec):
"""
Sparse matrix, dense vector multiplication.
Here the vector is assumed to be one-dimensional.
Matrix must be in CSR format and have complex entries.
Parameters
----------
data : array
Data for sparse matrix.
ind : array
Indices for sparse matrix data.
ptr : array
Pointers for sparse matrix data.
vec : array
Dense vector for multiplication. Must be one-dimensional.
Returns
-------
out : array
Returns dense array.
"""
cdef unsigned int num_rows = ptr.shape[0] - 1
cdef cnp.ndarray[complex, ndim=1, mode="c"] out = np.zeros((num_rows), dtype=np.complex)
zspmvpy(&data[0], &ind[0], &ptr[0], &vec[0], 1.0, &out[0], num_rows)
return out
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cy_expect_psi_csr(complex[::1] data,
int[::1] ind,
int[::1] ptr,
complex[::1] vec,
bool isherm):
cdef size_t row, jj
cdef int nrows = vec.shape[0]
cdef complex expt = 0, temp, cval
for row in range(nrows):
cval = conj(vec[row])
temp = 0
for jj in range(ptr[row], ptr[row+1]):
temp += data[jj]*vec[ind[jj]]
expt += cval*temp
if isherm:
return real(expt)
else:
return expt
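def _example_spmv_and_expect(data, ind, ptr, vec):
    """Editor's illustrative sketch, not part of the original file: what
    spmv_csr and cy_expect_psi_csr compute, expressed with scipy.sparse for
    comparison. The CSR arrays and the dense vector are supplied by the
    caller.
    """
    from scipy.sparse import csr_matrix
    nrows = len(ptr) - 1
    mat = csr_matrix((data, ind, ptr), shape=(nrows, nrows))
    out = mat.dot(vec)            # spmv_csr: A |psi>
    expect = np.vdot(vec, out)    # cy_expect_psi_csr: <psi| A |psi>
    return out, expect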

View File

@ -46,159 +46,6 @@ cdef extern from "<complex>" namespace "std" nogil:
include "sparse_routines.pxi"
@cython.boundscheck(False)
@cython.wraparound(False)
def zcsr_add(complex[::1] dataA, int[::1] indsA, int[::1] indptrA,
complex[::1] dataB, int[::1] indsB, int[::1] indptrB,
int nrows, int ncols,
int Annz, int Bnnz,
double complex alpha = 1):
"""
Adds two sparse CSR matrices. Like SciPy, we assume the worst case
for the fill: A.nnz + B.nnz.
"""
cdef int worse_fill = Annz + Bnnz
cdef int nnz
#Both matrices are zero mats
if Annz == 0 and Bnnz == 0:
return fast_csr_matrix(([], [], []), shape=(nrows,ncols))
#A is the zero matrix
elif Annz == 0:
return fast_csr_matrix((alpha*np.asarray(dataB), indsB, indptrB),
shape=(nrows,ncols))
#B is the zero matrix
elif Bnnz == 0:
return fast_csr_matrix((dataA, indsA, indptrA),
shape=(nrows,ncols))
# Out CSR_Matrix
cdef CSR_Matrix out
init_CSR(&out, worse_fill, nrows, ncols, worse_fill)
nnz = _zcsr_add_core(&dataA[0], &indsA[0], &indptrA[0],
&dataB[0], &indsB[0], &indptrB[0],
alpha,
&out,
nrows, ncols)
#Shorten data and indices if needed
if out.nnz > nnz:
shorten_CSR(&out, nnz)
return CSR_to_scipy(&out)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void _zcsr_add(CSR_Matrix * A, CSR_Matrix * B, CSR_Matrix * C, double complex alpha):
"""
Adds two sparse CSR matrices. Like SciPy, we assume the worst case
for the fill: A.nnz + B.nnz.
"""
cdef int worse_fill = A.nnz + B.nnz
cdef int nrows = A.nrows
cdef int ncols = A.ncols
cdef int nnz
init_CSR(C, worse_fill, nrows, ncols, worse_fill)
nnz = _zcsr_add_core(A.data, A.indices, A.indptr,
B.data, B.indices, B.indptr,
alpha, C, nrows, ncols)
#Shorten data and indices if needed
if C.nnz > nnz:
shorten_CSR(C, nnz)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef int _zcsr_add_core(double complex * Adata, int * Aind, int * Aptr,
double complex * Bdata, int * Bind, int * Bptr,
double complex alpha,
CSR_Matrix * C,
int nrows, int ncols) nogil:
cdef int j1, j2, kc = 0
cdef int ka, kb, ka_max, kb_max
cdef size_t ii
cdef double complex tmp
C.indptr[0] = 0
if alpha != 1:
for ii in range(nrows):
ka = Aptr[ii]
kb = Bptr[ii]
ka_max = Aptr[ii+1]-1
kb_max = Bptr[ii+1]-1
while (ka <= ka_max) or (kb <= kb_max):
if ka <= ka_max:
j1 = Aind[ka]
else:
j1 = ncols+1
if kb <= kb_max:
j2 = Bind[kb]
else:
j2 = ncols+1
if j1 == j2:
tmp = Adata[ka] + alpha*Bdata[kb]
if tmp != 0:
C.data[kc] = tmp
C.indices[kc] = j1
kc += 1
ka += 1
kb += 1
elif j1 < j2:
C.data[kc] = Adata[ka]
C.indices[kc] = j1
ka += 1
kc += 1
elif j1 > j2:
C.data[kc] = alpha*Bdata[kb]
C.indices[kc] = j2
kb += 1
kc += 1
C.indptr[ii+1] = kc
else:
for ii in range(nrows):
ka = Aptr[ii]
kb = Bptr[ii]
ka_max = Aptr[ii+1]-1
kb_max = Bptr[ii+1]-1
while (ka <= ka_max) or (kb <= kb_max):
if ka <= ka_max:
j1 = Aind[ka]
else:
j1 = ncols+1
if kb <= kb_max:
j2 = Bind[kb]
else:
j2 = ncols+1
if j1 == j2:
tmp = Adata[ka] + Bdata[kb]
if tmp != 0:
C.data[kc] = tmp
C.indices[kc] = j1
kc += 1
ka += 1
kb += 1
elif j1 < j2:
C.data[kc] = Adata[ka]
C.indices[kc] = j1
ka += 1
kc += 1
elif j1 > j2:
C.data[kc] = Bdata[kb]
C.indices[kc] = j2
kb += 1
kc += 1
C.indptr[ii+1] = kc
return kc
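def _example_merge_sorted_rows(a_cols, a_vals, b_cols, b_vals, alpha=1.0):
    """Editor's illustrative sketch, not part of the original file: the
    per-row sorted two-pointer merge performed by _zcsr_add_core, written
    in plain Python. a_cols/b_cols are the sorted column indices of one row
    of A and B, a_vals/b_vals the matching values; returns the merged row
    of A + alpha*B as (columns, values).
    """
    ka, kb = 0, 0
    cols, vals = [], []
    while ka < len(a_cols) or kb < len(b_cols):
        j1 = a_cols[ka] if ka < len(a_cols) else None
        j2 = b_cols[kb] if kb < len(b_cols) else None
        if j2 is None or (j1 is not None and j1 < j2):
            cols.append(j1)
            vals.append(a_vals[ka])
            ka += 1
        elif j1 is None or j2 < j1:
            cols.append(j2)
            vals.append(alpha * b_vals[kb])
            kb += 1
        else:  # j1 == j2: overlapping entries are summed (and dropped if zero)
            tmp = a_vals[ka] + alpha * b_vals[kb]
            if tmp != 0:
                cols.append(j1)
                vals.append(tmp)
            ka += 1
            kb += 1
    return cols, vals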
@cython.boundscheck(False)
@cython.wraparound(False)
def zcsr_mult(object A, object B, int sorted = 1):
@ -245,27 +92,6 @@ def zcsr_mult(object A, object B, int sorted = 1):
return CSR_to_scipy(&out)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void _zcsr_mult(CSR_Matrix * A, CSR_Matrix * B, CSR_Matrix * C):
nnz = _zcsr_mult_pass1(A.data, A.indices, A.indptr,
B.data, B.indices, B.indptr,
A.nrows, B.ncols)
init_CSR(C, nnz, A.nrows, B.ncols)
_zcsr_mult_pass2(A.data, A.indices, A.indptr,
B.data, B.indices, B.indptr,
C,
A.nrows, B.ncols)
#Shorten data and indices if needed
if C.nnz > C.indptr[C.nrows]:
shorten_CSR(C, C.indptr[C.nrows])
sort_indices(C)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef int _zcsr_mult_pass1(double complex * Adata, int * Aind, int * Aptr,
@ -373,26 +199,6 @@ def zcsr_kron(object A, object B):
return CSR_to_scipy(&out)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void _zcsr_kron(CSR_Matrix * A, CSR_Matrix * B, CSR_Matrix * C):
"""
Computes the kronecker product between two complex
sparse matrices in CSR format.
"""
cdef int out_nnz = _safe_multiply(A.nnz, B.nnz)
cdef int rows_out = A.nrows * B.nrows
cdef int cols_out = A.ncols * B.ncols
init_CSR(C, out_nnz, rows_out, cols_out)
_zcsr_kron_core(A.data, A.indices, A.indptr,
B.data, B.indices, B.indptr,
C,
A.nrows, B.nrows, B.ncols)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void _zcsr_kron_core(double complex * dataA, int * indsA, int * indptrA,
@ -451,16 +257,6 @@ def zcsr_transpose(object A):
return CSR_to_scipy(&out)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void _zcsr_transpose(CSR_Matrix * A, CSR_Matrix * B):
"""
Transpose of a sparse matrix in CSR format.
"""
init_CSR(B, A.nnz, A.ncols, A.nrows)
_zcsr_trans_core(A.data, A.indices, A.indptr, B, A.nrows, A.ncols)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void _zcsr_trans_core(double complex * data, int * ind, int * ptr,
@ -513,18 +309,6 @@ def zcsr_adjoint(object A):
return CSR_to_scipy(&out)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void _zcsr_adjoint(CSR_Matrix * A, CSR_Matrix * B):
"""
Adjoint of a sparse matrix in CSR format.
"""
init_CSR(B, A.nnz, A.ncols, A.nrows)
_zcsr_adjoint_core(A.data, A.indices, A.indptr,
B, A.nrows, A.ncols)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void _zcsr_adjoint_core(double complex * data, int * ind, int * ptr,
@ -636,133 +420,4 @@ cdef _safe_multiply(int A, int B):
Computes A*B and checks for overflow.
"""
cdef int C = A*B
return C
@cython.boundscheck(False)
@cython.wraparound(False)
def zcsr_trace(object A, bool isherm):
cdef complex[::1] data = A.data
cdef int[::1] ind = A.indices
cdef int[::1] ptr = A.indptr
cdef int nrows = ptr.shape[0]-1
cdef size_t ii, jj
cdef complex tr = 0
for ii in range(nrows):
for jj in range(ptr[ii], ptr[ii+1]):
if ind[jj] == ii:
tr += data[jj]
break
if imag(tr) == 0 or isherm:
return real(tr)
else:
return tr
@cython.boundscheck(False)
@cython.wraparound(False)
def zcsr_proj(object A, bool is_ket=1):
"""
Computes the projection operator
from a given ket or bra vector
in CSR format. The flag 'is_ket'
is True if passed a ket.
This is ~3x faster than taking the
conjugate transpose and multiplying directly,
and it does not need a temporary matrix.
"""
cdef complex[::1] data = A.data
cdef int[::1] ind = A.indices
cdef int[::1] ptr = A.indptr
cdef int nrows
cdef int nnz
cdef int offset = 0, new_idx, count, change_idx
cdef size_t jj, kk
if is_ket:
nrows = A.shape[0]
nnz = ptr[nrows]
else:
nrows = A.shape[1]
nnz = ptr[1]
cdef CSR_Matrix out
init_CSR(&out, nnz**2, nrows)
if is_ket:
#Compute new ptrs and inds
for jj in range(nrows):
out.indptr[jj] = ptr[jj]*nnz
if ptr[jj+1] != ptr[jj]:
new_idx = jj
for kk in range(nnz):
out.indices[offset+kk*nnz] = new_idx
offset += 1
#set nnz in new ptr
out.indptr[nrows] = nnz**2
#Compute the data
for jj in range(nnz):
for kk in range(nnz):
out.data[jj*nnz+kk] = data[jj]*conj(data[kk])
else:
count = nnz**2
new_idx = nrows
for kk in range(nnz-1,-1,-1):
for jj in range(nnz-1,-1,-1):
out.indices[offset+jj] = ind[jj]
out.data[kk*nnz+jj] = conj(data[kk])*data[jj]
offset += nnz
change_idx = ind[kk]
while new_idx > change_idx:
out.indptr[new_idx] = count
new_idx -= 1
count -= nnz
return CSR_to_scipy(&out)
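def _example_projector_with_scipy(ket):
    """Editor's illustrative sketch, not part of the original file: the
    projector |psi><psi| that zcsr_proj builds directly in CSR form,
    computed here the straightforward way for comparison. ``ket`` is any
    scipy.sparse CSR column vector.
    """
    return (ket * ket.conj().transpose()).tocsr()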
@cython.boundscheck(False)
@cython.wraparound(False)
def zcsr_inner(object A, object B, bool bra_ket):
"""
Computes the inner-product <A|B> between ket-ket,
or bra-ket vectors in sparse CSR format.
"""
cdef complex[::1] a_data = A.data
cdef int[::1] a_ind = A.indices
cdef int[::1] a_ptr = A.indptr
cdef complex[::1] b_data = B.data
cdef int[::1] b_ind = B.indices
cdef int[::1] b_ptr = B.indptr
cdef int nrows = B.shape[0]
cdef double complex inner = 0
cdef size_t jj, kk
cdef int a_idx, b_idx
if bra_ket:
for kk in range(a_ind.shape[0]):
a_idx = a_ind[kk]
for jj in range(nrows):
if (b_ptr[jj+1]-b_ptr[jj]) != 0:
if jj == a_idx:
inner += a_data[kk]*b_data[b_ptr[jj]]
break
else:
for kk in range(nrows):
a_idx = a_ptr[kk]
b_idx = b_ptr[kk]
if (a_ptr[kk+1]-a_idx) != 0:
if (b_ptr[kk+1]-b_idx) != 0:
inner += conj(a_data[a_idx])*b_data[b_idx]
return inner
return C

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -132,77 +132,3 @@ def type_from_dims(dims, enforce_square=True):
return 'super'
return 'other'
def _enumerate_flat(l, idx=0):
if not isinstance(l, list):
# Found a scalar, so return and increment.
return idx, idx + 1
else:
# Found a list, so append all the scalars
# from it and recurse to keep the increment
# correct.
acc = []
for elem in l:
labels, idx = _enumerate_flat(elem, idx)
acc.append(labels)
return acc, idx
def _collapse_composite_index(dims):
"""
Given the dimensions specification for a composite index
(e.g.: [2, 3] for the right index of a ket with dims [[1], [2, 3]]),
returns a dimensions specification for an index of the same shape,
but collapsed to a single "leg." In the previous example, [2, 3]
would collapse to [6].
"""
return [np.prod(dims)]
def _collapse_dims_to_level(dims, level=1):
"""
Recursively collapses all indices in a dimensions specification
appearing at a given level, such that the returned dimensions
specification does not represent any composite systems.
"""
if level == 0:
return _collapse_composite_index(dims)
else:
return [_collapse_dims_to_level(index, level=level - 1) for index in dims]
def collapse_dims_super(dims):
"""
Given the dimensions specifications for an operator-ket-, operator-bra- or
super-type Qobj, returns a dimensions specification describing the same shape
by collapsing all composite systems. For instance, the super-type
dimensions specification ``[[[2, 3], [2, 3]], [[2, 3], [2, 3]]]`` collapses to
``[[[6], [6]], [[6], [6]]]``.
Args:
dims (list): Dimensions specifications to be collapsed.
Returns:
list: Collapsed dimensions specification describing the same shape
such that ``len(collapsed_dims[i][j]) == 1`` for ``i`` and ``j``
in ``range(2)``.
"""
return _collapse_dims_to_level(dims, 2)
def enumerate_flat(l):
"""Labels the indices at which scalars occur in a flattened list.
Given a list containing a mix of scalars and lists,
returns a list of the same structure, where each scalar
has been replaced by an index into the flattened list.
Examples
--------
>>> print(enumerate_flat([[[10], [20, 30]], 40]))
[[[0], [1, 2]], 3]
"""
return _enumerate_flat(l)[0]

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -53,10 +53,8 @@ of commonly occuring quantum operators.
"""
import numpy as np
import scipy.sparse as sp
from .fastsparse import fast_csr_matrix, fast_identity
# pylint: disable=no-member
from .qobj import Qobj
# Spin operators
@ -430,6 +428,7 @@ def momentum(N, offset=0):
return -1j / np.sqrt(2.0) * (a - a.dag())
# number operator, important!
def num(N, offset=0):
"""Quantum object for number operator.
@ -456,157 +455,3 @@ def num(N, offset=0):
return Qobj(fast_csr_matrix((data, ind, ptr),
shape=(N, N)), isherm=True)
def squeeze(N, z, offset=0):
"""Single-mode Squeezing operator.
Args:
N (int): Dimension of hilbert space.
z (complex): Squeezing parameter.
offset (int): (default 0) The lowest number state that is included
in the finite number state representation of the operator.
Returns:
Qobj: Squeezing operator.
"""
a = destroy(N, offset=offset)
op = (1 / 2.0) * np.conj(z) * (a ** 2) - (1 / 2.0) * z * (a.dag()) ** 2
return op.expm()
def squeezing(a1, a2, z):
"""Generalized squeezing operator.
Args:
a1 (Qobj): Operator 1.
a2 (Qobj): Operator 2.
z (complex): Squeezing parameter.
Returns:
Qobj: Squeezing operator.
"""
b = 0.5 * (np.conj(z) * (a1 * a2) - z * (a1.dag() * a2.dag()))
return b.expm()
def displace(N, alpha, offset=0):
"""Single-mode displacement operator.
Args:
N (int): Dimension of Hilbert space.
alpha (complex): Displacement amplitude.
offset (int): The lowest number state that is included
in the finite number state
representation of the operator.
Returns:
Qobj: Displacement operator.
"""
a = destroy(N, offset=offset)
D = (alpha * a.dag() - np.conj(alpha) * a).expm()
return D
def commutator(A, B, kind="normal"):
"""
Return the commutator of kind `kind` (normal, anti) of the
two operators A and B.
Args:
A (Qobj): Operator A.
B (Qobj): Operator B.
kind (str): 'normal' or 'anti' commutator.
Returns:
Qobj: Commutator
Raises:
TypeError: Invalid input.
"""
if kind == 'normal':
return A * B - B * A
elif kind == 'anti':
return A * B + B * A
else:
raise TypeError("Unknown commutator kind '%s'" % kind)
def qzero(N):
"""
Zero operator
Args:
N (int or list): Dimension of Hilbert space. If provided as a
list of ints, then the dimension is the product
over this list, but the ``dims`` property of the
new Qobj are set to this list.
Returns:
Qobj: Zero operator Qobj.
Raises:
ValueError: Invalid input.
"""
N = int(N)
if (not isinstance(N, (int, np.integer))) or N < 0:
raise ValueError("N must be integer N>=0")
return Qobj(sp.csr_matrix((N, N), dtype=complex), isherm=True)
def charge(Nmax, Nmin=None, frac=1):
"""
Generate the diagonal charge operator over charge states
from Nmin to Nmax.
Args:
Nmax (int): Maximum charge state to consider.
Nmin (int): (default = -Nmax) Lowest charge state to consider.
frac (float): (default = 1) Specify fractional charge if needed.
Returns:
Qobj: Charge operator over [Nmin,Nmax].
"""
if Nmin is None:
Nmin = -Nmax
diag = np.arange(Nmin, Nmax + 1, dtype=float)
if frac != 1:
diag *= frac
C = sp.diags(diag, 0, format='csr', dtype=complex)
return Qobj(C, isherm=True)
def tunneling(N, m=1):
"""
Tunneling operator with elements of the form
:math:`\\sum |N><N+m| + |N+m><N|`.
Args:
N (int): Number of basis states in Hilbert space.
m (int): Number of excitations in tunneling event.
Returns:
Qobj: Tunneling operator.
"""
diags = [np.ones(N - m, dtype=int), np.ones(N - m, dtype=int)]
T = sp.diags(diags, [m, -m], format='csr', dtype=complex)
return Qobj(T, isherm=True)
# pylint: disable=wrong-import-position
from .qobj import Qobj

View File

@ -0,0 +1,767 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# pylint: disable=invalid-name, redefined-outer-name, no-name-in-module
# pylint: disable=import-error, unused-import
"""The Quantum Object (Qobj) class, for representing quantum states and
operators, and related functions.
"""
__all__ = ['Qobj']
import warnings
import builtins
# import math functions from numpy.math: required for td string evaluation
import numpy as np
import scipy.sparse as sp
import scipy.linalg as la
from qiskit.providers.aer.version import __version__
from .dimensions import type_from_dims
# used in existing functions that break if removed
from .cy.spmath import (zcsr_adjoint, zcsr_isherm)
from .fastsparse import fast_csr_matrix, fast_identity
# general absolute tolerance
atol = 1e-12
class Qobj():
"""A class for representing quantum objects, such as quantum operators
and states.
The Qobj class is the QuTiP representation of quantum operators and state
vectors. This class also implements math operations +,-,* between Qobj
instances (and / by a C-number), as well as a collection of common
operator/state operations. The Qobj constructor optionally takes a
dimension ``list`` and/or shape ``list`` as arguments.
Attributes
----------
data : array_like
Sparse matrix characterizing the quantum object.
dims : list
List of dimensions keeping track of the tensor structure.
shape : list
Shape of the underlying `data` array.
type : str
Type of quantum object: 'bra', 'ket', 'oper', 'operator-ket',
'operator-bra', or 'super'.
superrep : str
Representation used if `type` is 'super'. One of 'super'
(Liouville form) or 'choi' (Choi matrix with tr = dimension).
isherm : bool
Indicates if quantum object represents a Hermitian operator.
isunitary : bool
Indicates if quantum object represents a unitary operator.
iscp : bool
Indicates if the quantum object represents a map, and if that map is
completely positive (CP).
ishp : bool
Indicates if the quantum object represents a map, and if that map is
hermiticity preserving (HP).
istp : bool
Indicates if the quantum object represents a map, and if that map is
trace preserving (TP).
iscptp : bool
Indicates if the quantum object represents a map that is completely
positive and trace preserving (CPTP).
isket : bool
Indicates if the quantum object represents a ket.
isbra : bool
Indicates if the quantum object represents a bra.
isoper : bool
Indicates if the quantum object represents an operator.
issuper : bool
Indicates if the quantum object represents a superoperator.
isoperket : bool
Indicates if the quantum object represents an operator in column vector
form.
isoperbra : bool
Indicates if the quantum object represents an operator in row vector
form.
Methods
-------
copy()
Create copy of Qobj
conj()
Conjugate of quantum object.
cosm()
Cosine of quantum object.
dag()
Adjoint (dagger) of quantum object.
dnorm()
Diamond norm of quantum operator.
dual_chan()
Dual channel of quantum object representing a CP map.
eigenenergies(sparse=False, sort='low', eigvals=0, tol=0, maxiter=100000)
Returns eigenenergies (eigenvalues) of a quantum object.
eigenstates(sparse=False, sort='low', eigvals=0, tol=0, maxiter=100000)
Returns eigenenergies and eigenstates of quantum object.
expm()
Matrix exponential of quantum object.
full(order='C')
Returns dense array of quantum object `data` attribute.
groundstate(sparse=False, tol=0, maxiter=100000)
Returns eigenvalue and eigenket for the groundstate of a quantum
object.
matrix_element(bra, ket)
Returns the matrix element of operator between `bra` and `ket` vectors.
norm(norm='tr', sparse=False, tol=0, maxiter=100000)
Returns norm of a ket or an operator.
proj()
Computes the projector for a ket or bra vector.
sinm()
Sine of quantum object.
sqrtm()
Matrix square root of quantum object.
tidyup(atol=1e-12)
Removes small elements from quantum object.
tr()
Trace of quantum object.
trans()
Transpose of quantum object.
transform(inpt, inverse=False)
Performs a basis transformation defined by `inpt` matrix.
trunc_neg(method='clip')
Removes negative eigenvalues and returns a new Qobj that is
a valid density operator.
unit(norm='tr', sparse=False, tol=0, maxiter=100000)
Returns normalized quantum object.
"""
__array_priority__ = 100 # sets Qobj priority above numpy arrays
# pylint: disable=dangerous-default-value, redefined-builtin
def __init__(self, inpt=None, dims=[[], []], shape=[],
type=None, isherm=None, copy=True,
fast=False, superrep=None, isunitary=None):
"""
Qobj constructor.
Args:
inpt (ndarray): Input array or matrix data.
dims (list): List of Qobj dims.
shape (list): shape of underlying data.
type (str): Is object a ket, bra, oper, super.
isherm (bool): Is object Hermitian.
copy (bool): Copy input data.
fast (str or bool): Fast object instantiation.
superrep (str): Type of super representation.
isunitary (bool): Is object unitary.
Raises:
Exception: If the shape is not valid for a super operator.
"""
self._isherm = isherm
self._type = type
self.superrep = superrep
self._isunitary = isunitary
if fast == 'mc':
# fast Qobj construction for use in mcsolve with ket output
self._data = inpt
self.dims = dims
self._isherm = False
return
if fast == 'mc-dm':
# fast Qobj construction for use in mcsolve with dm output
self._data = inpt
self.dims = dims
self._isherm = True
return
if isinstance(inpt, Qobj):
# if input is already Qobj then return identical copy
self._data = fast_csr_matrix((inpt.data.data, inpt.data.indices,
inpt.data.indptr),
shape=inpt.shape, copy=copy)
if not np.any(dims):
# Dimensions of quantum object used for keeping track of tensor
# components
self.dims = inpt.dims
else:
self.dims = dims
self.superrep = inpt.superrep
self._isunitary = inpt._isunitary
elif inpt is None:
# initialize an empty Qobj with correct dimensions and shape
if any(dims):
N, M = np.prod(dims[0]), np.prod(dims[1])
self.dims = dims
elif shape:
N, M = shape
self.dims = [[N], [M]]
else:
N, M = 1, 1
self.dims = [[N], [M]]
self._data = fast_csr_matrix(shape=(N, M))
elif isinstance(inpt, (list, tuple)):
# case where input is a list
data = np.array(inpt)
if len(data.shape) == 1:
# if list has only one dimension (i.e [5,4])
data = data.transpose()
_tmp = sp.csr_matrix(data, dtype=complex)
self._data = fast_csr_matrix((_tmp.data, _tmp.indices, _tmp.indptr),
shape=_tmp.shape)
if not np.any(dims):
self.dims = [[int(data.shape[0])], [int(data.shape[1])]]
else:
self.dims = dims
elif isinstance(inpt, np.ndarray) or sp.issparse(inpt):
# case where input is array or sparse
if inpt.ndim == 1:
inpt = inpt[:, np.newaxis]
do_copy = copy
if not isinstance(inpt, fast_csr_matrix):
_tmp = sp.csr_matrix(inpt, dtype=complex, copy=do_copy)
_tmp.sort_indices() # Make sure indices are sorted.
do_copy = 0
else:
_tmp = inpt
self._data = fast_csr_matrix((_tmp.data, _tmp.indices, _tmp.indptr),
shape=_tmp.shape, copy=do_copy)
if not np.any(dims):
self.dims = [[int(inpt.shape[0])], [int(inpt.shape[1])]]
else:
self.dims = dims
elif isinstance(inpt, (int, float, complex,
np.integer, np.floating, np.complexfloating)):
# if input is int, float, or complex then convert to array
_tmp = sp.csr_matrix([[inpt]], dtype=complex)
self._data = fast_csr_matrix((_tmp.data, _tmp.indices, _tmp.indptr),
shape=_tmp.shape)
if not np.any(dims):
self.dims = [[1], [1]]
else:
self.dims = dims
else:
warnings.warn("Initializing Qobj from unsupported type: %s" %
builtins.type(inpt))
inpt = np.array([[0]])
_tmp = sp.csr_matrix(inpt, dtype=complex, copy=copy)
self._data = fast_csr_matrix((_tmp.data, _tmp.indices, _tmp.indptr),
shape=_tmp.shape)
self.dims = [[int(inpt.shape[0])], [int(inpt.shape[1])]]
if type == 'super':
# dims were not passed explicitly, but the shape is oper-like,
# so infer the superoperator dims from the square root of the shape
if dims == [[], []] and self.shape[0] == self.shape[1]:
sub_shape = np.sqrt(self.shape[0])
# check if root of shape is int
if (sub_shape % 1) != 0:
raise Exception('Invalid shape for a super operator.')
sub_shape = int(sub_shape)
self.dims = [[[sub_shape], [sub_shape]]] * 2
if superrep:
self.superrep = superrep
else:
if self.type == 'super' and self.superrep is None:
self.superrep = 'super'
# clear type cache
self._type = None
def copy(self):
"""Create identical copy"""
return Qobj(inpt=self)
def get_data(self):
"""Gets underlying data."""
return self._data
# Here we perform a check of the csr matrix type during setting of Q.data
def set_data(self, data):
"""Data setter
"""
if not isinstance(data, fast_csr_matrix):
raise TypeError('Qobj data must be in fast_csr format.')
self._data = data
data = property(get_data, set_data)
def __add__(self, other):
"""
ADDITION with Qobj on LEFT [ ex. Qobj+4 ]
"""
self._isunitary = None
if not isinstance(other, Qobj):
if isinstance(other, (int, float, complex, np.integer,
np.floating, np.complexfloating, np.ndarray,
list, tuple)) or sp.issparse(other):
other = Qobj(other)
else:
return NotImplemented
if np.prod(other.shape) == 1 and np.prod(self.shape) != 1:
# case for scalar quantum object
dat = other.data[0, 0]
if dat == 0:
return self
out = Qobj()
if self.type in ['oper', 'super']:
out.data = self.data + dat * fast_identity(
self.shape[0])
else:
out.data = self.data
out.data.data = out.data.data + dat
out.dims = self.dims
if isinstance(dat, (int, float)):
out._isherm = self._isherm
else:
# We use _isherm here to prevent recalculating on self and
# other, relying on that bool(None) == False.
out._isherm = (True if self._isherm and other._isherm
else out.isherm)
out.superrep = self.superrep
return out
elif np.prod(self.shape) == 1 and np.prod(other.shape) != 1:
# case for scalar quantum object
dat = self.data[0, 0]
if dat == 0:
return other
out = Qobj()
if other.type in ['oper', 'super']:
out.data = dat * fast_identity(other.shape[0]) + other.data
else:
out.data = other.data
out.data.data = out.data.data + dat
out.dims = other.dims
if isinstance(dat, complex):
out._isherm = out.isherm
else:
out._isherm = self._isherm
out.superrep = self.superrep
return out
elif self.dims != other.dims:
raise TypeError('Incompatible quantum object dimensions')
elif self.shape != other.shape:
raise TypeError('Matrix shapes do not match')
else: # case for matching quantum objects
out = Qobj()
out.data = self.data + other.data
out.dims = self.dims
if self.type in ['ket', 'bra', 'operator-ket', 'operator-bra']:
out._isherm = False
elif self._isherm is None or other._isherm is None:
out._isherm = out.isherm
elif not self._isherm and not other._isherm:
out._isherm = out.isherm
else:
out._isherm = self._isherm and other._isherm
if self.superrep and other.superrep:
if self.superrep != other.superrep:
msg = ("Adding superoperators with different " +
"representations")
warnings.warn(msg)
out.superrep = self.superrep
return out
def __radd__(self, other):
"""
ADDITION with Qobj on RIGHT [ ex. 4+Qobj ]
"""
return self + other
def __sub__(self, other):
"""
SUBTRACTION with Qobj on LEFT [ ex. Qobj-4 ]
"""
return self + (-other)
def __rsub__(self, other):
"""
SUBTRACTION with Qobj on RIGHT [ ex. 4-Qobj ]
"""
return (-self) + other
# used
# pylint: disable=too-many-return-statements
def __mul__(self, other):
"""
MULTIPLICATION with Qobj on LEFT [ ex. Qobj*4 ]
"""
self._isunitary = None
if isinstance(other, Qobj):
if self.dims[1] == other.dims[0]:
out = Qobj()
out.data = self.data * other.data
dims = [self.dims[0], other.dims[1]]
out.dims = dims
out._isherm = None
if self.superrep and other.superrep:
if self.superrep != other.superrep:
msg = ("Multiplying superoperators with different " +
"representations")
warnings.warn(msg)
out.superrep = self.superrep
return out
elif np.prod(self.shape) == 1:
out = Qobj(other)
out.data *= self.data[0, 0]
out.superrep = other.superrep
return out
elif np.prod(other.shape) == 1:
out = Qobj(self)
out.data *= other.data[0, 0]
out.superrep = self.superrep
return out
else:
raise TypeError("Incompatible Qobj shapes")
elif isinstance(other, np.ndarray):
if other.dtype == 'object':
return np.array([self * item for item in other],
dtype=object)
else:
return self.data * other
elif isinstance(other, list):
# if other is a list, do element-wise multiplication
return np.array([self * item for item in other],
dtype=object)
elif isinstance(other, (int, float, complex,
np.integer, np.floating, np.complexfloating)):
out = Qobj()
out.data = self.data * other
out.dims = self.dims
out.superrep = self.superrep
if isinstance(other, complex):
out._isherm = out.isherm
else:
out._isherm = self._isherm
return out
else:
return NotImplemented
# keep for now
def __rmul__(self, other):
"""
MULTIPLICATION with Qobj on RIGHT [ ex. 4*Qobj ]
"""
if isinstance(other, np.ndarray):
if other.dtype == 'object':
return np.array([item * self for item in other],
dtype=object)
else:
return other * self.data
elif isinstance(other, list):
# if other is a list, do element-wise multiplication
return np.array([item * self for item in other],
dtype=object)
elif isinstance(other, (int, float, complex,
np.integer, np.floating,
np.complexfloating)):
out = Qobj()
out.data = other * self.data
out.dims = self.dims
out.superrep = self.superrep
if isinstance(other, complex):
out._isherm = out.isherm
else:
out._isherm = self._isherm
return out
else:
raise TypeError("Incompatible object for multiplication")
# keep for now
def __truediv__(self, other):
return self.__div__(other)
# keep for now
def __div__(self, other):
"""
DIVISION (by numbers only)
"""
if isinstance(other, Qobj): # if both are quantum objects
raise TypeError("Incompatible Qobj shapes " +
"[division with Qobj not implemented]")
if isinstance(other, (int, float, complex,
np.integer, np.floating, np.complexfloating)):
out = Qobj()
out.data = self.data / other
out.dims = self.dims
if isinstance(other, complex):
out._isherm = out.isherm
else:
out._isherm = self._isherm
out.superrep = self.superrep
return out
else:
raise TypeError("Incompatible object for division")
# keep for now
def __neg__(self):
"""
NEGATION operation.
"""
out = Qobj()
out.data = -self.data
out.dims = self.dims
out.superrep = self.superrep
out._isherm = self._isherm
out._isunitary = self._isunitary
return out
# needed by qobj_generators
def __getitem__(self, ind):
"""
GET qobj elements.
"""
out = self.data[ind]
if sp.issparse(out):
return np.asarray(out.todense())
else:
return out
# keep for now
def __eq__(self, other):
"""
EQUALITY operator.
"""
return bool(isinstance(other, Qobj) and
self.dims == other.dims and
not np.any(np.abs((self.data - other.data).data) > atol))
# keep for now
def __ne__(self, other):
"""
INEQUALITY operator.
"""
return not self == other
# not needed functionally but is useful to keep for now
def __str__(self):
s = ""
t = self.type
shape = self.shape
if self.type in ['oper', 'super']:
s += ("Quantum object: " +
"dims = " + str(self.dims) +
", shape = " + str(shape) +
", type = " + t +
", isherm = " + str(self.isherm) +
(
", superrep = {0.superrep}".format(self)
if t == "super" and self.superrep != "super"
else ""
) + "\n")
else:
s += ("Quantum object: " +
"dims = " + str(self.dims) +
", shape = " + str(shape) +
", type = " + t + "\n")
s += "Qobj data =\n"
if shape[0] > 10000 or shape[1] > 10000:
# if the system is huge, don't attempt to convert to a
# dense matrix and then to string, because it is pointless
# and is likely going to produce memory errors. Instead print the
# sparse data string representation
s += str(self.data)
elif all(np.imag(self.data.data) == 0):
s += str(np.real(self.full()))
else:
s += str(self.full())
return s
# used by states
def dag(self):
"""Adjoint operator of quantum object.
"""
out = Qobj()
out.data = zcsr_adjoint(self.data)
out.dims = [self.dims[1], self.dims[0]]
out._isherm = self._isherm
out.superrep = self.superrep
return out
# breaks if removed - used in hamiltonian_model
def full(self, order='C', squeeze=False):
"""Dense array from quantum object.
Parameters
----------
order : str {'C', 'F'}
Return array in C (default) or Fortran ordering.
squeeze : bool {False, True}
Squeeze output array.
Returns
-------
data : array
Array of complex data from quantum objects `data` attribute.
"""
if squeeze:
return self.data.toarray(order=order).squeeze()
else:
return self.data.toarray(order=order)
# breaks if removed - only ever actually used in tests for duffing_model_generators
# pylint: disable=unused-argument
def __array__(self, *arg, **kwarg):
"""Numpy array from Qobj
For compatibility with np.array
"""
return self.full()
# breaks if removed due to tensor
@property
def isherm(self):
"""Is operator Hermitian.
Returns:
bool: Operator is Hermitian or not.
"""
if self._isherm is not None:
# used previously computed value
return self._isherm
self._isherm = bool(zcsr_isherm(self.data))
return self._isherm
# breaks if removed due to tensor
@isherm.setter
def isherm(self, isherm):
self._isherm = isherm
@property
def type(self):
"""Type of Qobj
"""
if not self._type:
self._type = type_from_dims(self.dims)
return self._type
@property
def shape(self):
"""Shape of Qobj
"""
if self.data.shape == (1, 1):
return tuple([np.prod(self.dims[0]), np.prod(self.dims[1])])
else:
return tuple(self.data.shape)
# breaks if removed - called in pulse_controller
# when initial state being set
@property
def isket(self):
"""Is ket vector"""
return self.type == 'ket'
# breaks if removed - called in tensor
@property
def issuper(self):
"""Is super operator"""
return self.type == 'super'

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -16,11 +16,10 @@
"""Operators to use in simulator"""
import numpy as np
from ..qutip_lite import operators as ops
from ..qutip_lite import states as st
from ..qutip_lite import tensor as ten
from ..qutip_lite.qobj import Qobj
from ..qutip_lite.cy.spmatfuncs import (spmv_csr, cy_expect_psi_csr)
from . import operators as ops
from . import states as st
from . import tensor as ten
from .qobj import Qobj
def sigmax(dim=2):
@ -102,63 +101,6 @@ def tensor(list_qobj):
return ten.tensor(list_qobj)
def conj(val):
""" Qiskit wrapper of conjugate
"""
if isinstance(val, Qobj):
return val.conj()
else:
return np.conj(val)
def sin(val):
""" Qiskit wrapper of sine function
"""
if isinstance(val, Qobj):
return val.sinm()
else:
return np.sin(val)
def cos(val):
""" Qiskit wrapper of cosine function
"""
if isinstance(val, Qobj):
return val.cosm()
else:
return np.cos(val)
def exp(val):
""" Qiskit wrapper of exponential function
"""
if isinstance(val, Qobj):
return val.expm()
else:
return np.exp(val)
def sqrt(val):
""" Qiskit wrapper of square root
"""
if isinstance(val, Qobj):
return val.sqrtm()
else:
return np.sqrt(val)
def dag(qobj):
""" Qiskit wrapper of adjoint
"""
return qobj.dag()
def dammy(qobj):
""" Return given quantum object
"""
return qobj
def basis(level, pos):
""" Qiskit wrapper of basis
"""
@ -177,22 +119,44 @@ def fock_dm(level, eigv):
return st.fock_dm(level, eigv)
def opr_prob(opr, state_vec):
""" Measure probability of operator in given quantum state
"""
return cy_expect_psi_csr(opr.data.data,
opr.data.indices,
opr.data.indptr,
state_vec, 1)
def qubit_occ_oper_dressed(target_qubit, estates, h_osc, h_qub, level=0):
"""Builds the occupation number operator for a target qubit
in a qubit oscillator system, where the oscillator are the first
subsystems, and the qubit last. This does it for a dressed systems
assuming estates has the same ordering
Args:
target_qubit (int): Qubit for which operator is built.
estates (list): eigenstates in the dressed frame
h_osc (dict): Dict of number of levels in each oscillator.
h_qub (dict): Dict of number of levels in each qubit system.
level (int): Level of qubit system to be measured.
def opr_apply(opr, state_vec):
""" Apply operator to given quantum state
Returns:
Qobj: Occupation number operator for target qubit.
"""
return spmv_csr(opr.data.data,
opr.data.indices,
opr.data.indptr,
state_vec)
# reverse sort by index
rev_h_osc = sorted(h_osc.items(), key=lambda x: x[0])[::-1]
rev_h_qub = sorted(h_qub.items(), key=lambda x: x[0])[::-1]
# osc_n * … * osc_0 * qubit_n * … * qubit_0
states = []
proj_op = 0 * fock_dm(len(estates), 0)
for ii, dd in rev_h_osc:
states.append(basis(dd, 0))
for ii, dd in rev_h_qub:
if ii == target_qubit:
states.append(basis(dd, level))
else:
states.append(state(np.ones(dd)))
out_state = tensor(states)
for ii, estate in enumerate(estates):
if out_state[ii] == 1:
proj_op += estate * estate.dag()
return proj_op
def get_oper(name, *args):
@ -201,16 +165,7 @@ def get_oper(name, *args):
return __operdict.get(name, qeye)(*args)
def get_func(name, qobj):
""" Apply function of given name
"""
return __funcdict.get(name, dammy)(qobj)
__operdict = {'X': sigmax, 'Y': sigmay, 'Z': sigmaz,
'Sp': create, 'Sm': destroy, 'I': qeye,
'O': num, 'P': project, 'A': destroy,
'C': create, 'N': num}
__funcdict = {'cos': cos, 'sin': sin, 'exp': exp,
'sqrt': sqrt, 'conj': conj, 'dag': dag}

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -46,16 +46,68 @@
###############################################################################
# pylint: disable=invalid-name
"""States
"""
This module contains settings for qutip_lite functionality
"""
# use auto tidyup
auto_tidyup = True
# use auto tidyup dims on multiplication
auto_tidyup_dims = True
# detect hermiticity
auto_herm = True
# general absolute tolerance
atol = 1e-12
# use auto tidyup absolute tolerance
auto_tidyup_atol = 1e-12
import numpy as np
from .qobj import Qobj
from .fastsparse import fast_csr_matrix
# used by qobj_generators
def fock_dm(N, n=0, offset=0):
"""Density matrix representation of a Fock state
Constructed via outer product of :func:`qutip.states.fock`.
Args:
N (int): Number of Fock states in Hilbert space.
n (int): Desired number state, defaults to 0 if omitted.
offset (int): Energy level offset.
Returns:
Qobj: Density matrix representation of Fock state.
"""
psi = basis(N, n, offset=offset)
return psi * psi.dag()
def basis(N, n=0, offset=0):
"""Generates the vector representation of a Fock state.
Args:
N (int): Number of Fock states in Hilbert space.
n (int): Integer corresponding to desired number
state, defaults to 0 if omitted.
offset (int): The lowest number state that is included
in the finite number state representation
of the state.
Returns:
Qobj: Qobj representing the requested number state ``|n>``.
Raises:
ValueError: Invalid input value.
"""
if (not isinstance(N, (int, np.integer))) or N < 0:
raise ValueError("N must be integer N >= 0")
if (not isinstance(n, (int, np.integer))) or n < offset:
raise ValueError("n must be integer n >= 0")
if n - offset > (N - 1): # check if n is within bounds
raise ValueError("basis vector index need to be in n <= N-1")
data = np.array([1], dtype=complex)
ind = np.array([0], dtype=np.int32)
ptr = np.array([0] * ((n - offset) + 1) + [1] * (N - (n - offset)),
dtype=np.int32)
return Qobj(fast_csr_matrix((data, ind, ptr), shape=(N, 1)), isherm=False)
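# Illustrative sketch (not part of the original diff): the CSR layout produced
# by basis(3, 1) above, reproduced with plain scipy for clarity.
import numpy as np
from scipy.sparse import csr_matrix
data = np.array([1], dtype=complex)
ind = np.array([0], dtype=np.int32)
ptr = np.array([0, 0, 1, 1], dtype=np.int32)   # the single nonzero sits in row 1
ket = csr_matrix((data, ind, ptr), shape=(3, 1))
print(ket.toarray().ravel())   # [0.+0.j 1.+0.j 0.+0.j], i.e. |1> in a 3-level space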

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -52,7 +52,6 @@ import numpy as np
# pylint: disable=no-name-in-module, import-error
from .cy.spmath import zcsr_kron
from .qobj import Qobj
from ..qutip_lite.settings import auto_tidyup
def tensor(*args):
@ -109,4 +108,4 @@ def tensor(*args):
if not out.isherm:
out._isherm = None
return out.tidyup() if auto_tidyup else out
return out

View File

@ -1,13 +0,0 @@
include(Linter)
include(cython_utils)
# We need to remove the -static flag, because the Python extension system only supports
# dynamically linked libraries, but we want to build shared libraries with as few
# dependencies as we can, so some of those dependencies are linked statically into our
# shared library.
string(REPLACE " -static " "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CYTHON_INSTALL_DIR "qiskit/providers/aer/pulse/qutip_lite/cy")
add_cython_module(spconvert src/zspmv.cpp)
add_cython_module(spmath src/zspmv.cpp)
add_cython_module(sparse_utils src/zspmv.cpp)
add_cython_module(spmatfuncs src/zspmv.cpp)

View File

@ -1,651 +0,0 @@
#!python
#cython: language_level=3
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, The QuTiP Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from scipy.sparse import coo_matrix
from qiskit.providers.aer.pulse.qutip_lite.fastsparse import fast_csr_matrix
cimport numpy as np
cimport cython
from libcpp.algorithm cimport sort
from libcpp.vector cimport vector
from .sparse_structs cimport CSR_Matrix, COO_Matrix
np.import_array()
from libc.string cimport memset
cdef extern from "numpy/arrayobject.h" nogil:
void PyArray_ENABLEFLAGS(np.ndarray arr, int flags)
void PyDataMem_FREE(void * ptr)
void PyDataMem_RENEW(void * ptr, size_t size)
void PyDataMem_NEW(size_t size)
#Struct used for CSR indices sorting
cdef struct _data_ind_pair:
double complex data
int ind
ctypedef _data_ind_pair data_ind_pair
ctypedef int (*cfptr)(data_ind_pair, data_ind_pair)
cdef void raise_error_CSR(int E, CSR_Matrix * C = NULL):
if not C.numpy_lock and C != NULL:
free_CSR(C)
if E == -1:
raise MemoryError('Could not allocate memory.')
elif E == -2:
raise Exception('Error manipulating CSR_Matrix structure.')
elif E == -3:
raise Exception('CSR_Matrix is not initialized.')
elif E == -4:
raise Exception('NumPy already has lock on data.')
elif E == -5:
raise Exception('Cannot expand data structures past max_length.')
elif E == -6:
raise Exception('CSR_Matrix cannot be expanded.')
elif E == -7:
raise Exception('Data length cannot be larger than max_length')
else:
raise Exception('Error in Cython code.')
cdef void raise_error_COO(int E, COO_Matrix * C = NULL):
if not C.numpy_lock and C != NULL:
free_COO(C)
if E == -1:
raise MemoryError('Could not allocate memory.')
elif E == -2:
raise Exception('Error manipulating COO_Matrix structure.')
elif E == -3:
raise Exception('COO_Matrix is not initialized.')
elif E == -4:
raise Exception('NumPy already has lock on data.')
elif E == -5:
raise Exception('Cannot expand data structures past max_length.')
elif E == -6:
raise Exception('COO_Matrix cannot be expanded.')
elif E == -7:
raise Exception('Data length cannot be larger than max_length')
else:
raise Exception('Error in Cython code.')
cdef inline int int_min(int a, int b) nogil:
return b if b < a else a
cdef inline int int_max(int a, int b) nogil:
return a if a > b else b
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void init_CSR(CSR_Matrix * mat, int nnz, int nrows, int ncols = 0,
int max_length = 0, int init_zeros = 1):
"""
Initialize CSR_Matrix struct. Matrix is assumed to be square with
shape nrows x nrows. Manually set mat.ncols otherwise
Parameters
----------
mat : CSR_Matrix *
Pointer to struct.
nnz : int
Length of data and indices arrays. Also number of nonzero elements
nrows : int
Number of rows in matrix. Also gives length
of indptr array (nrows+1).
ncols : int (default = 0)
Number of cols in matrix. Default is ncols = nrows.
max_length : int (default = 0)
Maximum length of data and indices arrays. Used for resizing.
Default value of zero indicates no resizing.
"""
if max_length == 0:
max_length = nnz
if nnz > max_length:
raise_error_CSR(-7, mat)
if init_zeros:
mat.data = <double complex *>PyDataMem_NEW(nnz * sizeof(double complex))
memset(&mat.data[0],0,nnz * sizeof(double complex))
else:
mat.data = <double complex *>PyDataMem_NEW(nnz * sizeof(double complex))
if mat.data == NULL:
raise_error_CSR(-1, mat)
if init_zeros:
mat.indices = <int *>PyDataMem_NEW(nnz * sizeof(int))
mat.indptr = <int *>PyDataMem_NEW((nrows+1) * sizeof(int))
memset(&mat.indices[0],0,nnz * sizeof(int))
memset(&mat.indptr[0],0,(nrows+1) * sizeof(int))
else:
mat.indices = <int *>PyDataMem_NEW(nnz * sizeof(int))
mat.indptr = <int *>PyDataMem_NEW((nrows+1) * sizeof(int))
mat.nnz = nnz
mat.nrows = nrows
if ncols == 0:
mat.ncols = nrows
else:
mat.ncols = ncols
mat.is_set = 1
mat.max_length = max_length
mat.numpy_lock = 0
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void copy_CSR(CSR_Matrix * out, CSR_Matrix * mat):
"""
Copy a CSR_Matrix.
"""
cdef size_t kk
if not mat.is_set:
raise_error_CSR(-3)
elif out.is_set:
raise_error_CSR(-2)
init_CSR(out, mat.nnz, mat.nrows, mat.nrows, mat.max_length)
# We cannot use memcpy here since there are issues with
# doing so on Win with the GCC compiler
for kk in range(mat.nnz):
out.data[kk] = mat.data[kk]
out.indices[kk] = mat.indices[kk]
for kk in range(mat.nrows+1):
out.indptr[kk] = mat.indptr[kk]
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void init_COO(COO_Matrix * mat, int nnz, int nrows, int ncols = 0,
int max_length = 0, int init_zeros = 1):
"""
Initialize COO_Matrix struct. Matrix is assumed to be square with
shape nrows x nrows. Manually set mat.ncols otherwise
Parameters
----------
mat : COO_Matrix *
Pointer to struct.
nnz : int
Number of nonzero elements.
nrows : int
Number of rows in matrix.
ncols : int (default = 0)
Number of cols in matrix. Default is ncols = nrows.
max_length : int (default = 0)
Maximum length of arrays. Used for resizing.
Default value of zero indicates no resizing.
"""
if max_length == 0:
max_length = nnz
if nnz > max_length:
raise_error_COO(-7, mat)
if init_zeros:
mat.data = <double complex *>PyDataMem_NEW(nnz * sizeof(double complex))
memset(&mat.data[0],0,nnz * sizeof(double complex))
else:
mat.data = <double complex *>PyDataMem_NEW(nnz * sizeof(double complex))
if mat.data == NULL:
raise_error_COO(-1, mat)
if init_zeros:
mat.rows = <int *>PyDataMem_NEW(nnz * sizeof(int))
mat.cols = <int *>PyDataMem_NEW(nnz * sizeof(int))
memset(&mat.rows[0],0,nnz * sizeof(int))
memset(&mat.cols[0],0,nnz * sizeof(int))
else:
mat.rows = <int *>PyDataMem_NEW(nnz * sizeof(int))
mat.cols = <int *>PyDataMem_NEW(nnz * sizeof(int))
mat.nnz = nnz
mat.nrows = nrows
if ncols == 0:
mat.ncols = nrows
else:
mat.ncols = ncols
mat.is_set = 1
mat.max_length = max_length
mat.numpy_lock = 0
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void free_CSR(CSR_Matrix * mat):
"""
Manually free CSR_Matrix data structures if
data is not locked by NumPy.
"""
if not mat.numpy_lock and mat.is_set:
if mat.data != NULL:
PyDataMem_FREE(mat.data)
if mat.indices != NULL:
PyDataMem_FREE(mat.indices)
if mat.indptr != NULL:
PyDataMem_FREE(mat.indptr)
mat.is_set = 0
else:
raise_error_CSR(-2)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void free_COO(COO_Matrix * mat):
"""
Manually free COO_Matrix data structures if
data is not locked by NumPy.
"""
if not mat.numpy_lock and mat.is_set:
if mat.data != NULL:
PyDataMem_FREE(mat.data)
if mat.rows != NULL:
PyDataMem_FREE(mat.rows)
if mat.cols != NULL:
PyDataMem_FREE(mat.cols)
mat.is_set = 0
else:
raise_error_COO(-2)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void shorten_CSR(CSR_Matrix * mat, int N):
"""
Shortens the length of the CSR data and indices arrays.
"""
if (not mat.numpy_lock) and mat.is_set:
mat.data = <double complex *>PyDataMem_RENEW(mat.data, N * sizeof(double complex))
mat.indices = <int *>PyDataMem_RENEW(mat.indices, N * sizeof(int))
mat.nnz = N
else:
if mat.numpy_lock:
raise_error_CSR(-4, mat)
elif not mat.is_set:
raise_error_CSR(-3, mat)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void expand_CSR(CSR_Matrix * mat, int init_zeros=0):
"""
Expands the length of the CSR data and indices arrays to accommodate
more nonzero elements. THIS IS CURRENTLY NOT USED
"""
cdef size_t ii
cdef int new_size
if mat.nnz == mat.max_length:
raise_error_CSR(-5, mat) #Cannot expand data past max_length.
elif (not mat.numpy_lock) and mat.is_set:
new_size = int_min(2*mat.nnz, mat.max_length)
new_data = <double complex *>PyDataMem_RENEW(mat.data, new_size * sizeof(double complex))
if new_data == NULL:
raise_error_CSR(-1, mat)
else:
mat.data = new_data
if init_zeros == 1:
for ii in range(mat.nnz, new_size):
mat.data[ii] = 0
new_ind = <int *>PyDataMem_RENEW(mat.indices, new_size * sizeof(int))
mat.indices = new_ind
if init_zeros == 1:
for ii in range(mat.nnz, new_size):
mat.indices[ii] = 0
mat.nnz = new_size
else:
if mat.numpy_lock:
raise_error_CSR(-4, mat)
elif not mat.is_set:
raise_error_CSR(-3, mat)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef object CSR_to_scipy(CSR_Matrix * mat):
"""
Converts a CSR_Matrix struct to a SciPy csr_matrix class object.
The NumPy arrays are generated from the pointers, and the lifetime
of the pointer memory is tied to that of the NumPy array
(i.e. automatic garbage cleanup.)
Parameters
----------
mat : CSR_Matrix *
Pointer to CSR_Matrix.
"""
cdef np.npy_intp dat_len, ptr_len
cdef np.ndarray[complex, ndim=1] _data
cdef np.ndarray[int, ndim=1] _ind, _ptr
if (not mat.numpy_lock) and mat.is_set:
dat_len = mat.nnz
ptr_len = mat.nrows+1
_data = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_COMPLEX128, mat.data)
PyArray_ENABLEFLAGS(_data, np.NPY_OWNDATA)
_ind = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_INT32, mat.indices)
PyArray_ENABLEFLAGS(_ind, np.NPY_OWNDATA)
_ptr = np.PyArray_SimpleNewFromData(1, &ptr_len, np.NPY_INT32, mat.indptr)
PyArray_ENABLEFLAGS(_ptr, np.NPY_OWNDATA)
mat.numpy_lock = 1
return fast_csr_matrix((_data, _ind, _ptr), shape=(mat.nrows,mat.ncols))
else:
if mat.numpy_lock:
raise_error_CSR(-4)
elif not mat.is_set:
raise_error_CSR(-3)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef object COO_to_scipy(COO_Matrix * mat):
"""
Converts a COO_Matrix struct to a SciPy coo_matrix class object.
The NumPy arrays are generated from the pointers, and the lifetime
of the pointer memory is tied to that of the NumPy array
(i.e. automatic garbage cleanup.)
Parameters
----------
mat : COO_Matrix *
Pointer to COO_Matrix.
"""
cdef np.npy_intp dat_len
cdef np.ndarray[complex, ndim=1] _data
cdef np.ndarray[int, ndim=1] _row, _col
if (not mat.numpy_lock) and mat.is_set:
dat_len = mat.nnz
_data = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_COMPLEX128, mat.data)
PyArray_ENABLEFLAGS(_data, np.NPY_OWNDATA)
_row = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_INT32, mat.rows)
PyArray_ENABLEFLAGS(_row, np.NPY_OWNDATA)
_col = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_INT32, mat.cols)
PyArray_ENABLEFLAGS(_col, np.NPY_OWNDATA)
mat.numpy_lock = 1
return coo_matrix((_data, (_row, _col)), shape=(mat.nrows,mat.ncols))
else:
if mat.numpy_lock:
raise_error_COO(-4)
elif not mat.is_set:
raise_error_COO(-3)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void COO_to_CSR(CSR_Matrix * out, COO_Matrix * mat):
"""
Conversion from COO to CSR. Not in place,
but result is sorted correctly.
"""
cdef int i, j, iad, j0
cdef double complex val
cdef size_t kk
init_CSR(out, mat.nnz, mat.nrows, mat.ncols, max_length=0, init_zeros=1)
# Determine row lengths
for kk in range(mat.nnz):
out.indptr[mat.rows[kk]] = out.indptr[mat.rows[kk]] + 1
# Starting position of rows
j = 0
for kk in range(mat.nrows):
j0 = out.indptr[kk]
out.indptr[kk] = j
j += j0
#Do the data
for kk in range(mat.nnz):
i = mat.rows[kk]
j = mat.cols[kk]
val = mat.data[kk]
iad = out.indptr[i]
out.data[iad] = val
out.indices[iad] = j
out.indptr[i] = iad+1
# Shift back
for kk in range(mat.nrows,0,-1):
out.indptr[kk] = out.indptr[kk-1]
out.indptr[0] = 0
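# Illustrative sketch (not part of the original diff): the same COO -> CSR
# conversion done with scipy, for comparison with the hand-rolled routine above.
import numpy as np
from scipy.sparse import coo_matrix
rows = np.array([0, 2, 1], dtype=np.int32)
cols = np.array([1, 0, 2], dtype=np.int32)
vals = np.array([1 + 0j, 2 + 0j, 3 + 0j])
csr = coo_matrix((vals, (rows, cols)), shape=(3, 3)).tocsr()
# csr.indptr -> [0, 1, 2, 3]; entries are grouped by row exactly as COO_to_CSR
# produces them: row 0 holds 1, row 1 holds 3, row 2 holds 2.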
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void CSR_to_COO(COO_Matrix * out, CSR_Matrix * mat):
"""
Converts a CSR_Matrix to a COO_Matrix.
"""
cdef int k1, k2
cdef size_t jj, kk
init_COO(out, mat.nnz, mat.nrows, mat.ncols)
for kk in range(mat.nnz):
out.data[kk] = mat.data[kk]
out.cols[kk] = mat.indices[kk]
for kk in range(mat.nrows-1,0,-1):
k1 = mat.indptr[kk+1]
k2 = mat.indptr[kk]
for jj in range(k2, k1):
out.rows[jj] = kk
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void COO_to_CSR_inplace(CSR_Matrix * out, COO_Matrix * mat):
"""
In-place conversion from COO to CSR; the result is not sorted.
The length of the COO (data,rows,cols) must be equal to the NNZ
in the final matrix (i.e. no padded zeros on ends of arrays).
"""
cdef size_t kk
cdef int i, j, init, inext, jnext, ipos
cdef int * _tmp_rows
cdef complex val, val_next
cdef int * work = <int *>PyDataMem_NEW((mat.nrows+1) * sizeof(int))
memset(&work[0],0,(mat.nrows+1) * sizeof(int))
# Determine output indptr array
for kk in range(mat.nnz):
i = mat.rows[kk]
work[i+1] += 1
work[0] = 0
for kk in range(mat.nrows):
work[kk+1] += work[kk]
if mat.nnz < (mat.nrows+1):
_tmp_rows = <int *>PyDataMem_RENEW(mat.rows, (mat.nrows+1) * sizeof(int))
mat.rows = _tmp_rows
init = 0
while init < mat.nnz:
while (mat.rows[init] < 0):
init += 1
val = mat.data[init]
i = mat.rows[init]
j = mat.cols[init]
mat.rows[init] = -1
while 1:
ipos = work[i]
val_next = mat.data[ipos]
inext = mat.rows[ipos]
jnext = mat.cols[ipos]
mat.data[ipos] = val
mat.cols[ipos] = j
mat.rows[ipos] = -1
work[i] += 1
if inext < 0:
break
val = val_next
i = inext
j = jnext
init += 1
for kk in range(mat.nrows):
mat.rows[kk+1] = work[kk]
mat.rows[0] = 0
if mat.nnz > (mat.nrows+1):
_tmp_rows = <int *>PyDataMem_RENEW(mat.rows, (mat.nrows+1) * sizeof(int))
mat.rows = _tmp_rows
#Free working array
PyDataMem_FREE(work)
#Set CSR pointers to original COO data.
out.data = mat.data
out.indices = mat.cols
out.indptr = mat.rows
out.nrows = mat.nrows
out.ncols = mat.ncols
out.nnz = mat.nnz
out.max_length = mat.nnz
out.is_set = 1
out.numpy_lock = 0
@cython.boundscheck(False)
@cython.wraparound(False)
cdef int ind_sort(data_ind_pair x, data_ind_pair y):
return x.ind < y.ind
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void sort_indices(CSR_Matrix * mat):
"""
Sorts the indices of a CSR_Matrix inplace.
"""
cdef size_t ii, jj
cdef vector[data_ind_pair] pairs
cdef cfptr cfptr_ = &ind_sort
cdef int row_start, row_end, length
for ii in range(mat.nrows):
row_start = mat.indptr[ii]
row_end = mat.indptr[ii+1]
length = row_end - row_start
pairs.resize(length)
for jj in range(length):
pairs[jj].data = mat.data[row_start+jj]
pairs[jj].ind = mat.indices[row_start+jj]
sort(pairs.begin(),pairs.end(),cfptr_)
for jj in range(length):
mat.data[row_start+jj] = pairs[jj].data
mat.indices[row_start+jj] = pairs[jj].ind
@cython.boundscheck(False)
@cython.wraparound(False)
cdef CSR_Matrix CSR_from_scipy(object A):
"""
Converts a SciPy CSR sparse matrix to a
CSR_Matrix struct.
"""
cdef complex[::1] data = A.data
cdef int[::1] ind = A.indices
cdef int[::1] ptr = A.indptr
cdef int nrows = A.shape[0]
cdef int ncols = A.shape[1]
cdef int nnz = ptr[nrows]
cdef CSR_Matrix mat
mat.data = &data[0]
mat.indices = &ind[0]
mat.indptr = &ptr[0]
mat.nrows = nrows
mat.ncols = ncols
mat.nnz = nnz
mat.max_length = nnz
mat.is_set = 1
mat.numpy_lock = 1
return mat
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void CSR_from_scipy_inplace(object A, CSR_Matrix* mat):
"""
Converts a SciPy CSR sparse matrix to a
CSR_Matrix struct.
"""
cdef complex[::1] data = A.data
cdef int[::1] ind = A.indices
cdef int[::1] ptr = A.indptr
cdef int nrows = A.shape[0]
cdef int ncols = A.shape[1]
cdef int nnz = ptr[nrows]
mat.data = &data[0]
mat.indices = &ind[0]
mat.indptr = &ptr[0]
mat.nrows = nrows
mat.ncols = ncols
mat.nnz = nnz
mat.max_length = nnz
mat.is_set = 1
mat.numpy_lock = 1
@cython.boundscheck(False)
@cython.wraparound(False)
cdef COO_Matrix COO_from_scipy(object A):
"""
Converts a SciPy COO sparse matrix to a
COO_Matrix struct.
"""
cdef complex[::1] data = A.data
cdef int[::1] rows = A.row
cdef int[::1] cols = A.col
cdef int nrows = A.shape[0]
cdef int ncols = A.shape[1]
cdef int nnz = data.shape[0]
cdef COO_Matrix mat
mat.data = &data[0]
mat.rows = &rows[0]
mat.cols = &cols[0]
mat.nrows = nrows
mat.ncols = ncols
mat.nnz = nnz
mat.max_length = nnz
mat.is_set = 1
mat.numpy_lock = 1
return mat
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void identity_CSR(CSR_Matrix * mat, unsigned int nrows):
cdef size_t kk
init_CSR(mat, nrows, nrows, nrows, 0, 0)
for kk in range(nrows):
mat.data[kk] = 1
mat.indices[kk] = kk
mat.indptr[kk] = kk
mat.indptr[nrows] = nrows

View File

@ -1,115 +0,0 @@
#!python
#cython: language_level=3
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
cimport numpy as cnp
cimport cython
from libcpp cimport bool
include "parameters.pxi"
cpdef cnp.ndarray[CTYPE_t, ndim=1, mode="c"] spmv_csr(complex[::1] data,
int[::1] ind, int[::1] ptr, complex[::1] vec)
cdef void spmvpy(complex * data,
int * ind,
int * ptr,
complex * vec,
complex a,
complex * out,
unsigned int nrows)
cpdef cy_expect_rho_vec_csr(complex[::1] data,
int[::1] idx,
int[::1] ptr,
complex[::1] rho_vec,
int herm)
cpdef cy_expect_psi(object A,
complex[::1] vec,
bool isherm)
cpdef cy_expect_psi_csr(complex[::1] data,
int[::1] ind,
int[::1] ptr,
complex[::1] vec,
bool isherm)
cdef void _spmm_c_py(complex * data,
int * ind,
int * ptr,
complex * mat,
complex a,
complex * out,
unsigned int sp_rows,
unsigned int nrows,
unsigned int ncols)
cpdef void spmmpy_c(complex[::1] data,
int[::1] ind,
int[::1] ptr,
complex[:,::1] M,
complex a,
complex[:,::1] out)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmmc(object sparse,
complex[:,::1] mat)
cdef void _spmm_f_py(complex * data,
int * ind,
int * ptr,
complex * mat,
complex a,
complex * out,
unsigned int sp_rows,
unsigned int nrows,
unsigned int ncols)
cpdef void spmmpy_f(complex[::1] data,
int[::1] ind,
int[::1] ptr,
complex[::1,:] mat,
complex a,
complex[::1,:] out)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmmf(object sparse,
complex[::1,:] mat)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmm(object sparse,
cnp.ndarray[complex, ndim=2] mat)

View File

@ -1,570 +0,0 @@
#!python
#cython: language_level=3
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
cimport numpy as cnp
cimport cython
cimport libc.math
from libcpp cimport bool
cdef extern from "src/zspmv.hpp" nogil:
void zspmvpy(double complex *data, int *ind, int *ptr, double complex *vec,
double complex a, double complex *out, int nrows)
include "complex_math.pxi"
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmv(
object super_op,
complex[::1] vec):
"""
Sparse matrix, dense vector multiplication.
Here the vector is assumed to have one-dimension.
Matrix must be in CSR format and have complex entries.
Parameters
----------
super_op : csr matrix
vec : array
Dense vector for multiplication. Must be one-dimensional.
Returns
-------
out : array
Returns dense array.
"""
return spmv_csr(super_op.data, super_op.indices, super_op.indptr, vec)
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmv_csr(complex[::1] data,
int[::1] ind, int[::1] ptr, complex[::1] vec):
"""
Sparse matrix, dense vector multiplication.
Here the vector is assumed to have one-dimension.
Matrix must be in CSR format and have complex entries.
Parameters
----------
data : array
Data for sparse matrix.
idx : array
Indices for sparse matrix data.
ptr : array
Pointers for sparse matrix data.
vec : array
Dense vector for multiplication. Must be one-dimensional.
Returns
-------
out : array
Returns dense array.
"""
cdef unsigned int num_rows = ptr.shape[0] - 1
cdef cnp.ndarray[complex, ndim=1, mode="c"] out = np.zeros((num_rows), dtype=np.complex)
zspmvpy(&data[0], &ind[0], &ptr[0], &vec[0], 1.0, &out[0], num_rows)
return out
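# Illustrative sketch (not part of the original diff): a plain-Python reference
# for the CSR matrix-vector product computed by spmv_csr above, assuming
# NumPy arrays for data, ind, ptr and vec.
import numpy as np
def spmv_csr_reference(data, ind, ptr, vec):
    nrows = len(ptr) - 1
    out = np.zeros(nrows, dtype=complex)
    for row in range(nrows):
        # accumulate this row's nonzeros against the dense vector
        for jj in range(ptr[row], ptr[row + 1]):
            out[row] += data[jj] * vec[ind[jj]]
    return out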
@cython.boundscheck(False)
@cython.wraparound(False)
def spmvpy_csr(complex[::1] data,
int[::1] ind, int[::1] ptr, complex[::1] vec,
complex alpha, complex[::1] out):
"""
Sparse matrix, dense vector multiplication.
Here the vector is assumed to have one-dimension.
Matrix must be in CSR format and have complex entries.
Parameters
----------
data : array
Data for sparse matrix.
idx : array
Indices for sparse matrix data.
ptr : array
Pointers for sparse matrix data.
vec : array
Dense vector for multiplication. Must be one-dimensional.
alpha : complex
Numerical coefficient for sparse matrix.
out: array
Output array
"""
cdef unsigned int num_rows = vec.shape[0]
zspmvpy(&data[0], &ind[0], &ptr[0], &vec[0], alpha, &out[0], num_rows)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef inline void spmvpy(complex* data, int* ind, int* ptr,
complex* vec,
complex a,
complex* out,
unsigned int nrows):
zspmvpy(data, ind, ptr, vec, a, out, nrows)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void _spmm_c_py(complex* data, int* ind, int* ptr,
complex* mat, complex a, complex* out,
unsigned int sp_rows, unsigned int nrows, unsigned int ncols):
"""
sparse*dense "C" ordered.
"""
cdef int row, col, ii, jj, row_start, row_end
for row from 0 <= row < sp_rows :
row_start = ptr[row]
row_end = ptr[row+1]
for jj from row_start <= jj < row_end:
for col in range(ncols):
out[row * ncols + col] += a*data[jj]*mat[ind[jj] * ncols + col]
cpdef void spmmpy_c(complex[::1] data, int[::1] ind, int[::1] ptr,
complex[:,::1] M, complex a, complex[:,::1] out):
"""
Sparse matrix, c ordered dense matrix multiplication.
The sparse matrix must be in CSR format and have complex entries.
Parameters
----------
data : array
Data for sparse matrix.
idx : array
Indices for sparse matrix data.
ptr : array
Pointers for sparse matrix data.
mat : array 2d
Dense matrix for multiplication. Must be in c mode.
alpha : complex
Numerical coefficient for sparse matrix.
out: array
Output array. Must be in c mode.
"""
cdef unsigned int sp_rows = ptr.shape[0]-1
cdef unsigned int nrows = M.shape[0]
cdef unsigned int ncols = M.shape[1]
_spmm_c_py(&data[0], &ind[0], &ptr[0], &M[0,0], 1.,
&out[0,0], sp_rows, nrows, ncols)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmmc(object sparse,
complex[:,::1] mat):
"""
Sparse matrix, c ordered dense matrix multiplication.
The sparse matrix must be in CSR format and have complex entries.
Parameters
----------
sparse : csr matrix
mat : array 2d
Dense matrix for multiplication. Must be in c mode.
Returns
-------
out : array
Keep input ordering
"""
cdef unsigned int sp_rows = sparse.indptr.shape[0]-1
cdef unsigned int ncols = mat.shape[1]
cdef cnp.ndarray[complex, ndim=2, mode="c"] out = \
np.zeros((sp_rows, ncols), dtype=complex)
spmmpy_c(sparse.data, sparse.indices, sparse.indptr,
mat, 1., out)
return out
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void _spmm_f_py(complex* data, int* ind, int* ptr,
complex* mat, complex a, complex* out,
unsigned int sp_rows, unsigned int nrows, unsigned int ncols):
"""
sparse*dense "F" ordered.
"""
cdef int col
for col in range(ncols):
spmvpy(data, ind, ptr, mat+nrows*col, a, out+sp_rows*col, sp_rows)
cpdef void spmmpy_f(complex[::1] data, int[::1] ind, int[::1] ptr,
complex[::1,:] mat, complex a, complex[::1,:] out):
"""
Sparse matrix, fortran ordered dense matrix multiplication.
The sparse matrix must be in CSR format and have complex entries.
Parameters
----------
data : array
Data for sparse matrix.
idx : array
Indices for sparse matrix data.
ptr : array
Pointers for sparse matrix data.
mat : array 2d
Dense matrix for multiplication. Must be in fortran mode.
alpha : complex
Numerical coefficient for sparse matrix.
out: array
Output array. Must be in fortran mode.
"""
cdef unsigned int sp_rows = ptr.shape[0]-1
cdef unsigned int nrows = mat.shape[0]
cdef unsigned int ncols = mat.shape[1]
_spmm_f_py(&data[0], &ind[0], &ptr[0], &mat[0,0], 1.,
&out[0,0], sp_rows, nrows, ncols)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmmf(object sparse,
complex[::1,:] mat):
"""
Sparse matrix, fortran ordered dense matrix multiplication.
The sparse matrix must be in CSR format and have complex entries.
Parameters
----------
sparse : csr matrix
mat : array 2d
Dense matrix for multiplication. Must be in fortran mode.
Returns
-------
out : array
Keep input ordering
"""
cdef unsigned int sp_rows = sparse.indptr.shape[0]-1
cdef unsigned int ncols = mat.shape[1]
cdef cnp.ndarray[complex, ndim=2, mode="fortran"] out = \
np.zeros((sp_rows, ncols), dtype=complex, order="F")
spmmpy_f(sparse.data, sparse.indices, sparse.indptr,
mat, 1., out)
return out
cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmm(object sparse,
cnp.ndarray[complex, ndim=2] mat):
"""
Sparse matrix, dense matrix multiplication.
The sparse matrix must be in CSR format and have complex entries.
Parameters
----------
sparse : csr matrix
mat : array 2d
Dense matrix for multiplication. Can be in c or fortran mode.
Returns
-------
out : array
Keep input ordering
"""
if mat.flags["F_CONTIGUOUS"]:
return spmmf(sparse, mat)
else:
return spmmc(sparse, mat)
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_rhs(
double t,
complex[::1] rho,
complex[::1] data,
int[::1] ind,
int[::1] ptr):
cdef unsigned int nrows = rho.shape[0]
cdef cnp.ndarray[complex, ndim=1, mode="c"] out = \
np.zeros(nrows, dtype=complex)
zspmvpy(&data[0], &ind[0], &ptr[0], &rho[0], 1.0, &out[0], nrows)
return out
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_psi_func_td(
double t,
cnp.ndarray[complex, ndim=1, mode="c"] psi,
object H_func,
object args):
H = H_func(t, args).data
return -1j * spmv_csr(H.data, H.indices, H.indptr, psi)
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_psi_func_td_with_state(
double t,
cnp.ndarray[complex, ndim=1, mode="c"] psi,
object H_func,
object args):
H = H_func(t, psi, args)
return -1j * spmv_csr(H.data, H.indices, H.indptr, psi)
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_rho_func_td(
double t,
cnp.ndarray[complex, ndim=1, mode="c"] rho,
object L0,
object L_func,
object args):
cdef object L
L = L0 + L_func(t, args).data
return spmv_csr(L.data, L.indices, L.indptr, rho)
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cy_expect_psi(object A, complex[::1] vec, bool isherm):
cdef complex[::1] data = A.data
cdef int[::1] ind = A.indices
cdef int[::1] ptr = A.indptr
cdef size_t row, jj
cdef int nrows = vec.shape[0]
cdef complex expt = 0, temp, cval
for row in range(nrows):
cval = conj(vec[row])
temp = 0
for jj in range(ptr[row], ptr[row+1]):
temp += data[jj]*vec[ind[jj]]
expt += cval*temp
if isherm :
return real(expt)
else:
return expt
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cy_expect_psi_csr(complex[::1] data,
int[::1] ind,
int[::1] ptr,
complex[::1] vec,
bool isherm):
cdef size_t row, jj
cdef int nrows = vec.shape[0]
cdef complex expt = 0, temp, cval
for row in range(nrows):
cval = conj(vec[row])
temp = 0
for jj in range(ptr[row], ptr[row+1]):
temp += data[jj]*vec[ind[jj]]
expt += cval*temp
if isherm :
return real(expt)
else:
return expt
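# Illustrative sketch (not part of the original diff): the same expectation
# value <psi|A|psi> computed with scipy instead of the explicit CSR loop above,
# assuming NumPy arrays for the CSR components and the state vector.
import numpy as np
from scipy.sparse import csr_matrix
def expect_psi_reference(data, ind, ptr, vec, isherm):
    n = len(vec)
    A = csr_matrix((data, ind, ptr), shape=(n, n))
    expt = np.vdot(vec, A.dot(vec))          # <psi|A|psi>
    return expt.real if isherm else expt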
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cy_expect_rho_vec(object super_op,
complex[::1] rho_vec,
int herm):
return cy_expect_rho_vec_csr(super_op.data,
super_op.indices,
super_op.indptr,
rho_vec,
herm)
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cy_expect_rho_vec_csr(complex[::1] data,
int[::1] idx,
int[::1] ptr,
complex[::1] rho_vec,
int herm):
cdef size_t row
cdef int jj,row_start,row_end
cdef int num_rows = rho_vec.shape[0]
cdef int n = <int>libc.math.sqrt(num_rows)
cdef complex dot = 0.0
for row from 0 <= row < num_rows by n+1:
row_start = ptr[row]
row_end = ptr[row+1]
for jj from row_start <= jj < row_end:
dot += data[jj]*rho_vec[idx[jj]]
if herm == 0:
return dot
else:
return real(dot)
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef cy_spmm_tr(object op1, object op2, int herm):
cdef size_t row
cdef complex tr = 0.0
cdef int col1, row1_idx_start, row1_idx_end
cdef complex[::1] data1 = op1.data
cdef int[::1] idx1 = op1.indices
cdef int[::1] ptr1 = op1.indptr
cdef int col2, row2_idx_start, row2_idx_end
cdef complex[::1] data2 = op2.data
cdef int[::1] idx2 = op2.indices
cdef int[::1] ptr2 = op2.indptr
cdef int num_rows = ptr1.shape[0]-1
for row in range(num_rows):
row1_idx_start = ptr1[row]
row1_idx_end = ptr1[row + 1]
for row1_idx from row1_idx_start <= row1_idx < row1_idx_end:
col1 = idx1[row1_idx]
row2_idx_start = ptr2[col1]
row2_idx_end = ptr2[col1 + 1]
for row2_idx from row2_idx_start <= row2_idx < row2_idx_end:
col2 = idx2[row2_idx]
if col2 == row:
tr += data1[row1_idx] * data2[row2_idx]
break
if herm == 0:
return tr
else:
return real(tr)
@cython.boundscheck(False)
@cython.wraparound(False)
def expect_csr_ket(object A, object B, int isherm):
cdef complex[::1] Adata = A.data
cdef int[::1] Aind = A.indices
cdef int[::1] Aptr = A.indptr
cdef complex[::1] Bdata = B.data
cdef int[::1] Bptr = B.indptr
cdef int nrows = A.shape[0]
cdef int j
cdef size_t ii, jj
cdef double complex cval=0, row_sum, expt = 0
for ii in range(nrows):
if (Bptr[ii+1] - Bptr[ii]) != 0:
cval = conj(Bdata[Bptr[ii]])
row_sum = 0
for jj in range(Aptr[ii], Aptr[ii+1]):
j = Aind[jj]
if (Bptr[j+1] - Bptr[j]) != 0:
row_sum += Adata[jj]*Bdata[Bptr[j]]
expt += cval*row_sum
if isherm:
return real(expt)
else:
return expt
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef double complex zcsr_mat_elem(object A, object left, object right, bool bra_ket=1):
"""
Computes the matrix element for an operator A and left and right vectors.
right must be a ket, but left can be a ket or bra vector. If left
is bra then bra_ket = 1, else set bra_ket = 0.
"""
cdef complex[::1] Adata = A.data
cdef int[::1] Aind = A.indices
cdef int[::1] Aptr = A.indptr
cdef int nrows = A.shape[0]
cdef complex[::1] Ldata = left.data
cdef int[::1] Lind = left.indices
cdef int[::1] Lptr = left.indptr
cdef int Lnnz = Lind.shape[0]
cdef complex[::1] Rdata = right.data
cdef int[::1] Rind = right.indices
cdef int[::1] Rptr = right.indptr
cdef int j, go, head=0
cdef size_t ii, jj, kk
cdef double complex cval=0, row_sum, mat_elem=0
for ii in range(nrows):
row_sum = 0
go = 0
if bra_ket:
for kk in range(head, Lnnz):
if Lind[kk] == ii:
cval = Ldata[kk]
head = kk
go = 1
else:
if (Lptr[ii] - Lptr[ii+1]) != 0:
cval = conj(Ldata[Lptr[ii]])
go = 1
if go:
for jj in range(Aptr[ii], Aptr[ii+1]):
j = Aind[jj]
if (Rptr[j] - Rptr[j+1]) != 0:
row_sum += Adata[jj]*Rdata[Rptr[j]]
mat_elem += cval*row_sum
return mat_elem
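# Illustrative sketch (not part of the original diff): a dense NumPy analogue
# of zcsr_mat_elem. Here `A`, `left` and `right` are assumed to be dense
# arrays; `right` is a ket and `left` is a bra when bra_ket is True, a ket otherwise.
import numpy as np
def mat_elem_reference(A, left, right, bra_ket=True):
    left = np.asarray(left, dtype=complex).ravel()
    right = np.asarray(right, dtype=complex).ravel()
    if not bra_ket:
        left = left.conj()      # a ket supplied on the left must be conjugated first
    return left @ (np.asarray(A, dtype=complex) @ right)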

View File

@ -1,59 +0,0 @@
#!python
#cython: language_level=3
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, The QuTiP Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
from qiskit.providers.aer.pulse.qutip_lite.cy.sparse_structs cimport CSR_Matrix
cdef void _zcsr_add(CSR_Matrix * A, CSR_Matrix * B,
CSR_Matrix * C, double complex alpha)
cdef int _zcsr_add_core(double complex * Adata, int * Aind, int * Aptr,
double complex * Bdata, int * Bind, int * Bptr,
double complex alpha,
CSR_Matrix * C,
int nrows, int ncols) nogil
cdef void _zcsr_mult(CSR_Matrix * A, CSR_Matrix * B, CSR_Matrix * C)
cdef void _zcsr_kron(CSR_Matrix * A, CSR_Matrix * B, CSR_Matrix * C)
cdef void _zcsr_kron_core(double complex * dataA, int * indsA, int * indptrA,
double complex * dataB, int * indsB, int * indptrB,
CSR_Matrix * out,
int rowsA, int rowsB, int colsB) nogil
cdef void _zcsr_transpose(CSR_Matrix * A, CSR_Matrix * B)
cdef void _zcsr_adjoint(CSR_Matrix * A, CSR_Matrix * B)

View File

@ -1,144 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# pylint: disable=invalid-name
"""
Module for expectation values.
"""
__all__ = ['expect', 'variance']
import numpy as np
from .qobj import Qobj
# pylint: disable=import-error, no-name-in-module
from .cy.spmatfuncs import (cy_expect_rho_vec, cy_expect_psi, cy_spmm_tr,
expect_csr_ket)
expect_rho_vec = cy_expect_rho_vec
expect_psi = cy_expect_psi
def expect(oper, state):
"""Calculates the expectation value for operator(s) and state(s).
Args:
oper (Qobj or list): A single operator or a `list` of operators
for the expectation value.
state (Qobj or list): A single or a `list` of quantum states
or density matrices.
Returns:
real or complex or ndarray: Expectation value. ``real`` if `oper` is
Hermitian, ``complex`` otherwise. A (nested)
array of expectation values if ``state`` or
``oper`` is itself a list or array.
Raises:
TypeError: Inputs are not quantum objects.
"""
if isinstance(state, Qobj) and isinstance(oper, Qobj):
return _single_qobj_expect(oper, state)
elif isinstance(oper, (list, np.ndarray)):
if isinstance(state, Qobj):
if (all([op.isherm for op in oper]) and
(state.isket or state.isherm)):
return np.array([_single_qobj_expect(o, state) for o in oper])
else:
return np.array([_single_qobj_expect(o, state) for o in oper],
dtype=complex)
else:
return [expect(o, state) for o in oper]
elif isinstance(state, (list, np.ndarray)):
if oper.isherm and all([(op.isherm or op.type == 'ket')
for op in state]):
return np.array([_single_qobj_expect(oper, x) for x in state])
else:
return np.array([_single_qobj_expect(oper, x) for x in state],
dtype=complex)
else:
raise TypeError('Arguments must be quantum objects')
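# Illustrative usage sketch (not part of the original diff), with `op`, `op2`
# and `psi` standing in for Qobj operators and a Qobj state from this package:
#   ev = expect(op, psi)              # single operator, single state
#   evs = expect([op, op2], psi)      # ndarray of expectation values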
# pylint: disable=inconsistent-return-statements
def _single_qobj_expect(oper, state):
"""
Private function used by expect to calculate expectation values of Qobjs.
"""
if oper.isoper:
if oper.dims[1] != state.dims[0]:
raise Exception('Operator and state do not have same tensor ' +
'structure: %s and %s' %
(oper.dims[1], state.dims[0]))
if state.type == 'oper':
# calculates expectation value via TR(op*rho)
return cy_spmm_tr(oper.data, state.data,
oper.isherm and state.isherm)
elif state.type == 'ket':
# calculates expectation value via <psi|op|psi>
return expect_csr_ket(oper.data, state.data,
oper.isherm)
else:
raise TypeError('Invalid operand types')
def variance(oper, state):
"""
Variance of an operator for the given state vector or density matrix.
Args:
oper (qobj.Qobj): Operator for expectation value.
state (qobj.Qobj or list): A single or `list` of quantum states or density matrices.
Returns:
float: Variance of operator 'oper' for given state.
"""
return expect(oper ** 2, state) - expect(oper, state) ** 2
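# Worked example (not part of the original diff): for a qubit in the state |0>
# and oper = sigmaz, expect(sigmaz, |0>) = 1 and expect(sigmaz**2, |0>) = 1,
# so the variance is 1 - 1**2 = 0, as expected for an eigenstate.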

File diff suppressed because it is too large

View File

@ -1,590 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# pylint: disable=invalid-name, len-as-condition, no-name-in-module
# pylint: disable=import-error
"""
This module contains a collection of routines for operating on sparse
matrices on the scipy.sparse formats, for use internally by other modules
throughout QuTiP.
"""
__all__ = ['sp_fro_norm', 'sp_inf_norm', 'sp_L2_norm', 'sp_max_norm',
'sp_one_norm', 'sp_reshape', 'sp_eigs', 'sp_expm', 'sp_permute',
'sp_reverse_permute', 'sp_bandwidth', 'sp_profile']
import numpy as np
import scipy.linalg as la
import scipy.sparse as sp
import scipy.sparse.linalg as spla
from scipy.linalg.blas import get_blas_funcs
from .cy.sparse_utils import (_sparse_profile, _sparse_permute,
_sparse_reverse_permute, _sparse_bandwidth,
_isdiag, zcsr_one_norm, zcsr_inf_norm)
from .fastsparse import fast_csr_matrix
from .cy.spconvert import (zcsr_reshape)
_dznrm2 = get_blas_funcs("znrm2")
def sp_fro_norm(data):
"""
Frobenius norm for sparse matrix
"""
out = np.sum(np.abs(data.data)**2)
return np.sqrt(out)
def sp_inf_norm(A):
"""
Infinity norm for sparse matrix
"""
return zcsr_inf_norm(A.data, A.indices,
A.indptr, A.shape[0],
A.shape[1])
def sp_L2_norm(A):
"""
L2 norm of a sparse vector
"""
if 1 not in A.shape:
raise TypeError("Use L2-norm only for vectors.")
if len(A.data):
return _dznrm2(A.data)
else:
return 0
def sp_max_norm(A):
"""
Max norm for sparse matrix
"""
return np.max(np.abs(A.data)) if any(A.data) else 0
def sp_one_norm(A):
"""
One norm for sparse matrix
"""
return zcsr_one_norm(A.data, A.indices,
A.indptr, A.shape[0],
A.shape[1])
# pylint: disable=redefined-builtin
def sp_reshape(A, shape, format='csr'):
"""
Reshapes a sparse matrix.
Args:
A (sparse): Input matrix in any format
shape (list):Desired shape of new matrix
format (str): Optional string indicating
desired output format
Returns:
csr_matrix: Reshaped sparse matrix
Raises:
ValueError: Invalid input.
"""
if not hasattr(shape, '__len__') or len(shape) != 2:
raise ValueError('Shape must be a list of two integers')
if format == 'csr':
return zcsr_reshape(A, shape[0], shape[1])
C = A.tocoo()
nrows, ncols = C.shape
size = nrows * ncols
new_size = shape[0] * shape[1]
if new_size != size:
raise ValueError('Total size of new array must be unchanged.')
flat_indices = ncols * C.row + C.col
new_row, new_col = divmod(flat_indices, shape[1])
B = sp.coo_matrix((C.data, (new_row, new_col)), shape=shape)
if format == 'coo':
return B
elif format == 'csc':
return B.tocsc()
elif format == 'lil':
return B.tolil()
else:
raise ValueError('Return format not valid.')
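# Worked example (not part of the original diff): reshaping a 2x3 matrix to
# 3x2 moves the entry at (row, col) = (1, 2): its flat index is 1*3 + 2 = 5,
# and divmod(5, 2) = (2, 1), so it lands at position (2, 1) in the new shape.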
def _dense_eigs(data, isherm, vecs, N, eigvals, num_large, num_small):
"""
Internal functions for computing eigenvalues and eigenstates for a dense
matrix.
"""
evecs = None
if vecs:
if isherm:
if eigvals == 0:
evals, evecs = la.eigh(data)
else:
if num_small > 0:
evals, evecs = la.eigh(
data, eigvals=[0, num_small - 1])
if num_large > 0:
evals, evecs = la.eigh(
data, eigvals=[N - num_large, N - 1])
else:
evals, evecs = la.eig(data)
else:
if isherm:
if eigvals == 0:
evals = la.eigvalsh(data)
else:
if num_small > 0:
evals = la.eigvalsh(data, eigvals=[0, num_small - 1])
if num_large > 0:
evals = la.eigvalsh(data, eigvals=[N - num_large, N - 1])
else:
evals = la.eigvals(data)
_zipped = list(zip(evals, range(len(evals))))
_zipped.sort()
evals, perm = list(zip(*_zipped))
if vecs:
evecs = np.array([evecs[:, k] for k in perm])
if not isherm and eigvals > 0:
if vecs:
if num_small > 0:
evals, evecs = evals[:num_small], evecs[:num_small]
elif num_large > 0:
evals, evecs = evals[(N - num_large):], evecs[(N - num_large):]
else:
if num_small > 0:
evals = evals[:num_small]
elif num_large > 0:
evals = evals[(N - num_large):]
return np.array(evals), np.array(evecs)
def _sp_eigs(data, isherm, vecs, N, eigvals, num_large, num_small, tol,
maxiter):
"""
Internal functions for computing eigenvalues and eigenstates for a sparse
matrix.
"""
big_vals = np.array([])
small_vals = np.array([])
evecs = None
remove_one = False
if eigvals == (N - 1):
# calculate all eigenvalues and remove one at output if using sparse
eigvals = 0
num_small = int(np.ceil(N / 2.0))
num_large = N - num_small
remove_one = True
if vecs:
if isherm:
if num_large > 0:
big_vals, big_vecs = sp.linalg.eigsh(data, k=num_large,
which='LA', tol=tol,
maxiter=maxiter)
big_vecs = sp.csr_matrix(big_vecs, dtype=complex)
if num_small > 0:
small_vals, small_vecs = sp.linalg.eigsh(
data, k=num_small, which='SA',
tol=tol, maxiter=maxiter)
else:
if num_large > 0:
big_vals, big_vecs = sp.linalg.eigs(data, k=num_large,
which='LR', tol=tol,
maxiter=maxiter)
big_vecs = sp.csr_matrix(big_vecs, dtype=complex)
if num_small > 0:
small_vals, small_vecs = sp.linalg.eigs(
data, k=num_small, which='SR',
tol=tol, maxiter=maxiter)
if num_large != 0 and num_small != 0:
evecs = sp.hstack([small_vecs, big_vecs], format='csr')
elif num_large != 0 and num_small == 0:
evecs = big_vecs
elif num_large == 0 and num_small != 0:
evecs = small_vecs
else:
if isherm:
if num_large > 0:
big_vals = sp.linalg.eigsh(
data, k=num_large, which='LA',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
if num_small > 0:
small_vals = sp.linalg.eigsh(
data, k=num_small, which='SA',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
else:
if num_large > 0:
big_vals = sp.linalg.eigs(
data, k=num_large, which='LR',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
if num_small > 0:
small_vals = sp.linalg.eigs(
data, k=num_small, which='SR',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
evals = np.hstack((small_vals, big_vals))
if isherm:
evals = np.real(evals)
_zipped = list(zip(evals, range(len(evals))))
_zipped.sort()
evals, perm = list(zip(*_zipped))
if vecs:
evecs = np.array([evecs[:, k] for k in perm])
# remove last element if requesting N-1 eigs and using sparse
if remove_one:
evals = np.delete(evals, -1)
if vecs:
evecs = np.delete(evecs, -1)
return np.array(evals), np.array(evecs)
def sp_eigs(data, isherm, vecs=True, sparse=False, sort='low',
eigvals=0, tol=0, maxiter=100000):
"""Returns Eigenvalues and Eigenvectors for a sparse matrix.
Uses dense eigen-solver unless user sets sparse=True.
Args:
data (csr_matrix): Input matrix.
isherm (bool): Indicate whether the matrix is hermitian or not
vecs (bool): Flag for requesting eigenvectors
sparse (bool): Flag to use sparse solver
sort (str): Return lowest or highest eigenvals/vecs
eigvals (int): Number of eigenvals/vecs to return.
Default = 0 (return all)
tol (float): Tolerance for sparse eigensolver.
Default = 0 (Machine precision)
maxiter (int): Max. number of iterations used by the sparse eigensolver.
Returns:
array: Eigenvalues and (by default) array of corresponding Eigenvectors.
Raises:
TypeError: Invalid input.
ValueError: Invalid input.
"""
if data.shape[0] != data.shape[1]:
raise TypeError("Can only diagonalize square matrices")
N = data.shape[0]
if eigvals == N:
eigvals = 0
if eigvals > N:
raise ValueError("Number of requested eigen vals/vecs must be <= N.")
# set number of large and small eigenvals/vecs
if eigvals == 0: # user wants all eigs (default)
D = int(np.ceil(N / 2.0))
num_large = N - D
if not np.mod(N, 2):
M = D
else:
M = D - 1
num_small = N - M
else: # if user wants only a few eigen vals/vecs
if sort == 'low':
num_small = eigvals
num_large = 0
elif sort == 'high':
num_large = eigvals
num_small = 0
else:
raise ValueError("Invalid option for 'sort'.")
# Dispatch to sparse/dense solvers
if sparse:
evals, evecs = _sp_eigs(data, isherm, vecs, N, eigvals, num_large,
num_small, tol, maxiter)
else:
evals, evecs = _dense_eigs(data.todense(), isherm, vecs, N, eigvals,
num_large, num_small)
if sort == 'high': # flip arrays to largest values first
if vecs:
evecs = np.flipud(evecs)
evals = np.flipud(evals)
return (evals, evecs) if vecs else evals
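# Illustrative usage sketch (not part of the original diff): for a Hermitian
# CSR matrix `H` (a stand-in name), the two lowest eigenpairs could be
# requested with the sparse solver as
#   evals, evecs = sp_eigs(H, isherm=True, vecs=True, sparse=True,
#                          sort='low', eigvals=2)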
def sp_expm(A, sparse=False):
"""
Sparse matrix exponential.
"""
if _isdiag(A.indices, A.indptr, A.shape[0]):
A = sp.diags(np.exp(A.diagonal()), shape=A.shape,
format='csr', dtype=complex)
return A
if sparse:
E = spla.expm(A.tocsc())
else:
E = spla.expm(A.toarray())
return sp.csr_matrix(E)
def sp_permute(A, rperm=(), cperm=(), safe=True):
"""
Permutes the rows and columns of a sparse CSR/CSC matrix
according to the permutation arrays rperm and cperm, respectively.
Here, the permutation arrays specify the new order of the rows and
columns. i.e. [0,1,2,3,4] -> [3,0,4,1,2].
Args:
A (csr_matrix): Input matrix.
rperm (ndarray): Array of row permutations.
cperm (ndarray): Array of column permutations.
safe (bool): Check structure of permutation arrays.
Returns:
csr_matrix: CSR matrix with permuted rows/columns.
Raises:
ValueError: Invalid input.
"""
rperm = np.asarray(rperm, dtype=np.int32)
cperm = np.asarray(cperm, dtype=np.int32)
nrows = A.shape[0]
ncols = A.shape[1]
if len(rperm) == 0:
rperm = np.arange(nrows, dtype=np.int32)
if len(cperm) == 0:
cperm = np.arange(ncols, dtype=np.int32)
if safe:
if len(np.setdiff1d(rperm, np.arange(nrows))) != 0:
raise ValueError('Invalid row permutation array.')
if len(np.setdiff1d(cperm, np.arange(ncols))) != 0:
raise ValueError('Invalid column permutation array.')
shp = A.shape
kind = A.getformat()
if kind == 'csr':
flag = 0
elif kind == 'csc':
flag = 1
else:
raise ValueError('Input must be Qobj, CSR, or CSC matrix.')
data, ind, ptr = _sparse_permute(A.data, A.indices, A.indptr,
nrows, ncols, rperm, cperm, flag)
if kind == 'csr':
return fast_csr_matrix((data, ind, ptr), shape=shp)
return sp.csc_matrix((data, ind, ptr), shape=shp, dtype=data.dtype)
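As an illustration of what a row permutation does (the exact convention of the Cython kernel may differ in detail, so treat this only as a sketch), here is a plain SciPy version using fancy indexing, together with the inverse permutation whose role is played by sp_reverse_permute below:
```
import numpy as np
import scipy.sparse as sp

A = sp.csr_matrix(np.arange(25, dtype=float).reshape(5, 5))
rperm = np.array([3, 0, 4, 1, 2])   # example permutation from the docstring
B = A[rperm, :]                     # one convention: row i of B is row rperm[i] of A

# The inverse permutation (argsort) restores the original ordering, which is
# the role sp_reverse_permute plays relative to sp_permute.
inv = np.argsort(rperm)
assert np.array_equal(B[inv, :].toarray(), A.toarray())
```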
def sp_reverse_permute(A, rperm=(), cperm=(), safe=True):
"""
Performs a reverse permutations of the rows and columns of a sparse CSR/CSC
matrix according to the permutation arrays rperm and cperm, respectively.
Here, the permutation arrays specify the order of the rows and columns used
to permute the original array.
Args:
A (csr_matrix): Input matrix.
rperm (ndarray): Array of row permutations.
cperm (ndarray): Array of column permutations.
safe (bool): Check structure of permutation arrays.
Returns:
csr_matrix: CSR matrix with permuted rows/columns.
Raises:
Exception: Invalid permutation.
"""
rperm = np.asarray(rperm, dtype=np.int32)
cperm = np.asarray(cperm, dtype=np.int32)
nrows = A.shape[0]
ncols = A.shape[1]
if len(rperm) == 0:
rperm = np.arange(nrows, dtype=np.int32)
if len(cperm) == 0:
cperm = np.arange(ncols, dtype=np.int32)
if safe:
if len(np.setdiff1d(rperm, np.arange(nrows))) != 0:
raise Exception('Invalid row permutation array.')
if len(np.setdiff1d(cperm, np.arange(ncols))) != 0:
raise Exception('Invalid column permutation array.')
shp = A.shape
kind = A.getformat()
if kind == 'csr':
flag = 0
elif kind == 'csc':
flag = 1
else:
raise Exception('Input must be Qobj, CSR, or CSC matrix.')
data, ind, ptr = _sparse_reverse_permute(A.data, A.indices, A.indptr,
nrows, ncols, rperm, cperm, flag)
if kind == 'csr':
return fast_csr_matrix((data, ind, ptr), shape=shp)
return sp.csc_matrix((data, ind, ptr), shape=shp, dtype=data.dtype)
def sp_bandwidth(A):
"""
Returns the max(mb), lower(lb), and upper(ub) bandwidths of a
sparse CSR/CSC matrix.
If the matrix is symmetric then the upper and lower bandwidths are
identical. Diagonal matrices have a bandwidth equal to one.
Args:
A (csr_matrix): Input matrix
Returns:
tuple: Maximum, lower, and upper bandwidths
Raises:
Exception: Invalid input.
"""
nrows = A.shape[0]
ncols = A.shape[1]
if A.getformat() == 'csr':
return _sparse_bandwidth(A.indices, A.indptr, nrows)
elif A.getformat() == 'csc':
# Normal output is mb,lb,ub but since CSC
# is transpose of CSR switch lb and ub
mb, ub, lb = _sparse_bandwidth(A.indices, A.indptr, ncols)
return mb, lb, ub
else:
raise Exception('Invalid sparse input format.')
def sp_profile(A):
"""Returns the total, lower, and upper profiles of a sparse matrix.
If the matrix is symmetric then the upper and lower profiles are
identical. Diagonal matrices have zero profile.
Args:
A (csr_matrix): Input matrix
Returns:
tuple: Maximum, lower, and upper profiles.
Raises:
TypeError: Invalid inputs.
"""
if sp.isspmatrix_csr(A):
up = _sparse_profile(A.indices, A.indptr, A.shape[0])
A = A.tocsc()
lp = _sparse_profile(A.indices, A.indptr, A.shape[0])
elif sp.isspmatrix_csc(A):
lp = _sparse_profile(A.indices, A.indptr, A.shape[0])
A = A.tocsr()
up = _sparse_profile(A.indices, A.indptr, A.shape[0])
else:
raise TypeError('Input sparse matrix must be in CSR or CSC format.')
return up + lp, lp, up
def sp_isdiag(A):
"""Determine if sparse CSR matrix is diagonal.
Args:
A (csr_matrix): Input matrix
Returns:
int: True if matrix is diagonal, False otherwise.
Raises:
TypeError: Invalid input.
"""
if not sp.isspmatrix_csr(A):
raise TypeError('Input sparse matrix must be in CSR format.')
return _isdiag(A.indices, A.indptr, A.shape[0])

View File

@ -1,359 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# pylint: disable=invalid-name
"""States
"""
import numpy as np
import scipy.sparse as sp
from .qobj import Qobj
from .operators import destroy
from .fastsparse import fast_csr_matrix
def basis(N, n=0, offset=0):
"""Generates the vector representation of a Fock state.
Args:
N (int): Number of Fock states in Hilbert space.
n (int): Integer corresponding to desired number
state, defaults to 0 if omitted.
offset (int): The lowest number state that is included
in the finite number state representation
of the state.
Returns:
Qobj: Qobj representing the requested number state ``|n>``.
Raises:
ValueError: Invalid input value.
"""
if (not isinstance(N, (int, np.integer))) or N < 0:
raise ValueError("N must be integer N >= 0")
if (not isinstance(n, (int, np.integer))) or n < offset:
raise ValueError("n must be integer n >= 0")
if n - offset > (N - 1): # check if n is within bounds
raise ValueError("basis vector index need to be in n <= N-1")
data = np.array([1], dtype=complex)
ind = np.array([0], dtype=np.int32)
ptr = np.array([0] * ((n - offset) + 1) + [1] * (N - (n - offset)),
dtype=np.int32)
return Qobj(fast_csr_matrix((data, ind, ptr), shape=(N, 1)), isherm=False)
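For reference, the CSR triple built above can be reproduced with plain SciPy (no Qobj); a minimal sketch for N=4, n=2, offset=0:
```
import numpy as np
import scipy.sparse as sp

N, n = 4, 2
data = np.array([1], dtype=complex)
ind = np.array([0], dtype=np.int32)
# indptr has n+1 leading zeros, then ones: the single nonzero sits in row n
ptr = np.array([0] * (n + 1) + [1] * (N - n), dtype=np.int32)
ket = sp.csr_matrix((data, ind, ptr), shape=(N, 1))
print(ket.toarray().ravel())   # [0.+0.j 0.+0.j 1.+0.j 0.+0.j]
```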
def qutrit_basis():
"""Basis states for a three level system (qutrit)
Returns:
array: Array of qutrit basis vectors
"""
return np.array([basis(3, 0), basis(3, 1), basis(3, 2)], dtype=object)
def coherent(N, alpha, offset=0, method='operator'):
"""Generates a coherent state with eigenvalue alpha.
Constructed using displacement operator on vacuum state.
Args:
N (int): Number of Fock states in Hilbert space.
alpha (complex): Eigenvalue of coherent state.
offset (int): The lowest number state that is included in the finite
number state representation of the state. Using a
non-zero offset will make the default method 'analytic'.
method (str): Method for generating coherent state.
Returns:
Qobj: Qobj quantum object for coherent state
Raises:
TypeError: Invalid input.
"""
if method == "operator" and offset == 0:
x = basis(N, 0)
a = destroy(N)
D = (alpha * a.dag() - np.conj(alpha) * a).expm()
return D * x
elif method == "analytic" or offset > 0:
sqrtn = np.sqrt(np.arange(offset, offset + N, dtype=complex))
sqrtn[0] = 1 # Get rid of divide by zero warning
data = alpha / sqrtn
if offset == 0:
data[0] = np.exp(-abs(alpha)**2 / 2.0)
else:
s = np.prod(np.sqrt(np.arange(1, offset + 1))) # sqrt factorial
data[0] = np.exp(-abs(alpha)**2 / 2.0) * alpha**(offset) / s
np.cumprod(data, out=sqrtn) # Reuse sqrtn array
return Qobj(sqrtn)
else:
raise TypeError(
"The method option can only take values 'operator' or 'analytic'")
def coherent_dm(N, alpha, offset=0, method='operator'):
"""Density matrix representation of a coherent state.
Constructed via outer product of :func:`qutip.states.coherent`
Parameters:
N (int): Number of Fock states in Hilbert space.
alpha (complex): Eigenvalue for coherent state.
offset (int): The lowest number state that is included in the
finite number state representation of the state.
method (str): Method for generating coherent density matrix.
Returns:
Qobj: Density matrix representation of coherent state.
Raises:
TypeError: Invalid input.
"""
if method == "operator":
psi = coherent(N, alpha, offset=offset)
return psi * psi.dag()
elif method == "analytic":
psi = coherent(N, alpha, offset=offset, method='analytic')
return psi * psi.dag()
else:
raise TypeError(
"The method option can only take values 'operator' or 'analytic'")
def fock_dm(N, n=0, offset=0):
"""Density matrix representation of a Fock state
Constructed via outer product of :func:`qutip.states.fock`.
Args:
N (int): Number of Fock states in Hilbert space.
n (int): Desired number state, defaults to 0 if omitted.
offset (int): Energy level offset.
Returns:
Qobj: Density matrix representation of Fock state.
"""
psi = basis(N, n, offset=offset)
return psi * psi.dag()
def fock(N, n=0, offset=0):
"""Bosonic Fock (number) state.
Same as :func:`qutip.states.basis`.
Args:
N (int): Number of states in the Hilbert space.
n (int): Desired number state, defaults to 0 if omitted.
offset (int): Energy level offset.
Returns:
Qobj: Requested number state :math:`\\left|n\\right>`.
"""
return basis(N, n, offset=offset)
def thermal_dm(N, n, method='operator'):
"""Density matrix for a thermal state of n particles
Args:
N (int): Number of basis states in Hilbert space.
n (float): Expectation value for number of particles
in thermal state.
method (str): Sets the method used to generate the
thermal state probabilities
Returns:
Qobj: Thermal state density matrix.
Raises:
ValueError: Invalid input.
"""
if n == 0:
return fock_dm(N, 0)
else:
i = np.arange(N)
if method == 'operator':
beta = np.log(1.0 / n + 1.0)
diags = np.exp(-1 * beta * i)
diags = diags / np.sum(diags)
# populates diagonal terms using truncated operator expression
rm = sp.spdiags(diags, 0, N, N, format='csr')
elif method == 'analytic':
# populates diagonal terms using analytic values
rm = sp.spdiags((1.0 + n) ** (-1.0) * (n / (1.0 + n)) ** (i),
0, N, N, format='csr')
else:
raise ValueError(
"'method' keyword argument must be 'operator' or 'analytic'")
return Qobj(rm)
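The 'analytic' branch above fills the diagonal with the geometric distribution p_i = (1 + nbar)**(-1) * (nbar / (1 + nbar))**i for mean particle number nbar; a quick numpy check that the 'operator' branch produces the same weights once the truncation error is negligible:
```
import numpy as np

N, nbar = 40, 2.0
i = np.arange(N)

analytic = (1.0 + nbar) ** (-1.0) * (nbar / (1.0 + nbar)) ** i

beta = np.log(1.0 / nbar + 1.0)
operator = np.exp(-beta * i)
operator = operator / np.sum(operator)   # normalized truncated Boltzmann weights

# For N this large the two diagonals agree to numerical precision
assert np.allclose(analytic, operator, atol=1e-6)
```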
def maximally_mixed_dm(N):
"""
Returns the maximally mixed density matrix for a Hilbert space of
dimension N.
Args:
N (int): Number of basis states in Hilbert space.
Returns:
Qobj: Maximally mixed density matrix.
Raises:
ValueError: Invalid input.
"""
if (not isinstance(N, (int, np.int64))) or N <= 0:
raise ValueError("N must be integer N > 0")
dm = sp.spdiags(np.ones(N, dtype=complex) / float(N),
0, N, N, format='csr')
return Qobj(dm, isherm=True)
def ket2dm(Q):
"""Takes input ket or bra vector and returns density matrix
formed by outer product.
Args:
Q (Qobj): Ket or bra type quantum object.
Returns:
Qobj: Density matrix formed by outer product of `Q`.
Raises:
TypeError: Invalid input.
"""
if Q.type == 'ket':
out = Q * Q.dag()
elif Q.type == 'bra':
out = Q.dag() * Q
else:
raise TypeError("Input is not a ket or bra vector.")
return Qobj(out)
#
# projection operator
#
def projection(N, n, m, offset=0):
"""The projection operator that projects state :math:`|m>` on state :math:`|n>`.
Args:
N (int): Number of basis states in Hilbert space.
n (int): The number state of the ket in :math:`|n><m|`.
m (int): The number state of the bra in :math:`|n><m|`.
offset (int): The lowest number state that is included in
the finite number state representation of the projector.
Returns:
Qobj: Requested projection operator.
"""
ket1 = basis(N, n, offset=offset)
ket2 = basis(N, m, offset=offset)
return ket1 * ket2.dag()
def zero_ket(N, dims=None):
"""
Creates the zero ket vector with shape Nx1 and
dimensions `dims`.
Parameters
----------
N : int
Hilbert space dimensionality
dims : list
Optional dimensions if ket corresponds to
a composite Hilbert space.
Returns
-------
zero_ket : qobj
Zero ket on given Hilbert space.
"""
return Qobj(sp.csr_matrix((N, 1), dtype=complex), dims=dims)

View File

@ -1,328 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# pylint: disable=invalid-name
"""
Module for super operators.
"""
import numpy as np
from .qobj import Qobj
from .fastsparse import fast_csr_matrix, fast_identity
from .sparse import sp_reshape
# pylint: disable=no-name-in-module, import-error
from .cy.spmath import zcsr_kron
# pylint: disable=dangerous-default-value
def liouvillian(H, c_ops=[], data_only=False, chi=None):
"""Assembles the Liouvillian superoperator from a Hamiltonian
and a ``list`` of collapse operators.
Args:
H (qobj.Qobj): System Hamiltonian.
c_ops (qobj.Qobj or array_like): A single collapse operator
or an array.
data_only (bool): Return data only.
chi (list): Multiplication factors, one per collapse operator.
Returns:
qobj.Qobj: Liouvillian superoperator.
Raises:
ValueError: Chi must be list of len(c_ops).
TypeError: Invalid input types.
"""
if isinstance(c_ops, (Qobj)):
c_ops = [c_ops]
if chi and len(chi) != len(c_ops):
raise ValueError('chi must be a list with same length as c_ops')
h = None
if H is not None:
h = H
if h.isoper:
op_dims = h.dims
op_shape = h.shape
elif h.issuper:
op_dims = h.dims[0]
op_shape = [np.prod(op_dims[0]), np.prod(op_dims[0])]
else:
raise TypeError("Invalid type for Hamiltonian.")
else:
# no hamiltonian given, pick system size from a collapse operator
if isinstance(c_ops, list) and any(c_ops) > 0:
c = c_ops[0]
if c.isoper:
op_dims = c.dims
op_shape = c.shape
elif c.issuper:
op_dims = c.dims[0]
op_shape = [np.prod(op_dims[0]), np.prod(op_dims[0])]
else:
raise TypeError("Invalid type for collapse operator.")
else:
raise TypeError("Either H or c_ops must be given.")
sop_dims = [[op_dims[0], op_dims[0]], [op_dims[1], op_dims[1]]]
sop_shape = [np.prod(op_dims), np.prod(op_dims)]
spI = fast_identity(op_shape[0])
L = None
if isinstance(H, Qobj):
if H.isoper:
Ht = H.data.T
data = -1j * zcsr_kron(spI, H.data)
data += 1j * zcsr_kron(Ht, spI)
else:
data = H.data
else:
data = fast_csr_matrix(shape=(sop_shape[0], sop_shape[1]))
for idx, c_op in enumerate(c_ops):
c_ = c_op
if c_.issuper:
data = data + c_.data
else:
cd = c_.data.H
c = c_.data
if chi:
data = data + (np.exp(1j * chi[idx]) *
zcsr_kron(c.conj(), c))
else:
data = data + zcsr_kron(c.conj(), c)
cdc = cd * c
cdct = cdc.T
data = data - 0.5 * zcsr_kron(spI, cdc)
data = data - 0.5 * zcsr_kron(cdct, spI)
if data_only:
return data
else:
L = Qobj()
L.dims = sop_dims
L.data = data
L.superrep = 'super'
return L
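For a dense cross-check of the assembly above, which uses the column-stacking convention vec(A rho B) = (B^T ⊗ A) vec(rho), here is a small numpy sketch comparing the assembled superoperator against direct evaluation of the Lindblad right-hand side (all matrices are random and illustrative):
```
import numpy as np

rng = np.random.default_rng(0)
d = 3
H = rng.standard_normal((d, d)) + 1j * rng.standard_normal((d, d))
H = H + H.conj().T                          # Hermitian Hamiltonian
c = rng.standard_normal((d, d)) + 1j * rng.standard_normal((d, d))
rho = rng.standard_normal((d, d)) + 1j * rng.standard_normal((d, d))

I = np.eye(d)
cdc = c.conj().T @ c
L = (-1j * np.kron(I, H) + 1j * np.kron(H.T, I)
     + np.kron(c.conj(), c)
     - 0.5 * np.kron(I, cdc) - 0.5 * np.kron(cdc.T, I))

vec = lambda m: m.reshape(-1, order='F')    # column stacking, as in mat2vec below
rhs = (-1j * (H @ rho - rho @ H)
       + c @ rho @ c.conj().T - 0.5 * (cdc @ rho + rho @ cdc))
assert np.allclose(L @ vec(rho), vec(rhs))
```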
def lindblad_dissipator(a, b=None, data_only=False, chi=None):
"""
Lindblad dissipator (generalized) for a single pair of collapse operators
(a, b), or for a single collapse operator (a) when b is not specified:
.. math::
\\mathcal{D}[a,b]\\rho = a \\rho b^\\dagger -
\\frac{1}{2}a^\\dagger b\\rho - \\frac{1}{2}\\rho a^\\dagger b
Args:
a (Qobj): Left part of collapse operator.
b (Qobj): Right part of collapse operator. If not specified,
b defaults to a.
data_only (bool): Return data only.
chi (float): Multiplication factor.
Returns:
Qobj: Lindblad dissipator superoperator.
"""
if b is None:
b = a
ad_b = a.dag() * b
if chi:
D = spre(a) * spost(b.dag()) * np.exp(1j * chi) \
- 0.5 * spre(ad_b) - 0.5 * spost(ad_b)
else:
D = spre(a) * spost(b.dag()) - 0.5 * spre(ad_b) - 0.5 * spost(ad_b)
return D.data if data_only else D
def operator_to_vector(op):
"""
Create a vector representation of a quantum operator given
the matrix representation.
"""
q = Qobj()
q.dims = [op.dims, [1]]
q.data = sp_reshape(op.data.T, (np.prod(op.shape), 1))
return q
def vector_to_operator(op):
"""
Create a matrix representation given a quantum operator in
vector form.
"""
q = Qobj()
q.dims = op.dims[0]
n = int(np.sqrt(op.shape[0]))
q.data = sp_reshape(op.data.T, (n, n)).T
return q
def mat2vec(mat):
"""
Private function reshaping matrix to vector.
"""
return mat.T.reshape(np.prod(np.shape(mat)), 1)
def vec2mat(vec):
"""
Private function reshaping vector to matrix.
"""
n = int(np.sqrt(len(vec)))
return vec.reshape((n, n)).T
def vec2mat_index(N, I):
"""
Convert a vector index to a matrix index pair that is compatible with the
vector to matrix rearrangement done by the vec2mat function.
"""
j = int(I / N)
i = I - N * j
return i, j
def mat2vec_index(N, i, j):
"""
Convert a matrix index pair to a vector index that is compatible with the
matrix to vector rearrangement done by the mat2vec function.
"""
return i + N * j
def spost(A):
"""Superoperator formed from post-multiplication by operator A
Args:
A (Qobj): Quantum operator for post multiplication.
Returns:
Qobj: Superoperator formed from input quantum object.
Raises:
TypeError: Invalid inputs.
"""
if not isinstance(A, Qobj):
raise TypeError('Input is not a quantum object')
if not A.isoper:
raise TypeError('Input is not a quantum operator')
S = Qobj(isherm=A.isherm, superrep='super')
S.dims = [[A.dims[0], A.dims[1]], [A.dims[0], A.dims[1]]]
S.data = zcsr_kron(A.data.T,
fast_identity(np.prod(A.shape[0])))
return S
def spre(A):
"""Superoperator formed from pre-multiplication by operator A.
Args:
A (Qobj): Quantum operator for pre-multiplication.
Returns:
Qobj: Superoperator formed from input quantum object.
Raises:
TypeError: Invalid input type.
"""
if not isinstance(A, Qobj):
raise TypeError('Input is not a quantum object')
if not A.isoper:
raise TypeError('Input is not a quantum operator')
S = Qobj(isherm=A.isherm, superrep='super')
S.dims = [[A.dims[0], A.dims[1]], [A.dims[0], A.dims[1]]]
S.data = zcsr_kron(fast_identity(np.prod(A.shape[1])), A.data)
return S
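A dense numpy check of the conventions used by spost/spre together with the column-stacking reshape in mat2vec above (values illustrative):
```
import numpy as np

rng = np.random.default_rng(1)
d = 3
A = rng.standard_normal((d, d)) + 1j * rng.standard_normal((d, d))
rho = rng.standard_normal((d, d)) + 1j * rng.standard_normal((d, d))
I = np.eye(d)

vec = lambda m: m.reshape(-1, order='F')   # column stacking, as in mat2vec

spre_A = np.kron(I, A)      # pre-multiplication:  maps vec(rho) to vec(A @ rho)
spost_A = np.kron(A.T, I)   # post-multiplication: maps vec(rho) to vec(rho @ A)
assert np.allclose(spre_A @ vec(rho), vec(A @ rho))
assert np.allclose(spost_A @ vec(rho), vec(rho @ A))
```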
def _drop_projected_dims(dims):
"""
Eliminate subsystems that have been collapsed to only one state due to
a projection.
"""
return [d for d in dims if d != 1]
def sprepost(A, B):
"""Superoperator formed from pre-multiplication by operator A and post-
multiplication of operator B.
Parameters
----------
A : Qobj or QobjEvo
Quantum operator for pre-multiplication.
B : Qobj or QobjEvo
Quantum operator for post-multiplication.
Returns
--------
super : Qobj or QobjEvo
Superoperator formed from input quantum objects.
"""
dims = [[_drop_projected_dims(A.dims[0]),
_drop_projected_dims(B.dims[1])],
[_drop_projected_dims(A.dims[1]),
_drop_projected_dims(B.dims[0])]]
data = zcsr_kron(B.data.T, A.data)
return Qobj(data, dims=dims, superrep='super')

View File

@ -1,88 +0,0 @@
# Openpulse
This simulates jobs using the OpenPulse format (see the spec).
## Example
The Hamiltonian `H=-w_0 * \sigma_z/2 + w_1 * cos(w t + \phi) * \sigma_x/2` may be specified as:
```
hamiltonian = {}
hamiltonian['h_str'] = []
#Q0 terms
hamiltonian['h_str'].append('-0.5*w0*Z0')
hamiltonian['h_str'].append('0.5*w1*X0||D0')
hamiltonian['vars'] = {'w0': (add w0 val here), 'w1': (add w1 value here)}
# set the qubit dimension to 2
hamiltonian['qub'] = {'0': 2}
```
This Hamiltonian has a closed form in the rotating frame specified in the solving section.
Note: Variable names must be lowercase (uppercase is reserved for operators).
## Hamiltonian
One of the main components of the open pulse simulator is the specification
of the Hamiltonian. The Hamiltonian dictates how the simulation will evolve
the dynamics based on the pulse schedule.
There are two allowed components to a simulation, *qubits* and *oscillators*.
*Qubits* are the objects that will be measured at the end of the simulation.
The operators for qubits are defined as Paulis, but if the number of levels
is defined to be greater than 2 these will be internally converted to
creation and annihilation operators.
The Hamiltonian is a dictionary comprising
- `h_str`: Definition of the Hamiltonian in terms of operators, drives and
coefficients
- `vars`: Numeric values for the variables in the Hamiltonian
- `qub`: Dictionary indicating the number of levels for each qubit to
use in the simulation
- `osc`: Dictionary indicating the number of levels for each oscillator to
use in the simulation
There must be qubits, but there do not need to be oscillators. Measurements
are given by the qubit state; the measurement process is not simulated.
### `h_str` Syntax
The `h_str` is a list of strings which indicate various terms in the
Hamiltonian. These can have the form
`<op>||<ch>` or `<op>`
where `<op>` is a string form of an operator with coefficients and `<ch>`
indicates one of the time-dependent drive channels (e.g. `D2` or `U10`).
Additionally, there is a sum form
`_SUM[i,n1,n2,F[{i}]]`, where `{i}` is replaced by the value of the loop index `i` in each term of the sum.
Available operators are:
{'X': sigmax, 'Y': sigmay, 'Z': sigmaz,
'Sp': creation (sigma plus), 'Sm': destruction (sigma minus), 'I': identity,
'O': number, 'P': projection, 'A': destruction, 'C': creation, 'N': number op}
The following functions are also available:
{'cos': cos, 'sin': sin, 'exp': exp,
'sqrt': sqrt, 'conj': complex conjugate, 'dag': dagger (Hermitian conjugate)}
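Putting the pieces together, a two-qubit drift-plus-drive `h_str` using the sum form could look like this (the variable names `w0`, `w1`, `r0`, `r1` and their values are made up for illustration):
```
hamiltonian['h_str'] = ['_SUM[i,0,1,-0.5*w{i}*Z{i}]',
                        '_SUM[i,0,1,0.5*r{i}*X{i}||D{i}]']
hamiltonian['vars'] = {'w0': 5.0, 'w1': 5.1, 'r0': 0.02, 'r1': 0.02}
hamiltonian['qub'] = {'0': 2, '1': 2}
```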
## Solving
The solver takes the Hamiltonian (`H(t=0)`) and sets all drive/control channels to zero.
Let `H_d` be the diagonal elements of `H(t=0)`; then the transformation applied to the statevector `\psi` is
`U=e^{-i H_d t/hbar}` (`\psi \rightarrow U \psi`). For all drive/control channels, the LO is applied so
`d(t) -> D(t)e^{-i w t}`. The LO frequency `w` may be set in the pulse. If the drive is associated with some operator *B*, then
the upper triangular part of *B* is multiplied by `d(t)` and the lower triangular part
by `d*(t)`. This ensures the Hamiltonian is Hermitian and that, in the transformed
frame, a resonant pulse is close to DC.
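As a rough numeric sketch of the transformation just described (with `hbar = 1` and illustrative values):
```
import numpy as np

h_diag = np.array([0.0, 5.0])          # diagonal of H(t=0), illustrative
psi = np.array([1.0, 1.0], dtype=complex) / np.sqrt(2)
t = 1.3

# U = exp(-i H_d t) acts elementwise in the diagonal basis
psi_rot = np.exp(-1j * h_diag * t) * psi

# Drive channel value with the LO applied: d(t) = D(t) * exp(-i w t)
w, D_t = 5.0, 0.01                     # LO frequency and pulse sample, illustrative
d_t = D_t * np.exp(-1j * w * t)
```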
### Measurement
The measurement operators are the projections onto the 1 excitation subsystem for qubit `l`
where qubit `l` is defined by diagonalizing `H(t=0)` (i.e. the dressed basis).
There are three measurement levels that return the data.
Measurement level `0` gives the raw data.
Measurement level `1` gives complex numbers (IQ values).
Measurement level `2` gives the discriminated states, `|0>` and `|1>`.

View File

@ -1,284 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
# pylint: disable=invalid-name
"""OpenPulse runtime code generator"""
import os
import sys
from ..qutip_lite import cy
from . import settings
_cython_path = os.path.abspath(cy.__file__).replace('__init__.py', '')
_cython_path = _cython_path.replace("\\", "/")
_include_string = "'" + _cython_path + "complex_math.pxi'"
class OPCodegen():
"""
Class for generating cython code files at runtime.
"""
def __init__(self, op_system):
sys.path.append(os.getcwd())
# Hamiltonian time-dependent pieces
self.op_system = op_system
self.dt = op_system.dt
self.num_ham_terms = self.op_system.global_data['num_h_terms']
# Code generator properties
self._file = None
self.code = [] # strings to be written to file
self.level = 0 # indent level
self.spline_count = 0
def write(self, string):
"""write lines of code to self.code"""
self.code.append(" " * self.level + string + "\n")
def file(self, filename):
"""open file called filename for writing"""
self._file = open(filename, "w")
def generate(self, filename="rhs.pyx"):
"""generate the file"""
for line in cython_preamble():
self.write(line)
# write function for Hamiltonian terms (there is always at least one
# term)
for line in cython_checks() + self.ODE_func_header():
self.write(line)
self.indent()
for line in func_header(self.op_system):
self.write(line)
for line in self.channels():
self.write(line)
for line in self.func_vars():
self.write(line)
for line in self.func_end():
self.write(line)
self.dedent()
self.file(filename)
self._file.writelines(self.code)
self._file.close()
settings.CGEN_NUM += 1
def indent(self):
"""increase indention level by one"""
self.level += 1
def dedent(self):
"""decrease indention level by one"""
if self.level == 0:
raise SyntaxError("Error in code generator")
self.level -= 1
def ODE_func_header(self):
"""Creates function header for time-dependent ODE RHS."""
func_name = "def cy_td_ode_rhs("
# strings for time and vector variables
input_vars = ("\n double t" +
",\n complex[::1] vec")
# add diagonal hamiltonian terms
input_vars += (",\n double[::1] energ")
for k in range(self.num_ham_terms):
input_vars += (",\n " +
"complex[::1] data%d, " % k +
"int[::1] idx%d, " % k +
"int[::1] ptr%d" % k)
# Add global variables
input_vars += (",\n " + "complex[::1] pulse_array")
input_vars += (",\n " + "unsigned int[::1] pulse_indices")
# Add per experiment variables
for key in self.op_system.channels.keys():
input_vars += (",\n " + "double[::1] %s_pulses" % key)
input_vars += (",\n " + "double[::1] %s_fc" % key)
# add Hamiltonian variables
for key in self.op_system.vars.keys():
input_vars += (",\n " + "complex %s" % key)
# add Freq variables
for key in self.op_system.freqs.keys():
input_vars += (",\n " + "double %s_freq" % key)
# register
input_vars += (",\n " + "unsigned char[::1] register")
func_end = "):"
return [func_name + input_vars + func_end]
def channels(self):
"""Write out the channels
"""
channel_lines = [""]
channel_lines.append("# Compute complex channel values at time `t`")
for chan, idx in self.op_system.channels.items():
chan_str = "%s = chan_value(t, %s, %s_freq, " % (chan, idx, chan) + \
"%s_pulses, pulse_array, pulse_indices, " % chan + \
"%s_fc, register)" % (chan)
channel_lines.append(chan_str)
channel_lines.append('')
return channel_lines
def func_vars(self):
"""Writes the variables and spmv parts"""
func_vars = []
sp1 = " "
sp2 = sp1 + sp1
func_vars.append("# Eval the time-dependent terms and do SPMV.")
for idx in range(len(self.op_system.system) + 1):
if (idx == len(self.op_system.system) and
(len(self.op_system.system) < self.num_ham_terms)):
# this is the noise term
term = [1.0, 1.0]
elif idx < len(self.op_system.system):
term = self.op_system.system[idx]
else:
continue
if isinstance(term, list) or term[1]:
func_vars.append("td%s = %s" % (idx, term[1]))
else:
func_vars.append("td%s = 1.0" % (idx))
func_vars.append("if abs(td%s) > 1e-15:" % idx)
func_vars.append(sp1 + "for row in range(num_rows):")
func_vars.append(sp2 + "dot = 0;")
func_vars.append(sp2 + "row_start = ptr%d[row];" % idx)
func_vars.append(sp2 + "row_end = ptr%d[row+1];" % idx)
func_vars.append(sp2 + "for jj in range(row_start,row_end):")
func_vars.append(sp1 +
sp2 +
"osc_term = exp(1j*(energ[row]-energ[idx%d[jj]])*t)" % idx)
func_vars.append(sp1 + sp2 + "if row<idx%d[jj]:" % idx)
func_vars.append(sp2 + sp2 + "coef = conj(td%d)" % idx)
func_vars.append(sp1 + sp2 + "else:")
func_vars.append(sp2 + sp2 + "coef = td%d" % idx)
func_vars.append(sp1 + sp2 +
"dot += coef*osc_term*data%d[jj]*vec[idx%d[jj]];" % (idx, idx))
func_vars.append(sp2 + "out[row] += dot;")
# remove the diagonal terms
func_vars.append("for row in range(num_rows):")
func_vars.append(sp1 + "out[row] += 1j*energ[row]*vec[row];")
return func_vars
def func_end(self):
"""End of the RHS function.
"""
end_str = [""]
end_str.append("# Convert to NumPy array, grab ownership, and return.")
end_str.append("cdef np.npy_intp dims = num_rows")
temp_str = "cdef np.ndarray[complex, ndim=1, mode='c'] arr_out = "
temp_str += "np.PyArray_SimpleNewFromData(1, &dims, np.NPY_COMPLEX128, out)"
end_str.append(temp_str)
end_str.append("PyArray_ENABLEFLAGS(arr_out, np.NPY_OWNDATA)")
end_str.append("return arr_out")
return end_str
def func_header(op_system):
"""Header for the RHS function.
"""
func_vars = ["", 'cdef size_t row', 'cdef unsigned int row_start, row_end',
'cdef unsigned int num_rows = vec.shape[0]',
'cdef double complex dot, osc_term, coef',
"cdef double complex * " +
'out = <complex *>PyDataMem_NEW(num_rows * sizeof(complex))',
'memset(&out[0],0,num_rows * sizeof(complex))'
]
func_vars.append("")
for val in op_system.channels:
func_vars.append("cdef double complex %s" % val)
for kk in range(len(op_system.system) + 1):
func_vars.append("cdef double complex td%s" % kk)
return func_vars
def cython_preamble():
"""
Returns list of code segments for Cython preamble.
"""
preamble = ["""\
#!python
#cython: language_level=3
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import numpy as np
cimport numpy as np
cimport cython
np.import_array()
cdef extern from "numpy/arrayobject.h" nogil:
void PyDataMem_NEW(size_t size)
void PyArray_ENABLEFLAGS(np.ndarray arr, int flags)
cdef extern from "<complex>" namespace "std" nogil:
double complex exp(double complex x)
cdef extern from "<complex>" namespace "std" nogil:
double complex conj(double complex x)
from qiskit.providers.aer.pulse.qutip_lite.cy.spmatfuncs cimport spmvpy
from libc.math cimport pi
from qiskit.providers.aer.pulse.cy.channel_value cimport chan_value
from libc.string cimport memset
include """ + _include_string + """
"""]
return preamble
def cython_checks():
"""
List of strings that turn off Cython checks.
"""
return ["""@cython.cdivision(True)
@cython.boundscheck(False)
@cython.wraparound(False)"""]

View File

@ -1,126 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Data configuration module"""
import numpy as np
pi = np.pi
def op_data_config(op_system):
""" Preps the data for the opsolver.
Everything is stored in the passed op_system.
Args:
op_system (OPSystem): An openpulse system.
"""
num_h_terms = len(op_system.system)
H = [hpart[0] for hpart in op_system.system]
op_system.global_data['num_h_terms'] = num_h_terms
# take care of collapse operators, if any
op_system.global_data['c_num'] = 0
if op_system.noise:
op_system.global_data['c_num'] = len(op_system.noise)
op_system.global_data['num_h_terms'] += 1
op_system.global_data['c_ops_data'] = []
op_system.global_data['c_ops_ind'] = []
op_system.global_data['c_ops_ptr'] = []
op_system.global_data['n_ops_data'] = []
op_system.global_data['n_ops_ind'] = []
op_system.global_data['n_ops_ptr'] = []
op_system.global_data['h_diag_elems'] = op_system.h_diag
# if there are any collapse operators
H_noise = 0
for kk in range(op_system.global_data['c_num']):
c_op = op_system.noise[kk]
n_op = c_op.dag() * c_op
# collapse ops
op_system.global_data['c_ops_data'].append(c_op.data.data)
op_system.global_data['c_ops_ind'].append(c_op.data.indices)
op_system.global_data['c_ops_ptr'].append(c_op.data.indptr)
# norm ops
op_system.global_data['n_ops_data'].append(n_op.data.data)
op_system.global_data['n_ops_ind'].append(n_op.data.indices)
op_system.global_data['n_ops_ptr'].append(n_op.data.indptr)
# Norm ops added to time-independent part of
# Hamiltonian to decrease norm
H_noise -= 0.5j * n_op
if H_noise:
H = H + [H_noise]
# construct data sets
op_system.global_data['h_ops_data'] = [-1.0j * hpart.data.data
for hpart in H]
op_system.global_data['h_ops_ind'] = [hpart.data.indices for hpart in H]
op_system.global_data['h_ops_ptr'] = [hpart.data.indptr for hpart in H]
# setup ode args string
ode_var_str = ""
# diagonal elements
ode_var_str += "global_data['h_diag_elems'], "
# Hamiltonian data
for kk in range(op_system.global_data['num_h_terms']):
h_str = "global_data['h_ops_data'][%s], " % kk
h_str += "global_data['h_ops_ind'][%s], " % kk
h_str += "global_data['h_ops_ptr'][%s], " % kk
ode_var_str += h_str
# Add pulse array and pulse indices
ode_var_str += "global_data['pulse_array'], "
ode_var_str += "global_data['pulse_indices'], "
var_list = list(op_system.vars.keys())
final_var = var_list[-1]
freq_list = list(op_system.freqs.keys())
final_freq = freq_list[-1]
# Now add channel variables
chan_list = list(op_system.channels.keys())
final_chan = chan_list[-1]
for chan in chan_list:
ode_var_str += "exp['channels']['%s'][0], " % chan
ode_var_str += "exp['channels']['%s'][1]" % chan
if chan != final_chan or var_list:
ode_var_str += ', '
# now do the variables
for idx, var in enumerate(var_list):
ode_var_str += "global_data['vars'][%s]" % idx
if var != final_var or freq_list:
ode_var_str += ', '
# now do the freq
for idx, freq in enumerate(freq_list):
ode_var_str += "global_data['freqs'][%s]" % idx
if freq != final_freq:
ode_var_str += ', '
# Add register
ode_var_str += ", register"
op_system.global_data['string'] = ode_var_str
# Convert initial state to flat array in global_data
op_system.global_data['initial_state'] = \
op_system.initial_state.full().ravel()

View File

@ -1,279 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-name-in-module, import-error, invalid-name
"""The main OpenPulse solver routine.
"""
import time
import numpy as np
from scipy.linalg.blas import get_blas_funcs
from qiskit.tools.parallel import parallel_map, CPU_COUNT
from ..qutip_lite.cy.spmatfuncs import cy_expect_psi_csr
from ..qutip_lite.cy.utilities import _cython_build_cleanup
from ..qobj.operators import apply_projector
from .rhs_utils import _op_generate_rhs, _op_func_load
from .data_config import op_data_config
from .unitary import unitary_evolution
from .monte_carlo import monte_carlo
dznrm2 = get_blas_funcs("znrm2", dtype=np.float64)
#
# Internal, global variables for storing references to dynamically loaded
# cython functions
#
_cy_rhs_func = None
def opsolve(op_system):
"""Opsolver
"""
if not op_system.initial_state.isket:
raise Exception("Initial state must be a state vector.")
# set num_cpus to the value given in settings if none in Options
if not op_system.ode_options.num_cpus:
op_system.ode_options.num_cpus = CPU_COUNT
# build Hamiltonian data structures
op_data_config(op_system)
if not op_system.use_cpp_ode_func:
# compile Cython RHS
_op_generate_rhs(op_system)
# Load cython function
_op_func_load(op_system)
# load monte carlo class
montecarlo = OP_mcwf(op_system)
# Run the simulation
out = montecarlo.run()
# Results are stored in ophandler.result
return out
# -----------------------------------------------------------------------------
# MONTE CARLO CLASS
# -----------------------------------------------------------------------------
class OP_mcwf():
"""
Private class for solving Monte Carlo evolution
"""
def __init__(self, op_system):
self.op_system = op_system
# set output variables, even if they are not used to simplify output
# code.
self.output = None
self.collapse_times = None
self.collapse_operator = None
# FOR EVOLUTION WITH COLLAPSE OPERATORS
if not op_system.can_sample:
# preallocate ntraj arrays for state vectors, collapse times, and
# which operator
self.collapse_times = [[] for kk in
range(op_system.global_data['shots'])]
self.collapse_operators = [[] for kk in
range(op_system.global_data['shots'])]
# setup seeds array
if op_system.global_data['seed']:
prng = np.random.RandomState(op_system.global_data['seed'])
else:
prng = np.random.RandomState(
np.random.randint(np.iinfo(np.int32).max - 1))
for exp in op_system.experiments:
exp['seed'] = prng.randint(np.iinfo(np.int32).max - 1)
def run(self):
"""Runs the solver.
"""
map_kwargs = {'num_processes': self.op_system.ode_options.num_cpus}
# exp_results from the solvers return the values of the measurement
# operators
# If no collapse terms, and only measurements at end
# can do a single shot.
# exp_results is a list of '0' and '1'
# where '0' occurs with probability 1-<M>
# and '1' occurs with probability <M>
# M is the measurement operator, which is a projector
# into one of the qubit states (usually |1>)
if self.op_system.can_sample:
start = time.time()
exp_results = parallel_map(unitary_evolution,
self.op_system.experiments,
task_args=(self.op_system,),
**map_kwargs
)
end = time.time()
exp_times = (np.ones(len(self.op_system.experiments)) *
(end - start) / len(self.op_system.experiments))
# need to simulate each trajectory, so shots*len(experiments) times
# Do a for-loop over experiments, and do shots in parallel_map
else:
exp_results = []
exp_times = []
for exp in self.op_system.experiments:
start = time.time()
rng = np.random.RandomState(exp['seed'])
seeds = rng.randint(np.iinfo(np.int32).max - 1,
size=self.op_system.global_data['shots'])
exp_res = parallel_map(monte_carlo,
seeds,
task_args=(exp, self.op_system,),
**map_kwargs)
# exp_results is a list for each shot
# so transform back to an array of shots
exp_res2 = []
for exp_shot in exp_res:
exp_res2.append(exp_shot[0].tolist())
end = time.time()
exp_times.append(end - start)
exp_results.append(np.array(exp_res2))
# format the data into the proper output
all_results = []
for idx_exp, exp in enumerate(self.op_system.experiments):
m_lev = self.op_system.global_data['meas_level']
m_ret = self.op_system.global_data['meas_return']
# populate the results dictionary
results = {'seed_simulator': exp['seed'],
'shots': self.op_system.global_data['shots'],
'status': 'DONE',
'success': True,
'time_taken': exp_times[idx_exp],
'header': exp['header'],
'meas_level': m_lev,
'meas_return': m_ret,
'data': {}}
if self.op_system.can_sample:
memory = exp_results[idx_exp][0]
results['data']['statevector'] = []
for coef in exp_results[idx_exp][1]:
results['data']['statevector'].append([np.real(coef),
np.imag(coef)])
results['header']['ode_t'] = exp_results[idx_exp][2]
else:
memory = exp_results[idx_exp]
# meas_level 2 return the shots
if m_lev == 2:
# convert the memory **array** into an
# integer
# e.g. [1,0] -> 2
int_mem = memory.dot(np.power(2.0,
np.arange(memory.shape[1]))).astype(int)
# if the memory flag is set return each shot
if self.op_system.global_data['memory']:
hex_mem = [hex(val) for val in int_mem]
results['data']['memory'] = hex_mem
# Get hex counts dict
unique = np.unique(int_mem, return_counts=True)
hex_dict = {}
for kk in range(unique[0].shape[0]):
key = hex(unique[0][kk])
hex_dict[key] = unique[1][kk]
results['data']['counts'] = hex_dict
# meas_level 1 returns the <n>
elif m_lev == 1:
if m_ret == 'avg':
memory = [np.mean(memory, 0)]
# convert into the right [real, complex] pair form for json
# this should be cython?
results['data']['memory'] = []
for mem_shot in memory:
results['data']['memory'].append([])
for mem_slot in mem_shot:
results['data']['memory'][-1].append(
[np.real(mem_slot), np.imag(mem_slot)])
if m_ret == 'avg':
results['data']['memory'] = results['data']['memory'][0]
all_results.append(results)
if not self.op_system.use_cpp_ode_func:
_cython_build_cleanup(self.op_system.global_data['rhs_file_name'])
return all_results
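As a standalone illustration of the meas_level 2 packing performed in run() above, where memory slot k contributes 2**k to the integer label that is then reported as a hex string (shot values illustrative):
```
import numpy as np

memory = np.array([[1, 1],
                   [0, 1],
                   [0, 1]], dtype=np.uint8)   # three shots, two memory slots

int_mem = memory.dot(np.power(2.0, np.arange(memory.shape[1]))).astype(int)
hex_mem = [hex(val) for val in int_mem]       # per-shot memory, e.g. '0x3'

unique = np.unique(int_mem, return_counts=True)
counts = {hex(unique[0][kk]): int(unique[1][kk])
          for kk in range(unique[0].shape[0])}
print(hex_mem)   # ['0x3', '0x2', '0x2']
print(counts)    # {'0x2': 2, '0x3': 1}
```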
# Measurement
def _proj_measurement(pid, ophandler, tt, state, memory, register=None):
"""
Projection measurement of quantum state
"""
prng = np.random.RandomState(np.random.randint(np.iinfo(np.int32).max - 1))
qubits = []
results = []
for key, acq in ophandler._acqs.items():
if pid < 0:
if any(acq.m_slot):
mem_slot_id = acq.m_slot[-1]
else:
continue
reg_slot_id = None
else:
if tt in acq.t1:
mem_slot_id = acq.m_slot[acq.t1.index(tt)]
reg_slot_id = acq.r_slot[acq.t1.index(tt)]
else:
continue
oper = ophandler._measure_ops[acq.name]
p_q = cy_expect_psi_csr(oper.data.data,
oper.data.indices,
oper.data.indptr,
state,
1)
# level2 measurement
rnd = prng.rand()
if rnd <= p_q:
outcome = 1
else:
outcome = 0
memory[mem_slot_id][0] = outcome
if reg_slot_id is not None and register is not None:
register[reg_slot_id] = outcome
qubits.append(key)
results.append(outcome)
# projection
if any(qubits):
psi_proj = apply_projector(qubits, results, ophandler.h_qub,
ophandler.h_osc, state)
else:
psi_proj = state
return psi_proj

View File

@ -1,54 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Routines for generating and loading runtime RHS function"""
import os
from .codegen import OPCodegen
from . import settings as op_set
def _op_generate_rhs(op_system):
""" Generates the RHS Cython file for solving the sytem
described in op_system
Args:
op_system (OPSystem): An OpenPulse system object.
"""
name = "rhs" + str(os.getpid()) + str(op_set.CGEN_NUM) + '_op'
op_system.global_data['rhs_file_name'] = name
cgen = OPCodegen(op_system)
cgen.generate(name + ".pyx")
def _op_func_load(op_system):
"""Loads the Cython function defined in the file
`rhs_file_name.pyx` where `rhs_file_name` is
stored in the op_system.
Args:
op_system (OPSystem): An OpenPulse system object.
"""
if op_system.use_cpp_ode_func:
# pylint: disable=no-name-in-module, import-error, import-outside-toplevel
from ..cy.numeric_integrator_wrapper import td_ode_rhs_static
op_system.global_data['rhs_func'] = td_ode_rhs_static
else:
code = compile('from ' + op_system.global_data['rhs_file_name'] +
' import cy_td_ode_rhs', '<string>', 'exec')
# pylint: disable=exec-used
exec(code, globals())
# pylint: disable=undefined-variable
op_system.global_data['rhs_func'] = cy_td_ode_rhs

View File

@ -1,117 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=unused-variable, no-name-in-module, protected-access,
# pylint: disable=invalid-name, import-error, exec-used
"""Module for unitary pulse evolution.
"""
import logging
import numpy as np
from scipy.integrate import ode
from scipy.linalg.blas import get_blas_funcs
from ..cy.measure import occ_probabilities, write_shots_memory
dznrm2 = get_blas_funcs("znrm2", dtype=np.float64)
def unitary_evolution(exp, op_system):
"""
Calculates evolution when there is no noise and no
measurements other than at the end
of the experiment.
Args:
exp (dict): Dictionary of experimental pulse and fc
op_system (OPSystem): Global OpenPulse system settings
Returns:
array: Memory of shots.
Raises:
Exception: Error in ODE solver.
"""
global_data = op_system.global_data
ode_options = op_system.ode_options
cy_rhs_func = global_data['rhs_func']
rng = np.random.RandomState(exp['seed'])
tlist = exp['tlist']
snapshots = []
shots = global_data['shots']
# Init memory
memory = np.zeros((shots, global_data['memory_slots']),
dtype=np.uint8)
# Init register
register = np.zeros(global_data['n_registers'], dtype=np.uint8)
num_channels = len(exp['channels'])
ODE = ode(cy_rhs_func)
if op_system.use_cpp_ode_func:
# We don't know how to handle the OrderedDict type on the Cython side, so convert it to a dict
channels = dict(op_system.channels)
ODE.set_f_params(global_data, exp, op_system.system, channels, register)
else:
_inst = 'ODE.set_f_params(%s)' % global_data['string']
logging.debug("Unitary Evolution: %s\n\n", _inst)
code = compile(_inst, '<string>', 'exec')
exec(code)  # pylint: disable=exec-used
ODE.set_integrator('zvode',
method=ode_options.method,
order=ode_options.order,
atol=ode_options.atol,
rtol=ode_options.rtol,
nsteps=ode_options.nsteps,
first_step=ode_options.first_step,
min_step=ode_options.min_step,
max_step=ode_options.max_step)
if not ODE._y:
ODE.t = 0.0
ODE._y = np.array([0.0], complex)
ODE._integrator.reset(len(ODE._y), ODE.jac is not None)
# Since all experiments are defined to start at zero time.
ODE.set_initial_value(global_data['initial_state'], 0)
for time in tlist[1:]:
ODE.integrate(time, step=0)
if ODE.successful():
psi = ODE.y / dznrm2(ODE.y)
else:
err_msg = 'ZVODE exited with status: %s' % ODE.get_return_code()
raise Exception(err_msg)
# Do any snapshots here
# set channel and frame change indexing arrays
# Do final measurement at end, only take acquire channels at the end
psi_rot = np.exp(-1j * global_data['h_diag_elems'] * ODE.t)
psi *= psi_rot
qubits = []
memory_slots = []
for acq in exp['acquire']:
if acq[0] == tlist[-1]:
qubits += list(acq[1])
memory_slots += list(acq[2])
qubits = np.array(qubits, dtype='uint32')
memory_slots = np.array(memory_slots, dtype='uint32')
probs = occ_probabilities(qubits, psi, global_data['measurement_ops'])
rand_vals = rng.rand(memory_slots.shape[0] * shots)
write_shots_memory(memory, memory_slots, probs, rand_vals)
return [memory, psi, ODE.t]
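The final sampling step above delegates to the compiled occ_probabilities / write_shots_memory routines. The numpy stand-in below is only an illustrative sketch, assuming a simple per-slot Bernoulli draw (outcome 1 when the random value falls below the occupation probability); it is not the actual kernel:
```
import numpy as np

rng = np.random.RandomState(42)
probs = np.array([0.9, 0.1])               # P(|1>) per measured slot, illustrative
shots = 5
memory_slots = np.array([0, 1], dtype=np.uint32)

memory = np.zeros((shots, memory_slots.shape[0]), dtype=np.uint8)
rand_vals = rng.rand(memory_slots.shape[0] * shots)

for shot in range(shots):
    for k, slot in enumerate(memory_slots):
        # one Bernoulli draw per (shot, slot)
        memory[shot, slot] = rand_vals[shot * memory_slots.shape[0] + k] < probs[k]
print(memory)
```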

View File

@ -1,39 +0,0 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
# pylint: disable=no-value-for-parameter, invalid-name
"""Hack for the ZVODE solver to do the correct stepping without restart."""
from scipy.integrate._ode import zvode
class qiskit_zvode(zvode):
"""Modifies the stepper for ZVODE so that
it always stops at a given time in tlist.
By default, it overshoots the requested time,
which is not what we want here.
"""
def step(self, *args):
itask = self.call_args[2]
# args[4] is the requested stop time; store it as TCRIT in rwork[0]
self.rwork[0] = args[4]
# ITASK=5: take a single internal step without passing TCRIT
self.call_args[2] = 5
r = self.run(*args)
# restore the original ITASK for subsequent calls
self.call_args[2] = itask
return r

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -11,3 +11,6 @@
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Models for objects built around tensor product systems.
"""

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -18,8 +18,8 @@
from collections import OrderedDict
import numpy as np
import numpy.linalg as la
from .qobj.opparse import HamiltonianParser
from ..aererror import AerError
from qiskit.providers.aer.aererror import AerError
from .string_model_parser.string_model_parser import HamiltonianParser
class HamiltonianModel():

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -18,8 +18,8 @@
from warnings import warn
from collections import OrderedDict
from qiskit.providers import BaseBackend
from qiskit.providers.aer.aererror import AerError
from .hamiltonian_model import HamiltonianModel
from ..aererror import AerError
class PulseSystemModel():

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -11,3 +11,6 @@
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Folder for string model parsers.
"""

View File

@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Functions for applying the scalar functions in __funcdict to operators
represented as qutip Qobj objects.
"""
def dag(qobj):
""" Qiskit wrapper of adjoint
"""
return qobj.dag()
def apply_func(name, qobj):
""" Apply function of given name, or do nothing if func not found
"""
return __funcdict.get(name, lambda x: x)(qobj)
# pylint: disable=invalid-name
__funcdict = {'dag': dag}

View File

@ -0,0 +1,79 @@
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Module for creating quantum operators."""
from ...qutip_extra_lite import qobj_generators
def gen_oper(opname, index, h_osc, h_qub, states=None):
"""Generate quantum operators.
Args:
opname (str): Name of the operator to be returned.
index (int): Index of operator.
h_osc (dict): Dimension of oscillator subspace
h_qub (dict): Dimension of qubit subspace
states (tuple): State indices of projection operator.
Returns:
Qobj: quantum operator for target qubit.
"""
opr_tmp = None
# get number of levels in Hilbert space
if opname in ['X', 'Y', 'Z', 'Sp', 'Sm', 'I', 'O', 'P']:
is_qubit = True
dim = h_qub.get(index, 2)
if opname in ['X', 'Y', 'Z'] and dim > 2:
if opname == 'X':
opr_tmp = qobj_generators.get_oper('A', dim) + qobj_generators.get_oper('C', dim)
elif opname == 'Y':
opr_tmp = (-1j * qobj_generators.get_oper('A', dim) +
1j * qobj_generators.get_oper('C', dim))
else:
opr_tmp = (qobj_generators.get_oper('I', dim) -
2 * qobj_generators.get_oper('N', dim))
else:
is_qubit = False
dim = h_osc.get(index, 5)
if opname == 'P':
opr_tmp = qobj_generators.get_oper(opname, dim, states)
else:
if opr_tmp is None:
opr_tmp = qobj_generators.get_oper(opname, dim)
# reverse sort by index
rev_h_osc = sorted(h_osc.items(), key=lambda x: x[0])[::-1]
rev_h_qub = sorted(h_qub.items(), key=lambda x: x[0])[::-1]
# osc_n * … * osc_0 * qubit_n * … * qubit_0
opers = []
for ii, dd in rev_h_osc:
if ii == index and not is_qubit:
opers.append(opr_tmp)
else:
opers.append(qobj_generators.qeye(dd))
for ii, dd in rev_h_qub:
if ii == index and is_qubit:
opers.append(opr_tmp)
else:
opers.append(qobj_generators.qeye(dd))
return qobj_generators.tensor(opers)
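The dim > 2 branch above replaces the Pauli labels with ladder-operator combinations; a small numpy check (the local destroy helper here is only for illustration, not the module's operator factory) that those expressions reduce to the usual Paulis at dim = 2:
```
import numpy as np

def destroy(dim):
    return np.diag(np.sqrt(np.arange(1, dim)), k=1)

dim = 2
A = destroy(dim)            # 'A' (destruction)
C = A.conj().T              # 'C' (creation)
N = C @ A                   # 'N' (number)
I = np.eye(dim)

X = A + C                   # 'X' for dim > 2 in gen_oper above
Y = -1j * A + 1j * C
Z = I - 2 * N

assert np.allclose(X, [[0, 1], [1, 0]])
assert np.allclose(Y, [[0, -1j], [1j, 0]])
assert np.allclose(Z, [[1, 0], [0, -1]])
```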

View File

@ -2,7 +2,7 @@
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@ -13,14 +13,14 @@
# that they have been altered from the originals.
# pylint: disable=invalid-name
""" The openpulse simulator parser"""
"""Parser for the string specification of Hamiltonians and noise models"""
import re
import copy
from collections import namedtuple, OrderedDict
import numpy as np
from .op_qobj import get_func
from .operators import gen_oper
from .apply_str_func_to_qobj import apply_func
from .qobj_from_string import gen_oper
Token = namedtuple('Token', ('type', 'name'))
@ -277,7 +277,7 @@ class HamiltonianParser:
elif token.name == '/':
stack.append(op1 / op2)
elif token.type in ['Func', 'Ext']:
stack.append(get_func(token.name, stack.pop(-1)))
stack.append(apply_func(token.name, stack.pop(-1)))
else:
raise Exception('Invalid token %s is found' % token.name)

View File

@ -1,14 +1,19 @@
# Cython OP extensions
include(Linter)
include(cython_utils)
find_package(Pybind11 REQUIRED)
find_package(NumPy REQUIRED)
# We need to remove the -static flag, because the Python extension system only
# supports dynamically linked libraries, but we want to build a shared library
# with as few dependencies as we can, so some of these dependencies are linked
# statically into our shared library.
string(REPLACE " -static " "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CYTHON_INSTALL_DIR "qiskit/providers/aer/pulse/cy/")
add_cython_module(numeric_integrator_wrapper numeric_integrator.cpp)
add_cython_module(test_python_to_cpp)
function(numpy_pybind11_add_module target_name)
basic_pybind11_add_module(${target_name} ${ARGN})
target_include_directories(${target_name} PRIVATE ${AER_SIMULATOR_CPP_SRC_DIR}
PRIVATE ${AER_SIMULATOR_CPP_EXTERNAL_LIBS} PRIVATE ${NumPy_INCLUDE_DIRS})
target_link_libraries(${target_name} ${AER_LIBRARIES})
endfunction()
numpy_pybind11_add_module(pulse_utils pulse_utils_bindings.cpp pulse_utils.cpp numeric_integrator.cpp zspmv.cpp)
numpy_pybind11_add_module(test_python_to_cpp test_python_to_cpp.cpp)
install(TARGETS pulse_utils test_python_to_cpp LIBRARY DESTINATION "qiskit/providers/aer/pulse/de_solvers/")

View File

@ -95,36 +95,18 @@ complex_t chan_value(
return out;
}
PyArrayObject * create_py_array_from_vector(
complex_t * out,
int num_rows){
npy_intp dims = num_rows;
PyArrayObject * array = reinterpret_cast<PyArrayObject *>(PyArray_SimpleNewFromData(1, &dims, NPY_COMPLEX128, out));
PyArray_ENABLEFLAGS(array, NPY_OWNDATA);
#ifdef DEBUG
CALLGRIND_STOP_INSTRUMENTATION;
CALLGRIND_DUMP_STATS;
#endif
return array;
}
PyArrayObject * td_ode_rhs(
double t,
PyArrayObject * py_vec,
PyObject * py_global_data,
PyObject * py_exp,
PyObject * py_system,
PyObject * py_channels,
PyObject * py_register){
py::array_t<complex_t> td_ode_rhs(double t,
py::array_t<complex_t> the_vec,
py::object the_global_data,
py::object the_exp,
py::object the_system,
py::object the_channels,
py::object the_reg)
{
#ifdef DEBUG
CALLGRIND_START_INSTRUMENTATION;
#endif
import_array();
// I left this commented on purpose so we can use logging eventually
// This is just a RAII for the logger
//const Unregister unregister;
@ -133,6 +115,12 @@ PyArrayObject * td_ode_rhs(
//spdlog::set_level(spdlog::level::debug); // Set global log level to debug
//spdlog::flush_on(spdlog::level::debug);
PyArrayObject * py_vec = reinterpret_cast<PyArrayObject *>(the_vec.ptr());
PyObject * py_global_data = the_global_data.ptr();
PyObject * py_exp = the_exp.ptr();
PyObject * py_system = the_system.ptr();
PyObject * py_register = the_reg.ptr();
if(py_vec == nullptr ||
py_global_data == nullptr ||
py_exp == nullptr ||
@ -183,17 +171,13 @@ PyArrayObject * td_ode_rhs(
auto idxs = get_vec_from_dict_item<NpArray<int>>(py_global_data, "h_ops_ind");
auto ptrs = get_vec_from_dict_item<NpArray<int>>(py_global_data, "h_ops_ptr");
auto energy = get_value_from_dict_item<NpArray<double>>(py_global_data, "h_diag_elems");
for(const auto& idx_sys : enumerate(systems)){
auto sys_index = idx_sys.first;
auto sys = idx_sys.second;
for(int h_idx = 0; h_idx < num_h_terms; h_idx++){
// TODO: Refactor
std::string term;
if(sys_index == systems.size() && num_h_terms > systems.size()){
if(h_idx == systems.size() && num_h_terms > systems.size()){
term = "1.0";
}else if(sys_index < systems.size()){
//term = sys.second;
term = sys.term;
}else if(h_idx < systems.size()){
term = systems[h_idx].term;
}else{
continue;
}
@ -202,16 +186,16 @@ PyArrayObject * td_ode_rhs(
if(std::abs(td) > 1e-15){
for(auto i=0; i<num_rows; i++){
complex_t dot = {0., 0.};
auto row_start = ptrs[sys_index][i];
auto row_end = ptrs[sys_index][i+1];
auto row_start = ptrs[h_idx][i];
auto row_end = ptrs[h_idx][i+1];
for(auto j = row_start; j<row_end; ++j){
auto tmp_idx = idxs[sys_index][j];
auto tmp_idx = idxs[h_idx][j];
auto osc_term =
std::exp(
complex_t(0.,1.) * (energy[i] - energy[tmp_idx]) * t
);
complex_t coef = (i < tmp_idx ? std::conj(td) : td);
dot += coef * osc_term * datas[sys_index][j] * vec[tmp_idx];
dot += coef * osc_term * datas[h_idx][j] * vec[tmp_idx];
}
out[i] += dot;
@ -223,5 +207,5 @@ PyArrayObject * td_ode_rhs(
out[i] += complex_t(0.,1.) * energy[i] * vec[i];
}
return create_py_array_from_vector(out, num_rows);
return py::array(num_rows, out);
}
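A NumPy reference sketch (not part of the diff) of the accumulation performed by td_ode_rhs above, with the channel/term evaluation abstracted into precomputed complex coefficients td; all names are illustrative:

# Hedged reference implementation of the CSR accumulation in td_ode_rhs.
import numpy as np

def td_rhs_reference(t, vec, terms, energy):
    """terms: list of (td, data, idxs, ptrs) with td the evaluated coefficient
    and (data, idxs, ptrs) the CSR arrays of the corresponding Hamiltonian part."""
    out = np.zeros_like(vec)
    for td, data, idxs, ptrs in terms:
        if abs(td) < 1e-15:
            continue
        for i in range(len(vec)):
            dot = 0j
            for j in range(ptrs[i], ptrs[i + 1]):
                col = idxs[j]
                osc_term = np.exp(1j * (energy[i] - energy[col]) * t)
                coef = np.conj(td) if i < col else td
                dot += coef * osc_term * data[j] * vec[col]
            out[i] += dot
    # diagonal (frame) contribution, as in the final loop of td_ode_rhs
    out += 1j * energy * vec
    return out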

View File

@ -19,16 +19,19 @@
#include <vector>
#include <complex>
#include <map>
#include <Python.h>
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
PyArrayObject * td_ode_rhs(
double ,
PyArrayObject * ,
PyObject * ,
PyObject * ,
PyObject * ,
PyObject * ,
PyObject *
);
#include "types.hpp"
namespace py = pybind11;
py::array_t<complex_t> td_ode_rhs(double t,
py::array_t<complex_t> vec,
py::object global_data,
py::object exp,
py::object system,
py::object channels,
py::object reg);
#endif // _NUMERIC_INTEGRATOR_HPP

View File

@ -1,31 +0,0 @@
#!python
#cython: language_level=3
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
cimport cython
cimport numpy as np
cdef extern from "numeric_integrator.hpp":
cdef np.ndarray td_ode_rhs(
double t,
np.ndarray vec,
dict global_data,
dict exp,
list system,
dict channels,
register
) except +
def td_ode_rhs_static(t, vec, global_data, exp, system, channels, register):
return td_ode_rhs(t, vec, global_data, exp, system, channels, register)

View File

@ -0,0 +1,122 @@
#include "pulse_utils.hpp"
#include "zspmv.hpp"
complex_t internal_expect_psi_csr(const py::array_t<complex_t>& data,
const py::array_t<int>& ind,
const py::array_t<int>& ptr,
const py::array_t<complex_t>& vec) {
auto data_raw = data.unchecked<1>();
auto vec_raw = vec.unchecked<1>();
auto ind_raw = ind.unchecked<1>();
auto ptr_raw = ptr.unchecked<1>();
auto nrows = vec.shape(0);
complex_t temp, expt = 0;
for (decltype(nrows) row = 0; row < nrows; row++) {
temp = 0;
auto vec_conj = std::conj(vec_raw[row]);
for (auto j = ptr_raw[row]; j < ptr_raw[row + 1]; j++) {
temp += data_raw[j] * vec_raw[ind_raw[j]];
}
expt += vec_conj * temp;
}
return expt;
}
py::object expect_psi_csr(py::array_t<complex_t> data,
py::array_t<int> ind,
py::array_t<int> ptr,
py::array_t<complex_t> vec,
bool isherm){
complex_t expt = internal_expect_psi_csr(data, ind, ptr, vec);
if(isherm){
return py::cast(std::real(expt));
}
return py::cast(expt);
}
py::array_t<double> occ_probabilities(py::array_t<int> qubits,
py::array_t<complex_t> state,
py::list meas_ops){
auto meas_size = meas_ops.size();
py::array_t<double> probs(meas_size);
auto probs_raw = probs.mutable_unchecked<1>();
for(decltype(meas_size) i=0; i < meas_size; i++){
auto data = meas_ops[i].attr("data").attr("data").cast<py::array_t<complex_t>>();
auto ind = meas_ops[i].attr("data").attr("indices").cast<py::array_t<int>>();
auto ptr = meas_ops[i].attr("data").attr("indptr").cast<py::array_t<int>>();
probs_raw[i] = std::real(internal_expect_psi_csr(data, ind, ptr, state));
}
return probs;
}
void write_shots_memory(py::array_t<unsigned char> mem,
py::array_t<unsigned int> mem_slots,
py::array_t<double> probs,
py::array_t<double> rand_vals)
{
auto nrows = mem.shape(0);
auto nprobs = probs.shape(0);
unsigned char temp;
auto mem_raw = mem.mutable_unchecked<2>();
auto mem_slots_raw = mem_slots.unchecked<1>();
auto probs_raw = probs.unchecked<1>();
auto rand_vals_raw = rand_vals.unchecked<1>();
for(decltype(nrows) i = 0; i < nrows; i++){
for(decltype(nprobs) j = 0; j < nprobs; j++) {
temp = static_cast<unsigned char>(probs_raw[j] > rand_vals_raw[nprobs*i+j]);
if(temp) {
mem_raw(i, mem_slots_raw[j]) = temp;
}
}
}
}
void oplist_to_array(py::list A, py::array_t<complex_t> B, int start_idx)
{
auto lenA = A.size();
if((start_idx+lenA) > B.shape(0)) {
throw std::runtime_error(std::string("Input list does not fit into array if start_idx is ") + std::to_string(start_idx) + ".");
}
auto B_raw = B.mutable_unchecked<1>();
for(decltype(lenA) kk=0; kk < lenA; kk++){
auto item = A[kk].cast<py::list>();
B_raw[start_idx+kk] = complex_t(item[0].cast<double>(), item[1].cast<double>());
}
}
template <typename T>
T * get_raw_data(py::array_t<T> array)
{
return static_cast<T *>(array.request().ptr);
}
py::array_t<complex_t> spmv_csr(py::array_t<complex_t> data,
py::array_t<int> ind,
py::array_t<int> ptr,
py::array_t<complex_t> vec)
{
auto data_raw = get_raw_data(data);
auto ind_raw = get_raw_data(ind);
auto ptr_raw = get_raw_data(ptr);
auto vec_raw = get_raw_data(vec);
auto num_rows = vec.shape(0);
py::array_t<complex_t> out(num_rows);
auto out_raw = get_raw_data(out);
memset(&out_raw[0], 0, num_rows * sizeof(complex_t));
zspmvpy(data_raw, ind_raw, ptr_raw, vec_raw, 1.0, out_raw, num_rows);
return out;
}
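A hedged NumPy/SciPy reference (not part of the diff) of what internal_expect_psi_csr computes, namely <vec| M |vec> for a matrix M given in CSR form; occ_probabilities takes the real part of this quantity for each measurement projector:

# Hedged reference; runnable without the compiled module.
import numpy as np
from scipy.sparse import csr_matrix

def expect_psi_csr_reference(data, ind, ptr, vec):
    mat = csr_matrix((data, ind, ptr), shape=(len(vec), len(vec)))
    return np.vdot(vec, mat.dot(vec))

# e.g. the number operator on a 2-level system and the excited state
num_op = csr_matrix(np.diag([0.0, 1.0]).astype(complex))
vec = np.array([0.0, 1.0], dtype=complex)
print(expect_psi_csr_reference(num_op.data, num_op.indices, num_op.indptr, vec))  # (1+0j)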

View File

@ -0,0 +1,93 @@
/**
* This code is part of Qiskit.
*
* (C) Copyright IBM 2018, 2019, 2020.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#ifndef PULSE_UTILS_H
#define PULSE_UTILS_H
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include "types.hpp"
namespace py = pybind11;
py::object expect_psi_csr(py::array_t<complex_t> data,
py::array_t<int> ind,
py::array_t<int> ptr,
py::array_t<complex_t> vec,
bool isherm);
//============================================================================
// Computes the occupation probabilities of the specified qubits for
// the given state.
// Args:
// qubits (int array): Ints labelling which qubits are to be measured.
//============================================================================
py::array_t<double> occ_probabilities(py::array_t<int> qubits,
py::array_t<complex_t> state,
py::list meas_ops);
//============================================================================
// Converts probabilities back into shots
// Args:
// mem: 2D shot-memory array (shots x memory slots), filled in place
// mem_slots: memory slot index for each measured qubit
// probs: occupation probabilities (expectation values of the measurement projectors)
// rand_vals: random values used to convert back into shots
//============================================================================
void write_shots_memory(py::array_t<unsigned char> mem,
py::array_t<unsigned int> mem_slots,
py::array_t<double> probs,
py::array_t<double> rand_vals);
//============================================================================
// Takes a list of complex numbers represented by a list
// of pairs of floats, and inserts them into a complex NumPy
// array at a given starting index.
//
// Parameters:
// A (list): A nested-list of [re, im] pairs.
// B (ndarray): Array for storing complex numbers from list A.
// start_idx (int): The starting index at which to insert elements.
//============================================================================
void oplist_to_array(py::list A, py::array_t<complex_t> B, int start_idx);
//============================================================================
// Sparse matrix, dense vector multiplication.
// Here the vector is assumed to be one-dimensional.
// Matrix must be in CSR format and have complex entries.
//
// Parameters
// ----------
// data : array
// Data for sparse matrix.
// idx : array
// Indices for sparse matrix data.
// ptr : array
// Pointers for sparse matrix data.
// vec : array
// Dense vector for multiplication. Must be one-dimensional.
//
// Returns
// -------
// out : array
// Returns dense array.
//============================================================================
py::array_t<complex_t> spmv_csr(py::array_t<complex_t> data,
py::array_t<int> ind,
py::array_t<int> ptr,
py::array_t<complex_t> vec);
#endif //PULSE_UTILS_H
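A hedged SciPy reference (not part of the diff) of the operation spmv_csr performs, multiplying a CSR matrix given by (data, ind, ptr) with a dense one-dimensional complex vector:

# Hedged reference; runnable without the compiled module.
import numpy as np
from scipy.sparse import csr_matrix

def spmv_csr_reference(data, ind, ptr, vec):
    n = len(vec)
    return csr_matrix((data, ind, ptr), shape=(n, n)).dot(vec)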

View File

@ -0,0 +1,27 @@
/**
* This code is part of Qiskit.
*
* (C) Copyright IBM 2018, 2019, 2020.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "numeric_integrator.hpp"
#include "pulse_utils.hpp"
PYBIND11_MODULE(pulse_utils, m) {
m.doc() = "Utility functions for pulse simulator"; // optional module docstring
m.def("td_ode_rhs_static", &td_ode_rhs, "Compute rhs for ODE");
m.def("cy_expect_psi_csr", &expect_psi_csr, "Expected value for a operator");
m.def("occ_probabilities", &occ_probabilities, "Computes the occupation probabilities of the specifed qubits for the given state");
m.def("write_shots_memory", &write_shots_memory, "Converts probabilities back into shots");
m.def("oplist_to_array", &oplist_to_array, "Insert list of complex numbers into numpy complex array");
m.def("spmv_csr", &spmv_csr, "Sparse matrix, dense vector multiplication.");
}
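A hedged usage sketch (not part of the diff) of the bindings registered above. The module name comes from PYBIND11_MODULE; the import path assumes the install destination set in the CMake file earlier in this commit and may differ in practice:

# Hedged sketch; the import path is an assumption.
import numpy as np
from qiskit.providers.aer.pulse.de_solvers.pulse_utils import (
    oplist_to_array, spmv_csr)

# Insert [re, im] pairs into a preallocated complex array starting at index 1.
buf = np.zeros(4, dtype=complex)
oplist_to_array([[1.0, 0.0], [0.0, 2.0]], buf, 1)   # buf -> [0, 1+0j, 2j, 0]

# Sparse-matrix / dense-vector product with the CSR data of a 2x2 identity.
data = np.array([1.0, 1.0], dtype=complex)
ind = np.array([0, 1], dtype=np.int32)
ptr = np.array([0, 1, 2], dtype=np.int32)
vec = np.array([0.5, -0.5j], dtype=complex)
out = spmv_csr(data, ind, ptr, vec)                  # == vec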

View File

@ -0,0 +1,66 @@
#include "test_python_to_cpp.hpp"
#include <csignal>
#include <unordered_map>
#include <vector>
#include <complex>
#include <Python.h>
#include <iostream>
#include "python_to_cpp.hpp"
bool cpp_test_py_list_to_cpp_vec(PyObject * val){
// val = [1., 2., 3.]
auto vec = get_value<std::vector<double>>(val);
auto expected = std::vector<double>{1., 2., 3.};
return vec == expected;
}
bool cpp_test_py_list_of_lists_to_cpp_vector_of_vectors(PyObject * val){
// val = [[1., 2., 3.]]
auto vec = get_value<std::vector<std::vector<double>>>(val);
auto expected = std::vector<std::vector<double>>{{1., 2., 3.}};
return vec == expected;
}
bool cpp_test_py_dict_string_numeric_to_cpp_map_string_numeric(PyObject * val){
// val = {"key": 1}
auto map = get_value<std::unordered_map<std::string, long>>(val);
auto expected = std::unordered_map<std::string, long>{{"key", 1}};
return map == expected;
}
bool cpp_test_py_dict_string_list_of_list_of_doubles_to_cpp_map_string_vec_of_vecs_of_doubles(PyObject * val){
// val = {"key": [[1., 2., 3.]]}
auto map = get_value<std::unordered_map<std::string, std::vector<std::vector<double>>>>(val);
auto expected = std::unordered_map<std::string, std::vector<std::vector<double>>>{{"key", {{1., 2., 3.}}}};
return map == expected;
}
bool cpp_test_np_array_of_doubles(PyArrayObject * val){
// val = np.array([0., 1., 2., 3.])
auto vec = get_value<NpArray<double>>(val);
if(vec[0] != 0. || vec[1] != 1. || vec[2] != 2. || vec[3] != 3.)
return false;
return true;
}
bool cpp_test_evaluate_hamiltonians(PyObject * val){
// TODO: Add tests!
return false;
}
bool cpp_test_py_ordered_map(PyObject * val){
// Ordered map should guarantee insertion order.
// val = {"D0": 1, "U0": 2, "D1": 3, "U1": 4}
std::vector<std::string> order = {"D0", "U0", "D1", "U1"};
auto ordered = get_value<ordered_map<std::string, long>>(val);
size_t i = 0;
for(const auto& elem: ordered) {
auto key = elem.first;
if(key != order[i++])
return false;
}
return true;
}

View File

@ -15,73 +15,38 @@
#ifndef _TEST_HELPERS_HPP
#define _TEST_HELPERS_HPP
#include <csignal>
#include <unordered_map>
#include <vector>
#include <complex>
#include <Python.h>
#include <iostream>
#include <numpy/arrayobject.h>
#include "python_to_cpp.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
namespace py = pybind11;
// TODO: Test QuantumObj
// TODO: Test Hamiltonian
bool cpp_test_py_list_to_cpp_vec(PyObject * val){
// val = [1., 2., 3.]
auto vec = get_value<std::vector<double>>(val);
auto expected = std::vector<double>{1., 2., 3.};
return vec == expected;
}
bool cpp_test_py_list_to_cpp_vec(PyObject * val);
bool cpp_test_py_list_of_lists_to_cpp_vector_of_vectors(PyObject * val);
bool cpp_test_py_dict_string_numeric_to_cpp_map_string_numeric(PyObject * val);
bool cpp_test_py_dict_string_list_of_list_of_doubles_to_cpp_map_string_vec_of_vecs_of_doubles(PyObject * val);
bool cpp_test_np_array_of_doubles(PyArrayObject * val);
bool cpp_test_evaluate_hamiltonians(PyObject * val);
bool cpp_test_py_ordered_map(PyObject * val);
bool cpp_test_py_list_of_lists_to_cpp_vector_of_vectors(PyObject * val){
// val = [[1., 2., 3.]]
auto vec = get_value<std::vector<std::vector<double>>>(val);
auto expected = std::vector<std::vector<double>>{{1., 2., 3.}};
return vec == expected;
}
PYBIND11_MODULE(test_python_to_cpp, m) {
m.doc() = "pybind11 test_python_to_cpp"; // optional module docstring
bool cpp_test_py_dict_string_numeric_to_cpp_map_string_numeric(PyObject * val){
// val = {"key": 1}
auto map = get_value<std::unordered_map<std::string, long>>(val);
auto expected = std::unordered_map<std::string, long>{{"key", 1}};
return map == expected;
}
bool cpp_test_py_dict_string_list_of_list_of_doubles_to_cpp_map_string_vec_of_vecs_of_doubles(PyObject * val){
// val = {"key": [[1., 2., 3.]]}
auto map = get_value<std::unordered_map<std::string, std::vector<std::vector<double>>>>(val);
auto expected = std::unordered_map<std::string, std::vector<std::vector<double>>>{{"key", {{1., 2., 3.}}}};
return map == expected;
}
bool cpp_test_np_array_of_doubles(PyArrayObject * val){
// val = np.array([0., 1., 2., 3.])
auto vec = get_value<NpArray<double>>(val);
if(vec[0] != 0. || vec[1] != 1. || vec[2] != 2. || vec[3] != 3.)
return false;
return true;
}
bool cpp_test_evaluate_hamiltonians(PyObject * val){
// TODO: Add tests!
return false;
}
bool cpp_test_py_ordered_map(PyObject * val){
// Ordered map should guarantee insertion order.
// val = {"D0": 1, "U0": 2, "D1": 3, "U1": 4}
std::vector<std::string> order = {"D0", "U0", "D1", "U1"};
auto ordered = get_value<ordered_map<std::string, long>>(val);
size_t i = 0;
for(const auto& elem: ordered) {
auto key = elem.first;
if(key != order[i++])
return false;
}
return true;
m.def("test_py_list_to_cpp_vec", [](py::list list) { return cpp_test_py_list_to_cpp_vec(list.ptr()); } , "");
m.def("test_py_list_of_lists_to_cpp_vector_of_vectors",
[](py::list list) { return cpp_test_py_list_of_lists_to_cpp_vector_of_vectors(list.ptr()); } , "");
m.def("test_py_dict_string_numeric_to_cpp_map_string_numeric",
[](py::dict dict) { return cpp_test_py_dict_string_numeric_to_cpp_map_string_numeric(dict.ptr()); } , "");
m.def("test_py_dict_string_list_of_list_of_doubles_to_cpp_map_string_vec_of_vecs_of_doubles",
[](py::dict dict) { return cpp_test_py_dict_string_list_of_list_of_doubles_to_cpp_map_string_vec_of_vecs_of_doubles(dict.ptr()); } , "");
m.def("test_np_array_of_doubles",
[](py::array_t<double> array_doubles) { return cpp_test_np_array_of_doubles(reinterpret_cast<PyArrayObject *>(array_doubles.ptr())); } , "");
m.def("test_evaluate_hamiltonians", [](py::list list) { return cpp_test_evaluate_hamiltonians(list.ptr()); } , "");
m.def("test_py_ordered_map", [](py::dict dict) { return cpp_test_py_ordered_map(dict.ptr()); } , "");
}
#endif // _TEST_HELPERS_HPP
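A short usage sketch (not part of the diff) exercising the pybind11 test bindings defined above; the import path mirrors the one used in the updated unit test later in this commit:

# Hedged sketch; expected to return True per the C++ implementations above.
from qiskit.providers.aer.pulse.de_solvers.test_python_to_cpp import (
    test_py_list_to_cpp_vec, test_py_ordered_map)

assert test_py_list_to_cpp_vec([1., 2., 3.])
assert test_py_ordered_map({"D0": 1, "U0": 2, "D1": 3, "U1": 4})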

View File

@ -1,54 +0,0 @@
#!python
#cython: language_level=3
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
cimport cython
from cpython.ref cimport PyObject
from libcpp.vector cimport vector
from libcpp.unordered_map cimport unordered_map
from libcpp.string cimport string
from libcpp.complex cimport complex
from libcpp cimport bool
import numpy as np
cimport numpy as np
# These definitions are only for testing the C++ wrappers over Python C API
cdef extern from "test_python_to_cpp.hpp":
cdef bool cpp_test_py_list_to_cpp_vec(list val)
cdef bool cpp_test_py_list_of_lists_to_cpp_vector_of_vectors(list val)
cdef bool cpp_test_py_dict_string_numeric_to_cpp_map_string_numeric(dict val)
cdef bool cpp_test_py_dict_string_list_of_list_of_doubles_to_cpp_map_string_vec_of_vecs_of_doubles(dict val)
cdef bool cpp_test_np_array_of_doubles(np.ndarray val)
cdef bool cpp_test_evaluate_hamiltonians(list val)
cdef bool cpp_test_py_ordered_map(dict val)
def test_py_list_to_cpp_vec(val):
return cpp_test_py_list_to_cpp_vec(val)
def test_py_list_of_lists_to_cpp_vector_of_vectors(val):
return cpp_test_py_list_of_lists_to_cpp_vector_of_vectors(val)
def test_py_dict_string_numeric_to_cpp_map_string_numeric(val):
return cpp_test_py_dict_string_numeric_to_cpp_map_string_numeric(val)
def test_py_dict_string_list_of_list_of_doubles_to_cpp_map_string_vec_of_vecs_of_doubles(val):
return cpp_test_py_dict_string_list_of_list_of_doubles_to_cpp_map_string_vec_of_vecs_of_doubles(val)
def test_np_array_of_doubles(val):
return cpp_test_np_array_of_doubles(val)
def test_evaluate_hamiltonians(val):
return cpp_test_evaluate_hamiltonians(val)
def test_py_ordered_map(val):
return cpp_test_py_ordered_map(val)

View File

@ -28,24 +28,9 @@ from qiskit.compiler import assemble
from qiskit.quantum_info import state_fidelity
from qiskit.pulse import (Schedule, Play, ShiftPhase, Acquire, SamplePulse, DriveChannel,
ControlChannel, AcquireChannel, MemorySlot)
from qiskit.providers.aer.pulse.pulse_system_model import PulseSystemModel
from qiskit.providers.aer.pulse.hamiltonian_model import HamiltonianModel
from qiskit.providers.aer.pulse.system_models.pulse_system_model import PulseSystemModel
from qiskit.providers.aer.pulse.system_models.hamiltonian_model import HamiltonianModel
USE_CPP_ODE_FUNC = True
def run_cython_and_cpp_solvers(func):
""" This is a temporary decorator to test both the C++ solver and Cython one.
It should be removed when the cyhton one will be removed """
@functools.wraps(func)
def wrapper(*args, **kwargs):
# pylint: disable=global-statement
global USE_CPP_ODE_FUNC
USE_CPP_ODE_FUNC = True
# Run C++ Solver first
func(*args, **kwargs)
# Run Cython Solver afterwards
USE_CPP_ODE_FUNC = False
func(*args, **kwargs)
return wrapper
class TestPulseSimulator(common.QiskitAerTestCase):
r"""PulseSimulator tests.
@ -77,7 +62,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
# Test single qubit gates (using meas level 2 and square drive)
# ---------------------------------------------------------------------
@run_cython_and_cpp_solvers
def test_x_gate(self):
"""
Test x gate. Set omega_d0=omega_0 (drive on resonance), phi=0, omega_a = pi/time
@ -103,7 +87,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
# set backend backend_options
backend_options = {'seed' : 9000}
backend_options['use_cpp_ode_func'] = True if USE_CPP_ODE_FUNC else False
# run simulation
result = self.backend_sim.run(qobj, system_model=system_model,
@ -114,7 +97,45 @@ class TestPulseSimulator(common.QiskitAerTestCase):
exp_counts = {'1': 256}
self.assertDictAlmostEqual(counts, exp_counts)
@run_cython_and_cpp_solvers
def test_1Q_noise(self):
"""
Tests simulation of noise operators. Uses the same schedule as test_x_gate, but
with a high level of amplitude damping noise.
"""
# setup system model
total_samples = 100
omega_0 = 2 * np.pi
omega_d0 = omega_0
omega_a = np.pi / total_samples
system_model = self._system_model_1Q(omega_0, omega_a)
# set up schedule and qobj
schedule = self._simple_1Q_schedule(0, total_samples)
qobj = assemble([schedule],
backend=self.backend_sim,
meas_level=2,
meas_return='single',
meas_map=[[0]],
qubit_lo_freq=[omega_d0/(2*np.pi)],
memory_slots=2,
shots=256)
# set seed for simulation, and set noise
backend_options = {'seed' : 9000}
backend_options['noise_model'] = {"qubit": {"0": {"Sm": 1}}}
# run simulation
result = self.backend_sim.run(qobj, system_model=system_model,
backend_options=backend_options).result()
# test results
# This level of noise is high enough that all counts should yield 0,
# whereas in the noiseless simulation (in test_x_gate) all counts yield 1
counts = result.get_counts()
exp_counts = {'0': 256}
self.assertDictAlmostEqual(counts, exp_counts)
def test_dt_scaling_x_gate(self):
"""
Test that dt is being used correctly by the solver.
@ -147,7 +168,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
# set backend backend_options
backend_options = {'seed' : 9000}
backend_options['use_cpp_ode_func'] = True if USE_CPP_ODE_FUNC else False
# run simulation
result = self.backend_sim.run(qobj, system_model=system_model,
@ -162,7 +182,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
for scale in scales:
scale_test(scale)
@run_cython_and_cpp_solvers
def test_hadamard_gate(self):
"""Test Hadamard. Is a rotation of pi/2 about the y-axis. Set omega_d0=omega_0
(drive on resonance), phi=-pi/2, omega_a = pi/2/time
@ -196,7 +215,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
# set backend backend_options
backend_options = {'seed' : 9000}
backend_options['use_cpp_ode_func'] = True if USE_CPP_ODE_FUNC else False
# run simulation
result = self.backend_sim.run(qobj, system_model=system_model,
@ -212,7 +230,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
self.assertDictAlmostEqual(prop, exp_prop, delta=0.01)
@run_cython_and_cpp_solvers
def test_arbitrary_gate(self):
"""Test a few examples w/ arbitary drive, phase and amplitude. """
shots = 10000 # large number of shots so get good proportions
@ -244,7 +261,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
# Run qobj and compare prop to expected result
backend_options = {'seed' : 9000}
backend_options['use_cpp_ode_func'] = True if USE_CPP_ODE_FUNC else False
result = self.backend_sim.run(qobj, system_model, backend_options).result()
counts = result.get_counts()
@ -262,7 +278,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
self.assertDictAlmostEqual(prop, exp_prop, delta=0.01)
@run_cython_and_cpp_solvers
def test_meas_level_1(self):
"""Test measurement level 1. """
@ -296,7 +311,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
# set backend backend_options
backend_options = {'seed' : 9000}
backend_options['use_cpp_ode_func'] = True if USE_CPP_ODE_FUNC else False
result = self.backend_sim.run(qobj, system_model, backend_options).result()
@ -315,7 +329,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
self.assertDictAlmostEqual(iq_prop, exp_prop, delta=0.01)
@run_cython_and_cpp_solvers
def test_gaussian_drive(self):
"""Test gaussian drive pulse using meas_level_2. Set omega_d0=omega_0 (drive on resonance),
phi=0, omega_a = pi/time
@ -357,7 +370,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
memory_slots=2,
shots=1000)
backend_options = {'seed' : 9000}
backend_options['use_cpp_ode_func'] = True if USE_CPP_ODE_FUNC else False
result = self.backend_sim.run(qobj, system_model, backend_options).result()
statevector = result.get_statevector()
@ -368,7 +380,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
self.assertGreaterEqual(
state_fidelity(statevector, exp_statevector), 0.99)
@run_cython_and_cpp_solvers
def test_frame_change(self):
"""Test frame change command. """
shots = 10000
@ -405,7 +416,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
shots=shots)
backend_options = {'seed' : 9000}
backend_options['use_cpp_ode_func'] = True if USE_CPP_ODE_FUNC else False
result = self.backend_sim.run(qobj, system_model, backend_options).result()
counts = result.get_counts()
exp_counts = {'0': shots}
@ -434,7 +444,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
shots=shots)
backend_options = {'seed' : 9000}
backend_options['use_cpp_ode_func'] = True if USE_CPP_ODE_FUNC else False
result = self.backend_sim.run(qobj, system_model, backend_options).result()
counts = result.get_counts()
@ -448,7 +457,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
exp_prop = {'0' : prop0, '1': 1 - prop0}
self.assertDictAlmostEqual(prop_shift, exp_prop, delta=0.01)
@run_cython_and_cpp_solvers
def test_three_level(self):
r"""Test 3 level system. Compare statevectors as counts only use bitstrings. Analytic form
given in _analytic_statevector_3level function docstring.
@ -501,7 +509,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
memory_slots=2,
shots=shots)
backend_options = {'seed' : 9000}
backend_options['use_cpp_ode_func'] = True if USE_CPP_ODE_FUNC else False
result = self.backend_sim.run(qobj, system_model, backend_options).result()
statevector = result.get_statevector()
@ -527,7 +534,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
memory_slots=2,
shots=shots)
backend_options = {'seed' : 9000}
backend_options['use_cpp_ode_func'] = True if USE_CPP_ODE_FUNC else False
result = self.backend_sim.run(qobj, system_model, backend_options).result()
statevector = result.get_statevector()
@ -538,7 +544,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
self.assertGreaterEqual(
state_fidelity(statevector, exp_statevector), 0.99)
@run_cython_and_cpp_solvers
def test_interaction(self):
r"""Test 2 qubit interaction via swap gates."""
@ -574,7 +579,6 @@ class TestPulseSimulator(common.QiskitAerTestCase):
memory_slots=2,
shots=shots)
backend_options = {'seed': 12387}
backend_options['use_cpp_ode_func'] = True if USE_CPP_ODE_FUNC else False
result_pi_swap = self.backend_sim.run(qobj, system_model, backend_options).result()
counts_pi_swap = result_pi_swap.get_counts()

View File

@ -16,10 +16,10 @@ Tests for pulse system generator functions
import unittest
from numpy import array, array_equal, kron
from test.terra.common import QiskitAerTestCase
from qiskit.providers.aer.pulse.pulse_system_model import PulseSystemModel
from qiskit.providers.aer.pulse.hamiltonian_model import HamiltonianModel
from qiskit.providers.aer.pulse import duffing_model_generators as model_gen
from qiskit.providers.aer.pulse.qobj.op_qobj import get_oper
from qiskit.providers.aer.pulse.system_models.pulse_system_model import PulseSystemModel
from qiskit.providers.aer.pulse.system_models.hamiltonian_model import HamiltonianModel
from qiskit.providers.aer.pulse.system_models import duffing_model_generators as model_gen
from qiskit.providers.aer.pulse.qutip_extra_lite.qobj_generators import get_oper
class TestDuffingModelGenerators(QiskitAerTestCase):
"""Tests for functions in duffing_model_generators.py"""

View File

@ -19,15 +19,17 @@ import qiskit
import qiskit.pulse as pulse
from qiskit.pulse import pulse_lib
from qiskit.compiler import assemble
from qiskit.providers.aer.pulse.pulse_system_model import PulseSystemModel
from qiskit.providers.aer.pulse.hamiltonian_model import HamiltonianModel
from qiskit.providers.aer.pulse.qobj.digest import digest_pulse_obj
from qiskit.providers.aer.pulse.system_models.pulse_system_model import PulseSystemModel
from qiskit.providers.aer.pulse.system_models.hamiltonian_model import HamiltonianModel
class TestDigest(QiskitAerTestCase):
"""Testing of functions in providers.aer.pulse.qobj.digest."""
"""Testing of functions in providers.aer.pulse.qobj.digest.
This may need to be removed entirely."""
def setUp(self):
self.backend_sim = backend_sim = qiskit.Aer.get_backend('pulse_simulator')
self.skipTest('The functionality in digest is being refactored.')
def test_qubit_lo_freq_handling(self):
"""Test how digest_pulse_obj retrieves qubit_lo_freq from various locations."""
@ -39,7 +41,7 @@ class TestDigest(QiskitAerTestCase):
system_model = self._system_model_2Q()
pulse_qobj = assemble(schedules, backend=self.backend_sim)
op_system = digest_pulse_obj(pulse_qobj, system_model)
op_system = full_digest(pulse_qobj, system_model)
self.assertAlmostEqual(op_system.freqs['D0'], 4.999009804864)
self.assertAlmostEqual(op_system.freqs['D1'], 5.100990195135)
self.assertAlmostEqual(op_system.freqs['U0'], 5.100990195135)
@ -49,7 +51,7 @@ class TestDigest(QiskitAerTestCase):
system_model._qubit_freq_est = [4.9, 5.1]
pulse_qobj = assemble(schedules, backend=self.backend_sim)
op_system = digest_pulse_obj(pulse_qobj, system_model)
op_system = full_digest(pulse_qobj, system_model)
self.assertAlmostEqual(op_system.freqs['D0'], 4.9)
self.assertAlmostEqual(op_system.freqs['D1'], 5.1)
self.assertAlmostEqual(op_system.freqs['U0'], 5.1)
@ -59,7 +61,7 @@ class TestDigest(QiskitAerTestCase):
system_model._qubit_freq_est = [4.9, 5.1]
pulse_qobj = assemble(schedules, qubit_lo_freq=[4.8, 5.2], backend=self.backend_sim)
op_system = digest_pulse_obj(pulse_qobj, system_model)
op_system = full_digest(pulse_qobj, system_model)
self.assertAlmostEqual(op_system.freqs['D0'], 4.8)
self.assertAlmostEqual(op_system.freqs['D1'], 5.2)
self.assertAlmostEqual(op_system.freqs['U0'], 5.2)

View File

@ -23,8 +23,8 @@ from qiskit.test.mock import FakeOpenPulse2Q
import qiskit.pulse as pulse
from qiskit.pulse import pulse_lib
from qiskit.compiler import assemble
from qiskit.providers.aer.pulse.pulse_system_model import PulseSystemModel
from qiskit.providers.aer.pulse.hamiltonian_model import HamiltonianModel
from qiskit.providers.aer.pulse.system_models.pulse_system_model import PulseSystemModel
from qiskit.providers.aer.pulse.system_models.hamiltonian_model import HamiltonianModel
class BaseTestPulseSystemModel(QiskitAerTestCase):

View File

@ -13,8 +13,9 @@
import sys
import unittest
import numpy as np
from qiskit.providers.aer.pulse.qutip_lite.qobj import Qobj
from qiskit.providers.aer.pulse.cy.test_python_to_cpp import \
from qiskit.providers.aer.pulse.qutip_extra_lite.qobj import Qobj
#from qiskit.providers.aer.pulse.cy.test_python_to_cpp import \
from qiskit.providers.aer.pulse.de_solvers.test_python_to_cpp import \
test_py_list_to_cpp_vec, test_py_list_of_lists_to_cpp_vector_of_vectors,\
test_py_dict_string_numeric_to_cpp_map_string_numeric,\
test_py_dict_string_list_of_list_of_doubles_to_cpp_map_string_vec_of_vecs_of_doubles,\
@ -60,4 +61,3 @@ class TestPythonToCpp(unittest.TestCase):
# Since Python 3.6, dict insertion order is guaranteed
arg = {"D0": 1, "U0": 2, "D1": 3, "U1": 4}
self.assertTrue(test_py_ordered_map(arg))

View File

@ -14,7 +14,7 @@ from qiskit import execute
from qiskit import QuantumCircuit
from qiskit import QuantumRegister
from qiskit.providers.aer.pulse.duffing_model_generators import duffing_system_model
from qiskit.providers.aer.pulse.system_models.duffing_model_generators import duffing_system_model
from qiskit.pulse import Schedule, Play, Acquire, Gaussian, DriveChannel, AcquireChannel, MemorySlot
from qiskit.providers.aer import QasmSimulator