Examples¶
Grover Example¶
The Grover search algorithm uses AmplitudeAmplification to find the target state marked by the oracle.
from spinqit import get_basic_simulator, get_compiler, Circuit, BasicSimulatorConfig
from spinqit import AmplitudeAmplification, GateBuilder, RepeatBuilder
from spinqit import H, X, Z
from spinqit.primitive import MultiControlledGateBuilder
from math import pi
circ = Circuit()
q = circ.allocateQubits(4)
hbuilder = RepeatBuilder(H, 4)
circ << (hbuilder.to_gate(), q)
# Build the oracle for 1100
oracle_builder = GateBuilder(4)
oracle_builder.append(X, [2])
oracle_builder.append(X, [3])
mcz_builder = MultiControlledGateBuilder(3, gate=Z)
oracle_builder.append(mcz_builder.to_gate(), list(range(4)))
oracle_builder.append(X, [2])
oracle_builder.append(X, [3])
grover = AmplitudeAmplification(oracle_builder.to_gate(), q)
circ.extend(grover.build())
# Set up the backend and the compiler
engine = get_basic_simulator()
comp = get_compiler("native")
optimization_level = 0
exe = comp.compile(circ, optimization_level)
config = BasicSimulatorConfig()
config.configure_shots(1024)
# Run
result = engine.execute(exe, config)
print(result.counts)
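With this oracle, the measurement statistics should concentrate on the marked state 1100. A minimal post-processing sketch, assuming result.counts behaves like a Python dictionary mapping bitstrings to observed counts:
# Pick the most frequent outcome (assumes result.counts is dict-like).
best = max(result.counts, key=result.counts.get)
print('Most frequent outcome:', best)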
HHL Example¶
Systems of linear equations are fundamental to many science and engineering problems, including many machine learning algorithms. The Harrow-Hassidim-Lloyd (HHL) algorithm can be used to solve such systems on a quantum computer.
from spinqit.algorithm import HHL
from spinqit import get_basic_simulator, BasicSimulatorConfig
import numpy as np
# Input the linear equations
mat = np.array([[2.5, -0.5], [-0.5, 2.5]])
vec = np.array([1, 0])
# Set up the backend
engine = get_basic_simulator()
config = BasicSimulatorConfig()
config.configure_shots(1024)
# Run
solver = HHL(mat, vec)
solver.run(engine, config)
print(solver.get_state())
print(solver.get_measurements())
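As a sanity check, the state returned by get_state() should match the normalized classical solution, up to a global phase and the qubit-ordering/ancilla conventions of the HHL register. NumPy can compute that reference directly:
# Classical cross-check: normalize the solution of mat @ x = vec.
x = np.linalg.solve(mat, vec)
print(x / np.linalg.norm(x))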
VQE Example¶
Variational Quantum Eigensolver (VQE) can be used for quantum chemistry and optimization problems. The following example shows how to use VQE to estimate the ground-state energy of the hydrogen molecule.
import numpy as np
from spinqit import generate_hamiltonian_matrix
from spinqit import Circuit, Rx, Rz, CX
from spinqit.algorithm import VQE
from spinqit.algorithm.optimizer import TorchOptimizer
ham = [("IIII", -0.04207255194749729),
       ("ZIII", 0.17771358229095718),
       ("IZII", 0.17771358229095718),
       ("IIZI", -0.24274501260934922),
       ("IIIZ", -0.24274501260934922),
       ("ZIZI", 0.1229333044929736),
       ("IZIZ", 0.1229333044929736),
       ("ZIIZ", 0.16768338855598627),
       ("IZZI", 0.16768338855598627),
       ("ZZII", 0.1705975927683594),
       ("IIZZ", 0.17627661394176986),
       ("YYXX", -0.044750084063012674),
       ("XXYY", -0.044750084063012674),
       ("YXXY", 0.044750084063012674),
       ("XYYX", 0.044750084063012674)]
depth = 1
qubit_num = len(ham[0][0])
Iter = 100
lr = 0.1
seed = 1024
np.random.seed(seed)
circ = Circuit()
qreg = circ.allocateQubits(qubit_num)
params = circ.add_params(shape=(depth, qubit_num, 3))
for d in range(depth):
    for q in range(qubit_num):
        circ << (Rx, qreg[q], params[d][q][0])
        circ << (Rz, qreg[q], params[d][q][1])
        circ << (Rx, qreg[q], params[d][q][2])
    for q in range(qubit_num - 1):
        circ.append(CX, [qreg[q], qreg[q + 1]])
    circ.append(CX, [qreg[qubit_num - 1], qreg[0]])
optimizer = TorchOptimizer(maxiter=Iter, verbose=False, learning_rate=lr)
ham_mat = generate_hamiltonian_matrix(ham)
vqe = VQE(ham_mat, optimizer, ansatz=circ, params=(depth, qubit_num, 3))
loss_list = vqe.run(mode='torch', grad_method='backprop')
# loss_list = vqe.run(mode='spinq', grad_method='param_shift')
# loss_list = vqe.run(mode='spinq', grad_method='adjoint_differentiation')
print(loss_list)
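Because this Hamiltonian acts on only 4 qubits, the converged VQE loss can be cross-checked against exact diagonalization. The sketch below builds the 16x16 matrix directly with NumPy from the same Pauli terms, independently of SpinQit:
# Exact ground-state energy from the Pauli terms above.
paulis = {'I': np.eye(2), 'X': np.array([[0, 1], [1, 0]]),
          'Y': np.array([[0, -1j], [1j, 0]]), 'Z': np.diag([1.0, -1.0])}
H_exact = np.zeros((2 ** qubit_num, 2 ** qubit_num), dtype=complex)
for label, coeff in ham:
    term = np.array([[1.0]])
    for p in label:
        term = np.kron(term, paulis[p])
    H_exact += coeff * term
print('Exact ground-state energy:', np.linalg.eigvalsh(H_exact).min())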
QAOA Example¶
Quantum approximate optimization algorithm (QAOA) is a framework to solve optimization problems. This example shows how to solve the MaxCut problem using QAOA in SpinQit.
import numpy as np
from spinqit import get_basic_simulator, BasicSimulatorConfig
from spinqit import generate_hamiltonian_matrix
from spinqit.algorithm.optimizer import TorchOptimizer
from spinqit.algorithm.qaoa import QAOA
vcount = 4
E = [(0,1), (1,2), (2,3), (3,0)]
# Build Hamiltonian
ham = []
for (u, v) in E:
    pauli_str = ''
    for i in range(vcount):
        if i == u or i == v:
            pauli_str += 'Z'
        else:
            pauli_str += 'I'
    ham.append((pauli_str, 1.0))
print(ham)
# ham = [('ZZII', 1.0), ('IZZI', 1.0), ('IIZZ', 1.0), ('ZIIZ', 1.0)]
qubit_num = vcount
depth = 4
iter_num = 30
lr = 0.1
np.random.seed(1024)
optimizer = TorchOptimizer(maxiter=iter_num, verbose=True, learning_rate=lr)
ham_mat = generate_hamiltonian_matrix(ham)
qaoa = QAOA(ham_mat, optimizer, depth)
loss = qaoa.run(mode='torch', grad_method='backprop')[-1]
result = qaoa.optimized_result
print(result.probabilities)
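The optimized distribution can be decoded into a cut of the graph. The sketch below assumes result.probabilities can be viewed as a length 2**qubit_num array over computational basis states, with bit i of the bitstring corresponding to vertex i (the bit ordering may differ in your SpinQit version):
# Decode the most probable bitstring and evaluate its cut value classically.
probs = np.asarray(result.probabilities)
best = int(np.argmax(probs))
bits = format(best, f'0{qubit_num}b')
cut = sum(1 for (u, v) in E if bits[u] != bits[v])
print('Best bitstring:', bits, 'cut value:', cut)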
Quantum Counting Example¶
This example shows how to estimate the number of marked items in the search space using the QuantumCounting algorithm. Among the 16 four-bit values in the example below, 8 end with 1.
from spinqit import get_basic_simulator, BasicSimulatorConfig
from spinqit import GateBuilder, RepeatBuilder
from spinqit import H, Z
from spinqit.algorithm import QuantumCounting
from math import pi
hbuilder = RepeatBuilder(H, 4)
# Build the oracle for ***1
oracle_builder = GateBuilder(4)
oracle_builder.append(Z, [3])
# Set up the backend
engine = get_basic_simulator()
config = BasicSimulatorConfig()
config.configure_shots(1024)
qc = QuantumCounting(4, 4, hbuilder.to_gate(), oracle_builder.to_gate())
ret = qc.run(engine, config)
print(ret)
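As a quick classical sanity check, the marked items can be counted directly; the estimate returned above should be close to this value:
# Exactly 8 of the 16 four-bit values end with 1.
print(sum(1 for n in range(16) if n % 2 == 1))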
QSearching Example¶
The QSearching algorithm searches for the index of the maximum or minimum value in an array.
from spinqit.algorithm import QSearching
dataset = [2, 3, 1, 4, 5, 6, 7, 15]
seed = 330
max_searcher = QSearching(seed=seed)
max_idx = max_searcher.search(dataset, show=False)
min_searcher = QSearching('min', backend_mode='torch', seed=seed)
min_idx = min_searcher.search(dataset, show=False)
print(max_idx, min_idx)
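For an array this small, the result is easy to verify classically:
# Classical reference indices for the maximum and minimum values.
import numpy as np
print(int(np.argmax(dataset)), int(np.argmin(dataset)))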
QNN Example¶
The example below shows how to use the PyTorch interface in SpinQit to solve a classification problem.
import numpy as np
import torch
import torch.optim as optim
from torch import nn
from functools import partial
from spinqit import Circuit, Rz, Ry, CX, generate_hamiltonian_matrix
from spinqit.primitive import amplitude_encoding
from spinqit.interface import to_qlayer, TorchQuantumFunction
from spinqit.algorithm.loss import expval
#@to_qlayer(backend_mode='nmr', grad_method='param_shift', measure=expval(generate_hamiltonian_matrix([('ZI', 1)])), ip='192.168.2.4', port=59585, account=('user1','123456'), task_name='classifier', task_desc='test')
@to_qlayer(backend_mode='torch', grad_method='backprop', measure=expval(generate_hamiltonian_matrix([('ZI', 1)])))
def build_circuit(state, weights_shape, qubit_num, layer_num):
    circ = Circuit()
    weight = circ.add_params(shape=weights_shape)
    q = circ.allocateQubits(qubit_num)
    state = state.numpy()
    # encode the input vector
    ilist = amplitude_encoding(state, q)
    circ.extend(ilist)
    for i in range(layer_num):
        for j in range(qubit_num):
            circ << (Rz, q[j], weight[i][j][0])
            circ << (Ry, q[j], weight[i][j][1])
            circ << (Rz, q[j], weight[i][j][2])
        circ << (CX, q)
    return circ
def get_data(file_path):
    data = np.loadtxt(file_path)
    Xdata = data[:, 0:2]
    padding = 0.3 * np.ones((len(Xdata), 1))
    X_pad = np.c_[np.c_[Xdata, padding], np.zeros((len(Xdata), 1))]
    normalization = np.sqrt(np.sum(X_pad ** 2, -1))
    X_norm = (X_pad.T / normalization).T
    features = X_norm
    Y = data[:, -1]
    np.random.seed(0)
    num_data = len(Y)
    n_train = int(0.75 * num_data)
    index = np.random.permutation(range(num_data))
    x_train = torch.tensor(features[index[:n_train]], requires_grad=False).to(torch.float32)
    y_train = torch.tensor(Y[index[:n_train]], requires_grad=False).to(torch.float32)
    x_val = torch.tensor(features[index[n_train:]], requires_grad=False).to(torch.float32)
    y_val = Y[index[n_train:]]
    return n_train, x_train, y_train, x_val, y_val
def get_model(qubit_num, layer_num, bias=None):
    weight_shape = (layer_num, qubit_num, 3)
    class MyQuantumModule(nn.Module):
        def __init__(self, qlayer_func, weight_shape, bias=None):
            super(MyQuantumModule, self).__init__()
            self.w = nn.Parameter(0.01 * torch.randn(weight_shape), requires_grad=True)
            self.b = bias
            self.qlayer_func = qlayer_func
        def forward(self, state):
            res = torch.zeros(state.size(0))
            if len(state.shape) > 1:
                for i in range(state.size(0)):
                    myqlayer = self.qlayer_func(state[i])
                    kwargs = dict(qlayer=myqlayer)
                    loss = TorchQuantumFunction.apply(kwargs, self.w)
                    res[i] += loss
            else:
                myqlayer = self.qlayer_func(state)
                kwargs = dict(qlayer=myqlayer)
                res = TorchQuantumFunction.apply(kwargs, self.w)
            if self.b is not None:
                return res + self.b
            return res
    qlayer_func = partial(build_circuit, weights_shape=weight_shape, qubit_num=qubit_num, layer_num=layer_num)
    model = MyQuantumModule(qlayer_func, weight_shape, bias)
    return model
def test_train(model, num_train, features_train, labels_train, features_val, labels_val):
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, nesterov=True)
    loss_fn = nn.MSELoss()
    iter = 45
    batch_size = 10
    print('-----begin train----------')
    for i in range(iter):
        batch_index = np.random.randint(0, num_train, (batch_size,))
        feats_train_batch = features_train[batch_index]
        Y_train_batch = labels_train[batch_index]
        optimizer.zero_grad()
        pred = model(feats_train_batch)
        loss = loss_fn(pred, Y_train_batch)
        print(f'Loss : {loss.item()}')
        loss.backward()
        optimizer.step()
    print('---------begin predict--------------')
    total_error = 0
    with torch.no_grad():
        for k in range(len(features_val)):
            test_x = features_val[k].reshape(-1)
            pred = model(test_x)
            print(pred, labels_val[k])
            if abs(labels_val[k] - np.sign(pred.item())) > 1e-5:
                total_error = total_error + 1
    print(total_error)
    assert np.allclose(total_error, 0)
if __name__ == '__main__':
    file_path = "resource/iris_classes_data.txt"
    qubit_num, layer_num = 2, 6
    seed = 1024
    np.random.seed(seed)
    torch.random.manual_seed(seed)
    bias = nn.Parameter(torch.tensor(0.0), requires_grad=True)
    model = get_model(qubit_num, layer_num, bias=bias)
    num_train, features_train, labels_train, features_val, labels_val = get_data(file_path)
    test_train(model, num_train, features_train, labels_train, features_val, labels_val)
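A note on the preprocessing in get_data: with 2 qubits there are 2 ** 2 = 4 amplitudes, so each 2-dimensional sample is padded to length 4 (with a constant 0.3 and a 0) and normalized to unit length before amplitude encoding. A minimal sketch of that step on a single toy sample (the feature values here are made up for illustration):
import numpy as np
# Pad a 2-feature sample to 4 entries and normalize it so it can be loaded
# as the amplitudes of a 2-qubit state.
x = np.array([1.2, 0.8])
x_pad = np.concatenate([x, [0.3, 0.0]])
x_norm = x_pad / np.linalg.norm(x_pad)
print(x_norm, np.sum(x_norm ** 2))  # the squared amplitudes sum to 1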
The example below shows how to use the TensorFlow interface in SpinQit to solve the same classification problem.
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, losses, optimizers
from spinqit import Circuit, Rz, Ry, CX, generate_hamiltonian_matrix, StateVector
from spinqit.interface import to_qlayer
from spinqit.interface.tf_interface import QuantumLayer
from spinqit.algorithm.loss import expval
@to_qlayer(backend_mode='torch',
           grad_method='backprop',
           measure=expval(generate_hamiltonian_matrix([('ZI', 1)])))
def build_circuit(state_shape, weights_shape, qubit_num, layer_num):
    circ = Circuit()
    state = circ.add_params(shape=state_shape)
    weight = circ.add_params(shape=weights_shape)
    q = circ.allocateQubits(qubit_num)
    circ << (StateVector, q, state[:])
    for i in range(layer_num):
        for j in range(qubit_num):
            circ << (Rz, q[j], weight[i][j][0])
            circ << (Ry, q[j], weight[i][j][1])
            circ << (Rz, q[j], weight[i][j][2])
        circ << (CX, q)
    return circ
def get_data(file_path):
    data = np.loadtxt(file_path)
    Xdata = data[:, 0:2]
    padding = 0.3 * np.ones((len(Xdata), 1))
    X_pad = np.c_[np.c_[Xdata, padding], np.zeros((len(Xdata), 1))]
    normalization = np.sqrt(np.sum(X_pad ** 2, -1))
    X_norm = (X_pad.T / normalization).T
    features = X_norm
    Y = data[:, -1]
    np.random.seed(0)
    num_data = len(Y)
    n_train = int(0.75 * num_data)
    index = np.random.permutation(range(num_data))
    x_train = tf.Variable(features[index[:n_train]], trainable=False)
    y_train = tf.Variable(Y[index[:n_train]], trainable=False)
    x_val = tf.Variable(features[index[n_train:]], trainable=False)
    y_val = Y[index[n_train:]]
    return n_train, x_train, y_train, x_val, y_val
def get_model(qubit_num, layer_num, bias=None):
    weight_shape = (layer_num, qubit_num, 3)
    state_shape = (2 ** qubit_num,)
    qlayer = build_circuit(state_shape, weight_shape, qubit_num, layer_num)
    ql = QuantumLayer(qlayer, weight_shape, bias)
    model = Sequential()
    model.add(ql)
    return model
def test_train(model, num_train, features_train, labels_train, features_val, labels_val):
    optimizer = optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
    loss_fn = losses.MeanSquaredError()
    iter = 55
    batch_size = 10
    print('-----begin train----------')
    for i in range(iter):
        batch_index = np.random.randint(0, num_train, (batch_size,))
        feats_train_batch = tf.gather(features_train, batch_index)
        Y_train_batch = tf.gather(labels_train, batch_index)
        with tf.GradientTape() as tape:
            pred = model(feats_train_batch, training=True)
            loss = loss_fn(Y_train_batch, pred)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        print(f'Loss : {loss.numpy()}')
    print('---------begin predict--------------')
    total_error = 0
    for k in range(features_val.shape[0]):
        test_x = features_val[k]
        pred = model(test_x)
        print(pred, labels_val[k])
        if abs(labels_val[k] - np.sign(pred.numpy())) > 1e-5:
            total_error = total_error + 1
    print(total_error)
    assert np.allclose(total_error, 0)
if __name__ == '__main__':
    file_path = "resource/iris_classes_data.txt"
    qubit_num, layer_num = 2, 6
    seed = 1024
    np.random.seed(seed)
    tf.random.set_seed(seed)
    bias = tf.Variable(0.0, trainable=True)
    model = get_model(qubit_num, layer_num, bias=bias)
    num_train, features_train, labels_train, features_val, labels_val = get_data(file_path)
    test_train(model, num_train, features_train, labels_train, features_val, labels_val)