# --- Leftover sanity check (single-sample forward pass) ---------------------
# NOTE(review): lines below look like a debug cell kept from an earlier run;
# `num_test` is immediately overwritten further down (see below).
num_test = 100
Xtest, y_test = generate_circle_data(num_test)
X_test = np.hstack((Xtest, np.zeros((Xtest.shape[0], 1)))) # Make input data 3D for Rot()
# Evaluate the circuit on one labelled sample and print label vs. raw output.
zz = model.qcircuit(params, x=X_test[0], y=y_test[0])
print(y_test[0], zz)

# --- Experiment configuration ----------------------------------------------
directory = "reupload_simple_circle"  # output directory name for this experiment
# Generate training and test data
num_training = 200
num_test = 1000   # overwrites the value used for the sanity check above
epsilon = 0 #In re-uploading paper they take 2D input and convert it to 3D, but the last entry is simply 0.
# This is because the Rot() gate takes 3 parameters. But pennylane divides the gradient by this
# value and therefore we get an error, when we do that. So we can either add some small value
# epsilon. Or we change the qnode class in pennylane. For instructions, see below.
# NOTE(review): epsilon is 0 here, so the third feature column is all zeros —
# confirm this does not trigger the gradient-division issue described above.
Xdata, y_train = generate_circle_data(num_training)
X_train = np.hstack((Xdata, epsilon*np.ones((Xdata.shape[0], 1))))  # pad 2D inputs to 3D for Rot()
Xtest, y_test = generate_circle_data(num_test)
X_test = np.hstack((Xtest, epsilon*np.ones((Xtest.shape[0], 1))))
# Training hyperparameters (read as globals by training_run below).
epochs = 1
batch_size = 32
def training_run(width=None, num_layers=None):
    """Train the data re-uploading classifier with vanilla gradient descent.

    Reads the module-level globals ``X_train``, ``y_train``, ``X_test``,
    ``y_test``, ``epochs`` and ``batch_size``, and the project objects
    ``model``, ``qml``, ``tqdm`` and ``iterate_minibatches``.

    NOTE(review): ``width`` and ``num_layers`` are accepted for interface
    compatibility but are currently unused — the circuit shape comes from
    ``model``; confirm whether they were meant to configure it.

    Removed dead code from the original: unused ``device``/``qcirc`` locals
    and a per-epoch Hessian eigendecomposition whose results were discarded.
    """
    lr = 0.5  # gradient-descent step size
    params = model.init_params
    # GradientDescentOptimizer is configured entirely at construction time,
    # so create it once instead of once per epoch.
    opt = qml.GradientDescentOptimizer(lr)
    loss_list = []
    for it in tqdm(range(epochs)):
        # Metrics are evaluated on the params *before* this epoch's updates,
        # so the printed loss/accuracy lag one epoch behind the final params.
        predicted_train, _ = model.test(params, X_train, y_train)
        accuracy_train = model.accuracy_score(y_train, predicted_train)
        loss = model.cost(params, X_train, y_train)
        loss_list.append(loss)
        predicted_test, _ = model.test(params, X_test, y_test)
        accuracy_test = model.accuracy_score(y_test, predicted_test)
        # One pass of minibatch gradient-descent updates.
        for Xbatch, ybatch in iterate_minibatches(X_train, y_train, batch_size=batch_size):
            params = opt.step(lambda v: model.cost(v, Xbatch, ybatch), params)
        res = [it + 1, loss, accuracy_train, accuracy_test]
        print("Epoch: {:2d} | Loss: {:3f} | Train accuracy: {:3f} | Test accuracy: {:3f}".format(*res))
# Kick off the experiment. NOTE(review): width/num_layers are passed but
# currently ignored by training_run — confirm intended circuit configuration.
print("start_training")
training_run(width=2, num_layers=2)