import os

import pennylane as qml
from pennylane import expval, var
from pennylane import numpy as np
from pennylane.utils import _flatten, unflatten
from pennylane.optimize import AdamOptimizer, GradientDescentOptimizer

import matplotlib.pyplot as plt

from vqc_loss_landscapes.pennylanecirq import *
from vqc_loss_landscapes.data_helper import *

from tqdm.notebook import tqdm

%load_ext autoreload
%autoreload 2

Quick test run of the circuit

width = 2
device = "qiskit.aer"
num_layers = 2

model = Reuploading_model(device=device, width=width, layers=num_layers)
dev = model.dev
params = model.init_params
model.qcircuit(params, x=[1,2,0], y=1)
0.458984375

Train Circuit and calculate Hessian during training

num_test = 100
Xtest, y_test = generate_circle_data(num_test)
X_test = np.hstack((Xtest, np.zeros((Xtest.shape[0], 1)))) # Make input data 3D for Rot()

zz = model.qcircuit(params, x=X_test[0], y=y_test[0])
print(y_test[0], zz)
1 0.091796875




directory = "reupload_simple_circle"

# Generate training and test data
num_training = 200
num_test = 1000

# In the data re-uploading paper, the 2D input is padded to a 3D vector whose last
# entry is simply 0, because the Rot() gate takes three parameters. PennyLane, however,
# divides the gradient by this value, which raises an error when it is exactly zero.
# We can either add some small value epsilon, or change the QNode class in PennyLane
# (for instructions, see below).
epsilon = 0

Xdata, y_train = generate_circle_data(num_training)
X_train = np.hstack((Xdata, epsilon*np.ones((Xdata.shape[0], 1))))

Xtest, y_test = generate_circle_data(num_test)
X_test = np.hstack((Xtest, epsilon*np.ones((Xtest.shape[0], 1))))
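
To sanity check the data, we can plot it. The following lines are only a sketch: they assume generate_circle_data returns 2D coordinates in the first array and binary class labels in the second, which is how the arrays are used above.

# Scatter plot of the raw 2D training points, colored by class label (illustrative sketch)
plt.scatter(Xdata[:, 0], Xdata[:, 1], c=y_train, cmap="coolwarm", s=10)
plt.gca().set_aspect("equal")
plt.title("Training data from generate_circle_data")
plt.show()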



epochs = 1
batch_size = 32

def training_run(width=None, num_layers=None):
    # Uses the globally defined `model` (built above with the same width and
    # number of layers) together with X_train/y_train and X_test/y_test.
    lr = 0.5
    opt = qml.GradientDescentOptimizer(lr)

    params = model.init_params
    loss_list = []

    for it in tqdm(range(epochs)):
        # Hessian of the cost on a small training subsample, evaluated once per epoch
        H_train = Hessian(params, model, X=X_train[0:10], y=y_train[0:10])
        Hev_train, Hv_train = np.linalg.eigh(H_train)

        predicted_train, states_train = model.test(params, X_train, y_train)
        accuracy_train = model.accuracy_score(y_train, predicted_train)
        loss = model.cost(params, X_train, y_train)
        loss_list.append(loss)

        predicted_test, states_test = model.test(params, X_test, y_test)
        accuracy_test = model.accuracy_score(y_test, predicted_test)

        for Xbatch, ybatch in iterate_minibatches(X_train, y_train, batch_size=batch_size):
            params = opt.step(lambda v: model.cost(v, Xbatch, ybatch), params)

        res = [it + 1, loss, accuracy_train, accuracy_test]
        print("Epoch: {:2d} | Loss: {:3f} | Train accuracy: {:3f} | Test accuracy: {:3f}".format(*res))


print("start_training")
training_run(width=2, num_layers=2)
start_training
Hessian Dimension: 24, 24
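
After the run, the eigenvalue spectrum of the Hessian can be inspected. The snippet below is a sketch built from the same calls used inside training_run; it assumes Hessian returns a square (num_params x num_params) array, consistent with the "Hessian Dimension: 24, 24" output above. Since training_run does not return the trained parameters, the initial parameters are used here purely for illustration.

# Hessian on a small training subsample and its eigenvalue spectrum (illustrative sketch)
H_final = Hessian(model.init_params, model, X=X_train[0:10], y=y_train[0:10])
eigenvalues, _ = np.linalg.eigh(H_final)  # eigh returns eigenvalues in ascending order

plt.bar(range(len(eigenvalues)), eigenvalues)
plt.xlabel("Eigenvalue index (sorted, ascending)")
plt.ylabel("Eigenvalue")
plt.title("Hessian eigenvalue spectrum")
plt.show()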