def PlotCalibration(self):
    CalibrationFile = self.lineEditFile.text()
    power_calibration = np.loadtxt(CalibrationFile)
    # plot the calibration: row 0 is picker power, row 1 is power onto the cell
    plt.figure(1, figsize=(15, 10))
    power_onto_cell_mW = power_calibration[1, :]
    cal_picker_power_mW = power_calibration[0, :]
    f.plot_data(cal_picker_power_mW, power_onto_cell_mW,
                'Picker power (mW)', 'Power onto cell (mW)',
                'Calibration curve: Power onto cell vs Picker power',
                None, 111, show=False)
    plt.savefig(os.path.join(self.getDailyDirectory(),
                             'Plot of Power onto cell vs Picker Power.png'))
    plt.show()
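The `f.plot_data` helper itself is not shown in this listing. A minimal sketch consistent with the call sites in these examples (x, y, axis labels, title, optional y-limits, a matplotlib subplot code, and a `show` flag) could look like the following; the signature is read off the calls, the body is an assumption:

import matplotlib.pyplot as plt

def plot_data(x, y, xlabel, ylabel, title, ylim, subplot, show=True):
    """Hypothetical helper: plot y vs x on one subplot of the current figure."""
    ax = plt.subplot(subplot)    # e.g. 111, or 311/312/313 for stacked panels
    ax.plot(x, y)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    if ylim is not None:         # e.g. [-50, -10] in a later example
        ax.set_ylim(ylim)
    if show:
        plt.show()
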
Example #2
def plot_originals(ax_array, path, col):

    with open(path) as axis_data:
        data = json.load(axis_data)
        axis_targets = data['ang_pos_targets']
        target_times = data['target_times']
        tcp_vel = data['TCP_vel']
        n_axes = data['n_axis']
        delta_t = data['time_step_orig']
        n_targets = data['N_targets']

    axis_targets = axis_targets[0:n_axes]
    # numerical derivatives: positions -> velocities -> accelerations
    axis_vels = functions.derive_all(axis_targets, delta_t)
    axis_accs = functions.derive_all(axis_vels, delta_t)

    time_array = functions.real_time_array(n_targets, delta_t)
    functions.plot_data(ax_array[0][col], axis_targets, 'Angular Positions',
                        time_array)
    functions.plot_data(ax_array[1][col], axis_vels, 'Angular Velocity',
                        time_array)
    functions.plot_data(
        ax_array[2][col], axis_accs,
        'Angular Acceleration ' + str(functions.acc_sum(axis_accs, delta_t)),
        time_array)

    plotter = Plotter(ax_array)
    plotter.plot_TCP(axis_targets, col, time_array)
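`functions.derive_all` is used throughout the robot-trajectory examples to turn sampled positions into velocities and then accelerations. Its implementation is not shown; a plausible sketch, assuming a forward finite difference along the sample axis (which matches the `time_array[:-1]` and `time_array[:-2]` trimming in the examples below):

import numpy as np

def derive_all(samples, delta_t):
    """Assumed implementation: consecutive-sample finite difference.

    The output is one sample shorter than the input."""
    return (np.diff(np.asarray(samples, dtype=float), axis=0) / delta_t).tolist()
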
    def analyseCalibration(self):
        ephysfile = self.lineEditDir.text()  # ephys .smr file
        mean_picker_volts = f.GetMeanVolts(ephysfile,
                                           self.pulse_duration_ms,
                                           self.energy_list,
                                           dead_time=2,
                                           test=True)
        plt.figure(2, figsize=(15, 10))
        f.plot_data(self.energy_list, mean_picker_volts,
                    'Input RL energy (uJ)', 'Mean Picker volts (V)',
                    'Calibration curve: Mean Picker Volts versus Input RL energy',
                    None, 111, show=False)
        plt.savefig(os.path.join(
            self.TimeDirectory,
            'Figure 2- Plot of Mean Picker Volts versus Input RL energy.png'))
        plt.show()

        plt.figure(3, figsize=(15, 10))
        Power_density = f.convert_V_W(mean_picker_volts,
                                      self.picker_max_measurement_mW,
                                      self.picker_max_output_V,
                                      self.CalibrationFile, self.beam_diameter)

        f.plot_data(self.energy_list, Power_density,
                    'Input RL energy (uJ)', 'Mean Power Density in sample (mW/um2)',
                    'Calibration curve: Mean Power Density in sample versus Input RL energy',
                    None, 111, show=False)
        plt.savefig(os.path.join(
            self.TimeDirectory,
            'Figure 3- Plot of Mean Power Density in sample versus Input RL energy.png'))
        plt.show()

        data = {
            'energy_list': self.energy_list,
            'mean_picker_volts': mean_picker_volts,
            'Power_density': Power_density
        }
        results = pd.DataFrame(data=data)
        results.to_csv(os.path.join(
            self.TimeDirectory,
            'Mean power density in sample vs energy list.csv'))
        # once saved, close the window and go to the next tab
        self.close()
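`f.convert_V_W` turns the mean power-meter voltage into a power density in the sample. A sketch under explicit assumptions: a linear volts-to-milliwatts meter response against its full-scale output, the same two-row calibration file used in `PlotCalibration` (row 0 picker power, row 1 power onto the cell), and a circular beam whose diameter is given in µm:

import numpy as np

def convert_V_W(volts, max_mW, max_V, calibration_file, beam_diameter_um):
    """Hypothetical implementation of f.convert_V_W."""
    picker_mW = np.asarray(volts) * max_mW / max_V        # linear meter response
    cal = np.loadtxt(calibration_file)
    power_onto_cell_mW = np.interp(picker_mW, cal[0], cal[1])
    beam_area_um2 = np.pi * (beam_diameter_um / 2.0) ** 2
    return power_onto_cell_mW / beam_area_um2             # mW / um^2
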
def plotter_raw(ax_array, path, col):

    with open(path) as raw_data:
        data = json.load(raw_data)
        truth = data['jointArray']
        delta_t = data['delta_t']
        # +2 matches the two padded poses added below
        time_samples = len(truth[0]) + 2

    # Transpose from axis-major to time-major, then repeat the first and
    # last poses so the finite differences are zero at the boundaries.
    all_axis_poses = list(map(lambda *a: list(a), *truth))
    all_axis_poses = [all_axis_poses[0][:]] + all_axis_poses + [all_axis_poses[-1][:]]
    axis_vels = functions.derive_all(all_axis_poses, delta_t)
    axis_accs = functions.derive_all(axis_vels, delta_t)

    time_array = functions.real_time_array(time_samples, delta_t)

    functions.plot_data(ax_array[0][col], np.transpose(all_axis_poses),
                        text.position, time_array)

    oldvel = str(functions.vel_top(axis_vels, delta_t))
    print('old top velocity: ' + oldvel)
    functions.plot_data(ax_array[1][col], np.transpose(axis_vels),
                        text.velocity + oldvel, time_array[:-1])

    oldacc = str(functions.acc_sum(axis_accs, delta_t))
    print('old acc sum: ' + oldacc)
    functions.plot_data(ax_array[2][col], np.transpose(axis_accs),
                        text.acceleration + oldacc, time_array[:-2])

    plotter = Plotter(ax_array)
    if len(all_axis_poses) > 1:
        plotter.plot_TCP(all_axis_poses, col, time_array)
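The `map(lambda *a: list(a), *truth)` idiom above transposes a list of lists (each call receives one element from every sub-list); `list(map(list, zip(*truth)))` is the more common spelling:

truth = [[1, 2, 3], [4, 5, 6]]                      # axis-major: 2 axes, 3 samples
transposed = list(map(lambda *a: list(a), *truth))  # [[1, 4], [2, 5], [3, 6]]
assert transposed == list(map(list, zip(*truth)))
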
    def PlotChannels(self):
        ephys, picker, Vm, Im, picker_units, Vm_units, Im_units, Vm_Hz, Im_Hz, picker_Hz = f.loadEphysData(
            self.lineEditDir.text())
        # plot the data
        plt.figure(7, figsize=(20, 15))
        f.plot_data(picker.times, np.squeeze(picker),
                    'Time (s)', 'Picker power meter\nmeasurement output (V)',
                    'Picker power meter (measurement output) voltage vs time',
                    None, 311, show=False)
        f.plot_data(Vm.times,
                    np.squeeze(Vm),
                    None,
                    f'Membrane Voltage\n({Vm.units})',
                    'Membrane voltage vs time', [-50, -10],
                    312,
                    show=False)
        f.plot_data(Im.times,
                    np.squeeze(Im),
                    None,
                    f'Membrane Current\n({Im.units})',
                    'Membrane current vs time', [-1, 1],
                    313,
                    show=False)
        plt.savefig(os.path.join(self.TimeDirectory,
                                 'Figure 7- Plot of Ephys Channels (Part 1).png'))
        plt.show()
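`f.loadEphysData` is not included in the listing, but the `.times` and `.units` attributes used above suggest `neo` `AnalogSignal` objects read from the Spike2 `.smr` file. A minimal sketch of such a loader; the channel order and the exact ten return values are assumptions:

from neo.io import Spike2IO

def loadEphysData(smr_path):
    """Hypothetical loader returning picker, Vm and Im AnalogSignals."""
    segment = Spike2IO(filename=smr_path).read_block().segments[0]
    picker, Vm, Im = segment.analogsignals[:3]   # channel order is an assumption
    return (segment, picker, Vm, Im,
            picker.units, Vm.units, Im.units,
            Vm.sampling_rate, Im.sampling_rate, picker.sampling_rate)
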
    def plot_optimized(self, path, colNumber):
        with open(path) as axis_data:
            data = json.load(axis_data)
            resolution = data['resolution']
            time_samples = data['N']
            axis_poses_original = data['optimal_poses']
            axis_vels_original = data['optimal_velocity']
            axis_accs_original = data['optimal_acceleration']
            delta_t = data['time_step_orig']

        # subtract a constant 10*pi offset from the optimized poses
        axis_poses_original = list(
            map(lambda a: list(map(lambda b: b - np.pi * 10, a)),
                axis_poses_original))
        axis_poses = axis_poses_original

        # transpose to time-major before differentiating
        if len(axis_poses) > 1:
            axis_poses = list(map(lambda *a: list(a), *axis_poses))

        axis_vels = functions.derive_all(axis_poses, delta_t)
        axis_accs = functions.derive_all(axis_vels, delta_t)

        # transpose back to axis-major for plotting
        if len(axis_poses) > 1:
            axis_vels = np.transpose(axis_vels)
            axis_accs = np.transpose(axis_accs)

        time_array_upsampled = functions.real_time_array(
            time_samples, delta_t * resolution)

        functions.plot_data(self.ax_array[0][colNumber], axis_poses_original,
                            text.position, time_array_upsampled)

        newvel = str(functions.vel_top(axis_vels, delta_t))
        print('new top velocity: ' + newvel)
        functions.plot_data(self.ax_array[1][colNumber], axis_vels_original,
                            text.velocity + newvel, time_array_upsampled[:-1])

        newacc = str(functions.acc_sum(axis_accs, delta_t))
        print('new acc sum: ' + newacc)
        functions.plot_data(
            self.ax_array[2][colNumber], axis_accs_original,
            text.acceleration +
            str(functions.acc_sum(axis_accs_original, delta_t / resolution)),
            time_array_upsampled[:-2])

        if len(axis_poses_original) > 1:
            self.plot_TCP(np.transpose(axis_poses_original), colNumber,
                          time_array_upsampled)
Example #7
AAPL_df_M = AAPL_df.groupby(pd.Grouper(freq='M')).mean()
MSFT_df_M = MSFT_df.groupby(pd.Grouper(freq='M')).mean()
AMZN_df_M = AMZN_df.groupby(pd.Grouper(freq='M')).mean()
GOOG_df_M = GOOG_df.groupby(pd.Grouper(freq='M')).mean()
SP500_df_M = SP500_df.groupby(pd.Grouper(freq='M')).mean()

###############################################################################
######################## GET ONLY THE ADJ CLOSE COLUMN ########################

AAPL_adj_close = AAPL_df_M['Adj Close']
MSFT_adj_close = MSFT_df_M['Adj Close']
AMZN_adj_close = AMZN_df_M['Adj Close']
GOOG_adj_close = GOOG_df_M['Adj Close']
SP500_adj_close = SP500_df_M['Adj Close']

###############################################################################
#################### PLOTTING THE STOCK TAKEN INTO ANALYSIS ###################

f.plot_data(AAPL_adj_close, 'Adjusted close price of AAPL',
            ['Date (year)', 'Adj close price (USD)'], [colors[0]])

f.plot_data(MSFT_adj_close, 'Adjusted close price of MSFT',
            ['Date (year)', 'Adj close price (USD)'], [colors[1]])

f.plot_data(AMZN_adj_close, 'Adjusted close price of AMZN',
            ['Date (year)', 'Adj close price (USD)'], [colors[2]])

f.plot_data(GOOG_adj_close, 'Adjusted close price of GOOG',
            ['Date (year)', 'Adj close price (USD)'], [colors[3]])

###############################################################################
############ MERGE ALL STOCKS DATA INTO A DATA OBJECT AND PLOT IT #############

STOCKS_adj_close = pd.concat(
    [AAPL_adj_close, MSFT_adj_close, AMZN_adj_close, GOOG_adj_close], axis=1)
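This example's `f.plot_data` has a different signature from the subplot-based helper earlier in the listing: it takes a pandas object, a title, an `[xlabel, ylabel]` pair and a list of colours. A minimal sketch consistent with these calls (the body is an assumption); the merged `STOCKS_adj_close` frame can then be plotted the same way with all four colours:

import matplotlib.pyplot as plt

def plot_data(data, title, axis_labels, color_list):
    """Hypothetical helper: line-plot a Series or DataFrame."""
    data.plot(color=color_list, figsize=(10, 6))
    plt.title(title)
    plt.xlabel(axis_labels[0])
    plt.ylabel(axis_labels[1])
    plt.show()
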
Example #8
plt.grid()
plt.legend()
plt.xlabel('Regularization parameter')
plt.ylabel('Accuracy')
plt.title('Accuracy of SVM with different kernels on 1st data matrix')
plt.savefig('SVM1.png')
plt.show()
''' Grid search using the multi-layer perceptron '''

y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)

y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

nh = np.array([1, 2, 4, 8])  #hidden nodes
et = np.array([0.001, 0.01, 0.05, 0.1])  #Learning rate

train_accuracy = np.zeros((len(nh), len(et)))
test_accuracy = np.zeros((len(nh), len(et)))

for i, n in enumerate(nh):
    for j, e in enumerate(et):
        mlp1 = mlp.mlp(X_train, y_train, nhidden=n, eta=e, linear=False)
        mlp1.earlystopping(X_train, y_train, X_test, y_test)
        train_accuracy[i, j] = (mlp1.score(X_train, y_train))
        test_accuracy[i, j] = (mlp1.score(X_test, y_test))

plot_data(et, nh, train_accuracy)
plot_data(et, nh, test_accuracy)
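To read the best setting off these grids, one could locate the maximum of `test_accuracy`; this lookup is an addition, not part of the original script:

best_i, best_j = np.unravel_index(np.argmax(test_accuracy), test_accuracy.shape)
print(f'best: nhidden={nh[best_i]}, eta={et[best_j]}, '
      f'test accuracy={test_accuracy[best_i, best_j]:.3f}')
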
Example #9
# grids indexed [i over ld, j over et] to match the loop order below
train_accuracy = np.zeros((len(ld), len(et)))
test_accuracy = np.zeros((len(ld), len(et)))
critical_accuracy = np.zeros((len(ld), len(et)))

for i, l in enumerate(ld):
    for j, e in enumerate(et):

        clf = logistic_regression.logistic_regression(eta=e,
                                                      num_iter=1000,
                                                      ld=l)
        clf.fit(X_train, y_train)

        train_accuracy[i, j] = clf.score(X_train, y_train)
        test_accuracy[i, j] = clf.score(X_test, y_test)
        critical_accuracy[i, j] = clf.score(data_critical, labels_critical)

plot_data(et, ld, train_accuracy)
plot_data(et, ld, test_accuracy)
plot_data(et, ld, critical_accuracy)
'''
Now using MLP
'''

#Turn into one-hot vector
y_test = to_categorical(y_test)
y_train = to_categorical(y_train)
labels_critical = to_categorical(labels_critical)

#parameter values
nh = np.array([50, 100])  #hidden nodes
et = np.array([0.01, 0.1])  #Learning rate
Example #10
def main():
    #getting and cleaning data
    data = functions.get_array('data')
    data = [i.split('\t') for i in data]

    # optional date range in mm/dd/yy format
    dates = None
    user_in = input("Date range? ")
    if len(user_in) > 0:
        dates = [functions.strip_time(i) for i in user_in.split(' ')]

    # trim the data if a date range was given
    if dates is not None:
        data = functions.date_trim(data, dates)
        if data == 0:
            print("No data in that range")
            return

    # bin the data
    print("Total number of runs in selection:", len(data))
    sigma = 4.0  # trim runs more than this many standard deviations out
    bins = np.arange(45)  # one bin for each km out to 45 km
    data = functions.bin_data(data, bins, sigma)
    #splitting up data into variables and joining bins together
    start = list(itertools.chain.from_iterable(data[0]))
    time = list(itertools.chain.from_iterable(data[1]))
    distance = list(itertools.chain.from_iterable(data[2]))
    elevation = list(itertools.chain.from_iterable(data[3]))
    PRs = list(itertools.chain.from_iterable(data[4]))

    ##################
    #     plots      #
    ##################

    #time v distance
    functions.plot_data(time, distance, 1, "Time (minutes)", "Distance (km)", 211)
    #converting to miles
    miles = functions.convert_distance(distance,'miles')
    functions.plot_data(time, miles, 1, "Time (minutes)", "Distance (miles)", 212)

    #histogram of runs per distance
    bins = np.arange(45)
    functions.plot_hist(distance, bins, 2, "Distance (km)", "Number of runs", 211)
    bins = np.arange(28)
    functions.plot_hist(miles, bins, 2, "Distance (miles)", "Number of runs", 212)

    #distance v elevation
    #only plotting runs with >0 meters elevation
    index = [i for i in range(0,len(elevation)) if elevation[i] != 0]
    elevation1 = [elevation[i] for i in index]
    distance1 = [distance[i] for i in index]
    miles1 = [miles[i] for i in index]
    feet = functions.convert_distance(elevation1,'feet')
    functions.plot_data(distance1, elevation1, 3, "Distance (km)", "Elevation (m)", 211)
    functions.plot_data(miles1, feet, 3, "Distance (miles)", "Elevation (ft)", 212)

    #only plotting runs with >1 PR
    PR1 = [i for i in PRs if i != 0]
    bins = np.arange(25)
    functions.plot_hist(PR1, bins, 4, "PRs set in a single run", "Number of runs", 0)

    print("Average length of run: "+str(stats.mean(distance))+' km \t'+str(stats.mean(miles))+' miles')
    print("Average elevation of run: "+str(stats.mean(elevation))+' m \t'+str(stats.mean(feet))+' feet')

    plt.show()
    return
Example #11
def main(argv=None):
  
  from cleverhans_tutorials import check_installation
  check_installation(__file__)
  
  if not os.path.exists( CONFIG.SAVE_PATH ):
    os.makedirs( CONFIG.SAVE_PATH )
  save_path_data = CONFIG.SAVE_PATH + 'data/'
  if not os.path.exists( save_path_data ):
    os.makedirs( save_path_data )
  model_path = CONFIG.SAVE_PATH + '../all/' +  CONFIG.DATASET + '/'
  if not os.path.exists( model_path ):
    os.makedirs( model_path )
    os.makedirs( model_path + 'data/' )
  
  nb_epochs = FLAGS.nb_epochs
  batch_size = FLAGS.batch_size
  learning_rate = FLAGS.learning_rate
  nb_filters = FLAGS.nb_filters
  len_x = int(CONFIG.NUM_TEST/2)
  
  start = time.time()

  # Object used to keep track of (and return) key accuracies
  report = AccuracyReport()

  # Set seeds to improve reproducibility
  if CONFIG.DATASET == 'mnist' or CONFIG.DATASET == 'cifar10':
    tf.set_random_seed(1234)
    np.random.seed(1234)
    rd.seed(1234)
  elif CONFIG.DATASET == 'moon' or CONFIG.DATASET == 'dims':
    tf.set_random_seed(13)
    np.random.seed(1234)
    rd.seed(0)          
  
  # Set logging level to see debug information
  set_log_level(logging.DEBUG)

  # Create TF session
  tf_config = tf.ConfigProto(allow_soft_placement=True,log_device_placement=True)
  tf_config.gpu_options.per_process_gpu_memory_fraction = 0.2 
  sess = tf.Session(config=tf_config)   
  
  if CONFIG.DATASET == 'mnist':
    # Get MNIST data
    mnist = MNIST(train_start=0, train_end=CONFIG.NUM_TRAIN,
                  test_start=0, test_end=CONFIG.NUM_TEST)
    x_train, y_train = mnist.get_set('train')
    x_test, y_test = mnist.get_set('test')
  elif CONFIG.DATASET == 'cifar10':
    # Get CIFAR10 data
    data = CIFAR10(train_start=0, train_end=CONFIG.NUM_TRAIN,
                  test_start=0, test_end=CONFIG.NUM_TEST)
    dataset_size = data.x_train.shape[0]
    dataset_train = data.to_tensorflow()[0]
    dataset_train = dataset_train.map(
      lambda x, y: (random_shift(random_horizontal_flip(x)), y), 4)
    dataset_train = dataset_train.batch(batch_size)
    dataset_train = dataset_train.prefetch(16)
    x_train, y_train = data.get_set('train')
    x_test, y_test = data.get_set('test')                             
  elif CONFIG.DATASET == 'moon':
    # Create a two moon example
    X, y = make_moons(n_samples=(CONFIG.NUM_TRAIN+CONFIG.NUM_TEST), noise=0.2,
                      random_state=0)
    X = StandardScaler().fit_transform(X)
    x_train1, x_test1, y_train1, y_test1 = train_test_split(X, y,
                                            test_size=(CONFIG.NUM_TEST/(CONFIG.NUM_TRAIN
                                            +CONFIG.NUM_TEST)), random_state=0)                          
    x_train, y_train, x_test, y_test = normalize_reshape_inputs_2d(model_path, x_train1,
                                                                   y_train1, x_test1,
                                                                   y_test1)
  elif CONFIG.DATASET == 'dims':
    X, y = make_moons(n_samples=(CONFIG.NUM_TRAIN+CONFIG.NUM_TEST), noise=0.2,
                      random_state=0)
    X = StandardScaler().fit_transform(X)
    x_train1, x_test1, y_train1, y_test1 = train_test_split(X, y,
                                            test_size=(CONFIG.NUM_TEST/(CONFIG.NUM_TRAIN
                                            +CONFIG.NUM_TEST)), random_state=0)                          
    x_train2, y_train, x_test2, y_test = normalize_reshape_inputs_2d(model_path, x_train1,
                                                                     y_train1,x_test1,
                                                                     y_test1)
    x_train, x_test = add_noise_and_QR(x_train2, x_test2, CONFIG.NUM_DIMS)

  np.save(os.path.join(save_path_data, 'x_test'), x_test)
  np.save(os.path.join(save_path_data, 'y_test'), y_test)

  # Use Image Parameters
  img_rows, img_cols, nchannels = x_train.shape[1:4]
  nb_classes = y_train.shape[1]

  # Define input TF placeholder
  x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
                                        nchannels))
  y = tf.placeholder(tf.float32, shape=(None, nb_classes))

  # Train the model
  train_params = {
      'nb_epochs': nb_epochs,
      'batch_size': batch_size,
      'learning_rate': learning_rate
  }
  eval_params = {'batch_size': 1}
  rng = np.random.RandomState([2017, 8, 30])
  
  with open(CONFIG.SAVE_PATH + 'acc_param.txt', 'a') as fi:

    def do_eval(adv_x, preds, x_set, y_set, report_key):
      acc, pred_np, adv_x_np = model_eval(sess, x, y, preds, adv_x, nb_classes, x_set,
                                          y_set, args=eval_params)
      setattr(report, report_key, acc)
      if report_key:
        print('Accuracy on %s examples: %0.4f' % (report_key, acc), file=fi)
      return pred_np, adv_x_np
    
    if CONFIG.DATASET == 'mnist':
      trained_model_path = model_path + 'data/trained_model'
      model = ModelBasicCNN('model1', nb_classes, nb_filters)
    elif CONFIG.DATASET == 'cifar10':
      trained_model_path = model_path + 'data/trained_model'
      model = ModelAllConvolutional('model1', nb_classes, nb_filters,
                                    input_shape=[32, 32, 3])
    elif CONFIG.DATASET == 'moon':
      trained_model_path = model_path + 'data/trained_model'
      model = ModelMLP('model1', nb_classes)
    elif CONFIG.DATASET == 'dims':
      trained_model_path = save_path_data + 'trained_model'
      model = ModelMLP_dyn('model1', nb_classes, CONFIG.NUM_DIMS)
      
    preds = model.get_logits(x)
    loss = CrossEntropy(model, smoothing=0.1)
    
    def evaluate():
      _, _ = do_eval(x, preds, x_test, y_test, 'test during train')
    
    if os.path.isfile( trained_model_path + '.index' ):
      tf_model_load(sess, trained_model_path)
    else:
      if CONFIG.DATASET == 'mnist':
        train(sess, loss, x_train, y_train, evaluate=evaluate,
              args=train_params, rng=rng, var_list=model.get_params())
      elif CONFIG.DATASET == 'cifar10':
        train(sess, loss, None, None,
              dataset_train=dataset_train, dataset_size=dataset_size,
              evaluate=evaluate, args=train_params, rng=rng,
              var_list=model.get_params())
      elif CONFIG.DATASET == 'moon':
        train_2d(sess, loss, x, y, x_train, y_train, save=False, evaluate=evaluate,
                args=train_params, rng=rng, var_list=model.get_params())
      elif CONFIG.DATASET == 'dims':
        train_2d(sess, loss, x, y, x_train, y_train, evaluate=evaluate,
                args=train_params, rng=rng, var_list=model.get_params())
      saver = tf.train.Saver()
      saver.save(sess, trained_model_path)
    
    # Evaluate the accuracy on test examples
    if os.path.isfile( save_path_data + 'logits_zero_attacked.npy' ):
      logits_0 = np.load(save_path_data + 'logits_zero_attacked.npy')
    else:
      _, _ = do_eval(x, preds, x_train, y_train, 'train')
      logits_0, _ = do_eval(x, preds, x_test, y_test, 'test')
      np.save(os.path.join(save_path_data, 'logits_zero_attacked'), logits_0) 
    
    if CONFIG.DATASET == 'moon':
      num_grid_points = 5000
      if os.path.isfile( model_path + 'data/images_mesh' + str(num_grid_points) + '.npy' ):
        x_mesh = np.load(model_path + 'data/images_mesh' + str(num_grid_points) + '.npy')
        logits_mesh = np.load(model_path + 'data/logits_mesh' + str(num_grid_points) + '.npy')
      else:
        xx, yy = np.meshgrid(np.linspace(0, 1, num_grid_points), np.linspace(0, 1, num_grid_points)) 
        x_mesh1 = np.stack([np.ravel(xx), np.ravel(yy)]).T
        y_mesh1 = np.ones((x_mesh1.shape[0]),dtype='int64')
        x_mesh, y_mesh, _, _ = normalize_reshape_inputs_2d(model_path, x_mesh1, y_mesh1)
        logits_mesh, _ = do_eval(x, preds, x_mesh, y_mesh, 'mesh')
        x_mesh = np.squeeze(x_mesh)
        np.save(os.path.join(model_path, 'data/images_mesh'+str(num_grid_points)), x_mesh)
        np.save(os.path.join(model_path, 'data/logits_mesh'+str(num_grid_points)), logits_mesh)
        
    points_x = x_test[:len_x]
    points_y = y_test[:len_x]
    points_x_bar = x_test[len_x:]
    points_y_bar = y_test[len_x:] 
     
    # Initialize the CW attack object and graph
    cw = CarliniWagnerL2(model, sess=sess) 
    
    # first attack
    attack_params = {
        'learning_rate': CONFIG.CW_LEARNING_RATE,
        'max_iterations': CONFIG.CW_MAX_ITERATIONS
      }
    
    if CONFIG.DATASET == 'moon':
     
      out_a = compute_polytopes_a(x_mesh, logits_mesh, model_path)
      attack_params['const_a_min'] = out_a
      attack_params['const_a_max'] = 100
    
    adv_x = cw.generate(x, **attack_params) 
      
    if os.path.isfile( save_path_data + 'images_once_attacked.npy' ):
      adv_img_1 = np.load(save_path_data + 'images_once_attacked.npy')
      logits_1 = np.load(save_path_data + 'logits_once_attacked.npy')
    else:
      #Evaluate the accuracy on adversarial examples
      preds_adv = model.get_logits(adv_x)
      logits_1, adv_img_1 = do_eval(adv_x, preds_adv, points_x_bar, points_y_bar,
                                    'test once attacked')
      np.save(os.path.join(save_path_data, 'images_once_attacked'), adv_img_1)
      np.save(os.path.join(save_path_data, 'logits_once_attacked'), logits_1)
      
    # counter attack 
    attack_params['max_iterations'] = 1024
      
    if CONFIG.DATASET == 'moon':  
      
      out_alpha2 = compute_epsilons_balls_alpha(x_mesh, np.squeeze(x_test),
                                                np.squeeze(adv_img_1), model_path,
                                                CONFIG.SAVE_PATH)
      attack_params['learning_rate'] = out_alpha2
      attack_params['const_a_min'] = -1
      attack_params['max_iterations'] = 2048
      
      plot_data(np.squeeze(adv_img_1), logits_1, CONFIG.SAVE_PATH+'data_pred1.png', x_mesh,
                logits_mesh)
      
    adv_adv_x = cw.generate(x, **attack_params) 
      
    x_k = np.concatenate((points_x, adv_img_1), axis=0)
    y_k = np.concatenate((points_y, logits_1), axis=0)
    
    if os.path.isfile( save_path_data + 'images_twice_attacked.npy' ):
      adv_img_2 = np.load(save_path_data + 'images_twice_attacked.npy')
      logits_2 = np.load(save_path_data + 'logits_twice_attacked.npy')
    else:
      # Evaluate the accuracy on adversarial examples
      preds_adv_adv = model.get_logits(adv_adv_x)
      logits_2, adv_img_2 = do_eval(adv_adv_x, preds_adv_adv, x_k, y_k,
                                    'test twice attacked')   
      
      np.save(os.path.join(save_path_data, 'images_twice_attacked'), adv_img_2)
      np.save(os.path.join(save_path_data, 'logits_twice_attacked'), logits_2)
    
    if CONFIG.DATASET == 'moon':  
      plot_data(np.squeeze(adv_img_2[:len_x]), logits_2[:len_x],
                CONFIG.SAVE_PATH+'data_pred2.png', x_mesh, logits_mesh)
      plot_data(np.squeeze(adv_img_2[len_x:]), logits_2[len_x:],
                CONFIG.SAVE_PATH+'data_pred12.png', x_mesh, logits_mesh)
      test_balls(np.squeeze(x_k), np.squeeze(adv_img_2), logits_0, logits_1, logits_2,
                 CONFIG.SAVE_PATH)
 
  compute_returnees(logits_0[len_x:], logits_1, logits_2[len_x:], logits_0[:len_x],
                    logits_2[:len_x], CONFIG.SAVE_PATH) 
  
  if x_test.shape[-1] > 1:
    num_axis=(1,2,3)
  else:
    num_axis=(1,2)
    
  D_p = np.squeeze(np.sqrt(np.sum(np.square(points_x-adv_img_2[:len_x]), axis=num_axis)))
  D_p_p = np.squeeze(np.sqrt(np.sum(np.square(adv_img_1-adv_img_2[len_x:]),
                                    axis=num_axis)))
  D_p_mod, D_p_p_mod = modify_D(D_p, D_p_p, logits_0[len_x:], logits_1, logits_2[len_x:],
                                logits_0[:len_x], logits_2[:len_x])
      
  if D_p_mod != [] and D_p_p_mod != []:
    plot_violins(D_p_mod, D_p_p_mod, CONFIG.SAVE_PATH)
    threshold_evaluation(D_p_mod, D_p_p_mod, CONFIG.SAVE_PATH)
    _ = compute_auroc(D_p_mod, D_p_p_mod, CONFIG.SAVE_PATH)
      
  plot_results_models(len_x, CONFIG.DATASET, CONFIG.SAVE_PATH)
  
  print('Time needed:', time.time()-start)

  return report