Example #1
def main():
    if arguments.arch not in ("densenet", "alexnet"):
        print(
            "Please choose either densenet or alexnet as the model architecture; other models are not supported."
        )
        return

    device = "cpu"
    if arguments.gpu:
        if torch.cuda.is_available():
            device = "cuda"
            print("GPU is available")
        else:
            print("GPU is not available; the CPU will be used instead")

    h.train(arguments.data_dir, device, arguments.epochs, arguments.arch,
            arguments.learning_rate, arguments.hidden_units,
            arguments.save_dir, arguments.checkpoint)
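
For context, here is a minimal sketch of the argument parser this snippet appears to assume; the flag names are inferred from the attributes used above, and the defaults are illustrative rather than the author's.

import argparse

def get_arguments():
    parser = argparse.ArgumentParser(description="Train a densenet or alexnet image classifier")
    parser.add_argument("data_dir", help="directory containing the training data")
    parser.add_argument("--arch", default="densenet", help="densenet or alexnet")
    parser.add_argument("--epochs", type=int, default=5)
    parser.add_argument("--learning_rate", type=float, default=0.001)
    parser.add_argument("--hidden_units", type=int, default=512)
    parser.add_argument("--save_dir", default=".")
    parser.add_argument("--checkpoint", default="checkpoint.pth")
    parser.add_argument("--gpu", action="store_true", help="use the GPU if available")
    return parser.parse_args()

arguments = get_arguments()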
Example #2
import itertools
import numpy as np
# bt and hlp are project-local helpers (apparently a dataset buffer and a small neural-net trainer)

def player(prev_play, opponent_history=[]):
  # --------- CONFIG ---------
  # larger values give a better model but make the bot slower
  n = 1000      # retrain only on every n-th move
  epochs = 10   # number of passes through the training loop

  global weight1, weight2
  info = False
  
  d = [[0, 0, 0]]
  if prev_play == "":          # first round: no previous move yet, assume "R"
    prev_play = "R"
  i = "RPS".index(prev_play)
  d[0][i] = 1

  # NOTE: an earlier test with a full cartesian expansion ran out of memory
  # cartesian product (order matters, with replacement)
  dataY = list(itertools.product([0, 1], repeat=3))
  dataY = list(filter(lambda x: x[i] == 1, dataY))  # keep only labels where the played move is set
  dataY = [list(x) for x in dataY]  # tuples -> lists

  for dy in dataY:
    bt.addRows(d, [dy])  

  # open question: the model fails at quincy/poor2, but basically works with poor
  # bt.addRows(d, d)
  X, y = bt.getData()

  # retrain the model only every n-th move, not on every call
  pr = []

  if len(X) % n == 0 or "weight1" not in globals():  # always train on the very first call
    weight1, weight2 = hlp.train(X, y, False, False, epochs)
    info = str(len(X))

  i_pred = np.array([1,1,1])
  pr = hlp.predict([weight1, weight2], i_pred)
  #print(pr)
  #sys.exit()

  # TODO
  # check each move's probability and return the counter-move,
  # e.g. if the most probable opponent move is R, return P
  # -> currently extremely slow!
  if pr[0]>pr[1] and pr[0]>pr[2]: # R
    guess = 'P'
  elif pr[1]>pr[0] and pr[1]>pr[2]: # P
    guess = 'S'
  else: # S
    guess = 'R'

  pr2 = [round(x,5) for x in pr]
  if info != False:
    print(info, pr2)
  return guess
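
The if/elif chain above simply plays the move that beats the most probable predicted move. A hypothetical, more compact equivalent (assuming the same "RPS" ordering of the probabilities; ties are broken slightly differently) would be:

import numpy as np

# the move that beats each possible opponent move
counter_move = {'R': 'P', 'P': 'S', 'S': 'R'}

def best_response(pr):
    predicted = "RPS"[int(np.argmax(pr))]  # opponent move with the highest predicted probability
    return counter_move[predicted]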
Example #3
def main():
    input_args = get_input_args()
    gpu = torch.cuda.is_available() and input_args.gpu

    dataloaders, class_to_idx = helper.get_dataloders(input_args.data_dir)

    model, optimizer, criterion = helper.model_create(
        input_args.architectures,
        input_args.learning_rate,
        input_args.hidden_units,
        class_to_idx
        )

    if gpu:
        model.cuda()
        criterion.cuda()
    else:
        torch.set_num_threads(input_args.num_threads)

    epochs = input_args.epochs  # keep the trained epoch count consistent with the value saved below
    print_every = 40
    helper.train(model, dataloaders['training'], epochs, print_every, criterion, optimizer,
                 device='cuda' if gpu else 'cpu')

    if input_args.save_dir:
        if not os.path.exists(input_args.save_dir):
            os.makedirs(input_args.save_dir)

        file_path = os.path.join(input_args.save_dir, input_args.architectures + '_checkpoint.pth')
    else:
        file_path = input_args.architectures + '_checkpoint.pth'

    helper.save_checkpoint(file_path,
                            model, optimizer,
                            input_args.architectures,
                            input_args.learning_rate,
                            input_args.epochs
                            )

    helper.validation(model, dataloaders['testing'], criterion)
Example #4
    num_train_data = len(train_images)
    num_vali_data = len(vali_images)
    num_test_data = len(test_images)

    assert (num_train_data == len(train_masks) == 4131)
    assert (num_vali_data == len(vali_masks) == 584)
    assert (num_test_data == len(test_masks) == 600)

    # helper.check_environment()

    model = nn.build_model(image_shape, input_shape, num_classes)
    try:
        model.load_weights(weights_file)
        print("\nLoaded existing weights!")
    except OSError:
        if args.mode == 0:
            print("\nStart training new model!")
        else:
            print("\nCannot find existing weight file!")
            raise

    helper.show_model(model, structure_file)
    if args.mode == 0:
        helper.train(model, EPOCHS, BATCH_SIZE, LEARNING_RATE, class_colors,
                     train_data_folder, num_train_data, vali_data_folder,
                     num_vali_data, weights_file, loss_history_file)
    else:
        helper.output_prediction(model, image_shape, class_colors, BATCH_SIZE,
                                 test_data_folder, num_test_data,
                                 output_folder)
Example #5
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 13:41:10 2020

@author: ASUS
"""
import helper

# Run before model training. Prepares the data for the next steps.
helper.random_train_test(
    90)  # Randomly splits the full archive into training and test sets.
helper.prepare_data(
)  # Classifies digit images using .dat descriptors without manipulation.
helper.create_from_train_test(
)  # Creates a pickled data file from classified digit images for training.

# Training the model
helper.train(epochs=20)
Example #6
    cat_to_name = json.load(f)

data_dir = args.data_directory
cuda = args.gpu
hidden_units = args.hidden_units
learning_rate = args.learning_rate
epochs = args.epochs

# loading and transforming the data
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
trainloader, validloader, testloader = utility.transform(
    train_dir, valid_dir, test_dir)

device = torch.device('cuda' if cuda else 'cpu')

# adjusting the pretrained model
model = getattr(models, args.arch)(pretrained=True)
model = helper.set_classifier(model, hidden_units)

# Define loss function and optimizer
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)

# Move the model to GPU mode, if specified
model.to(device)

# Training the model
helper.train(epochs, 60, trainloader, validloader, model, optimizer, criterion)
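
helper.set_classifier is project code that is not shown here. As an illustration only, a sketch of what such a helper could do, modeled on the classifier heads used in the later examples of this listing (the layer sizes and the 102-class output are assumptions):

from torch import nn

def set_classifier(model, hidden_units, output_size=102, drop_p=0.5):
    # replace the pretrained network's classifier with a small feed-forward head
    in_features = model.classifier[0].in_features  # works for VGG-style classifiers
    model.classifier = nn.Sequential(
        nn.Linear(in_features, hidden_units),
        nn.ReLU(),
        nn.Dropout(p=drop_p),
        nn.Linear(hidden_units, output_size),
        nn.LogSoftmax(dim=1),  # pairs with the NLLLoss criterion used above
    )
    return model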
Example #7
def main(arg, pars):
    """Build the Car-v0 environments, create a categorical DQN agent, and train it."""
    print("load env ..")
    env_name = "Car-v0"
    #env = gym.make("Car-v0")
    env = suite_gym.load(env_name,
                         discount=arg.gamma,
                         max_episode_steps=arg.max_t)
    print_parameter(arg, pars)
    train_py_env = suite_gym.load(env_name,
                                  discount=arg.gamma,
                                  max_episode_steps=arg.max_t)
    eval_py_env = suite_gym.load(env_name,
                                 discount=arg.gamma,
                                 max_episode_steps=arg.max_t)
    train_env = tf_py_environment.TFPyEnvironment(train_py_env)
    eval_env = tf_py_environment.TFPyEnvironment(eval_py_env)
    print("env loaded")
    train_dir = os.path.join(arg.root_dir, 'network_weights')
    eval_dir = os.path.join(arg.root_dir, 'eval')

    train_env.reset()
    fc_layer_params = (arg.hidden_size_1, )

    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=arg.lr)
    train_step_counter = tf.compat.v2.Variable(0)
    categorical_q_net = CategoricalQNetwork(train_env.observation_spec(),
                                            train_env.action_spec(),
                                            fc_layer_params=fc_layer_params)

    # note: the agent is referenced as tf_agent throughout the rest of this function
    tf_agent = categorical_dqn_agent.CategoricalDqnAgent(
        train_env.time_step_spec(),
        train_env.action_spec(),
        categorical_q_network=categorical_q_net,
        optimizer=optimizer,
        epsilon_greedy=arg.eps_start)

    train_metrics = [
        tf_metrics.NumberOfEpisodes(),
        tf_metrics.EnvironmentSteps(),
        tf_metrics.AverageReturnMetric(),
        tf_metrics.AverageEpisodeLengthMetric(),
    ]

    global_step = tf.compat.v1.train.get_or_create_global_step()

    train_checkpointer = common.Checkpointer(ckpt_dir=train_dir,
                                             agent=tf_agent,
                                             global_step=global_step,
                                             metrics=metric_utils.MetricsGroup(
                                                 train_metrics,
                                                 'train_metrics'))

    if not arg.continue_training:
        tf_agent.initialize()
        # os.path.exists does not expand wildcards; glob is needed to clear old weights
        for old_file in glob.glob(os.path.join("network_weights", "*")):
            os.remove(old_file)
    else:
        print("Continue Training")
        train_checkpointer.initialize_or_restore()
    print("ready to go")
    eval_policy = tf_agent.policy
    collect_policy = tf_agent.collect_policy
    random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(),
                                                    train_env.action_spec())
    replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
        data_spec=tf_agent.collect_data_spec,
        batch_size=train_env.batch_size,
        max_length=arg.buffer_size)
    tf_agent.collect_data_spec
    tf_agent.collect_data_spec._fields
    collect_data(train_env,
                 random_policy,
                 replay_buffer,
                 steps=arg.learn_start,
                 max_t=40)
    print("create dataset")
    dataset = replay_buffer.as_dataset(num_parallel_calls=3,
                                       sample_batch_size=arg.batch_size,
                                       num_steps=2).prefetch(3)
    iterator = iter(dataset)

    # (Optional) Optimize by wrapping some of the code in a graph using TF function.
    tf_agent.train = common.function(tf_agent.train)
    # Reset the train step
    tf_agent.train_step_counter.assign(0)
    avg_return = compute_avg_return(eval_env, tf_agent.policy,
                                    arg.num_eval_episodes)
    returns = [avg_return]
    returns_average = [avg_return]
    train_loss_average = [1]
    score = 0
    scores_window = deque(maxlen=100)  # last 100 scores
    total_train_loss = deque(maxlen=100)  # last 100 scores

    train(arg, tf_agent, train_env, eval_env, replay_buffer, iterator,
          train_checkpointer)
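
compute_avg_return is not defined in this fragment. A common definition, essentially the one from the TF-Agents DQN tutorial, is sketched below; treat it as an assumption rather than the author's exact code.

def compute_avg_return(environment, policy, num_episodes=10):
    # roll out the policy for a few episodes and average the (undiscounted) return
    total_return = 0.0
    for _ in range(num_episodes):
        time_step = environment.reset()
        episode_return = 0.0
        while not time_step.is_last():
            action_step = policy.action(time_step)
            time_step = environment.step(action_step.action)
            episode_return += time_step.reward
        total_return += episode_return
    avg_return = total_return / num_episodes
    return avg_return.numpy()[0]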
Example #8
def main():
    # read data
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

    # preprocess the data
    (x_train, y_train, x_test, y_test) = preprocess(x_train, y_train, x_test,
                                                    y_test)
    # Reshape the data inputs such that we can put those inputs into MLP
    train_inputs = np.reshape(x_train, (-1, 28 * 28))
    test_inputs = np.reshape(x_test, (-1, 28 * 28))

    orig_model = MLP(28 * 28)

    # train original model
    epochs = 10
    print("Training Original MLP...")
    for i in range(epochs):
        train(orig_model, train_inputs, y_train)
        test_acc = test(orig_model, test_inputs, y_test)
        print("Epoch: {} ------ Testing accuracy: {}".format(i + 1, test_acc))

    # calculate fgs and deepfool for original model, on both test set and training set
    print("Creating DeepFool images set... will take aobut 5 mins")
    (train_adv_orig, train_r_orig) = deepfool(orig_model, train_inputs)
    (test_adv_orig, test_r_orig) = deepfool(orig_model, test_inputs)

    # fine tuning
    tuning_model = MLP_tuning(28 * 28, orig_model)
    epochs = 5
    print("Training Fine Tuning MLP...")
    for i in range(epochs):
        train(tuning_model, train_adv_orig, y_train)
        tuning_test_acc = test(tuning_model, test_adv_orig, y_test)
        print("Epoch: {} ------ Testing accuracy: {}".format(
            i + 1, tuning_test_acc))

    # train deepdefense model
    regu_model = regu_MLP(28 * 28, orig_model)
    epochs = 5
    print("Training Deep Defense MLP...")
    for i in range(epochs):
        regu_train(regu_model, train_adv_orig, y_train, train_r_orig)
        regu_test_acc = test(regu_model, test_adv_orig, y_test)
        print("Epoch: {} ------ Testing accuracy: {}".format(
            i + 1, regu_test_acc))

    # keep training original model for comparison
    epochs = 5
    print("Training MLP for 5 more epochs...")
    for i in range(epochs):
        train(orig_model, train_inputs, y_train)
        test_accu = test(orig_model, test_inputs, y_test)
        print("Epoch: {} ------ Testing accuracy: {}".format(i + 1, test_accu))

    ################### Evaluation #########################
    # ROC curves on the DeepFool test images generated from the original MLP model
    roc1 = roc(orig_model, test_adv_orig, y_test, "Vanilla MLP")
    roc2 = roc(tuning_model, test_adv_orig, y_test, "Fine tuning MLP")
    roc3 = roc(regu_model, test_adv_orig, y_test, "Deep Defense MLP")
    AUC = pd.DataFrame(
        {
            "Vanilla MLP": list(roc1.values()),
            "Fine-Tune MLP": list(roc2.values()),
            "Deep Defense": list(roc3.values())
        },
        index=["label " + str(i + 1) for i in range(10)])
    print("Area Under the Curve:")
    print(AUC)

    # testing acc on benign images
    benign_test_acc = pd.DataFrame(
        {
            "Vanilla MLP": test(orig_model, test_inputs, y_test),
            "Fine-Tune MLP": test(tuning_model, test_inputs, y_test),
            "Deep Defense": test(regu_model, test_inputs, y_test)
        },
        index=["TestAcc"])

    # rho2 scores
    (test_adv_orig2, test_r_orig2) = deepfool(orig_model, test_inputs)
    (test_adv_tuning, test_r_tuning) = deepfool(tuning_model, test_inputs)
    (test_adv_regu, test_r_regu) = deepfool(regu_model, test_inputs)

    regu_rho2 = rho2(test_r_regu, test_inputs)
    tuning_rho2 = rho2(test_r_tuning, test_inputs)
    orig_rho2 = rho2(test_r_orig2, test_inputs)
    rho2_all = pd.DataFrame(
        {
            "Vanilla MLP": orig_rho2,
            "Fine-Tune MLP": tuning_rho2,
            "Deep Defense": regu_rho2
        },
        index=["Rho2 Score"])

    # plot accuracy on FGS images
    epsilon_ref_100, epsilon_ref_50, epsilon_ref_20 = plot_acc_on_FGS(
        orig_model, regu_model, tuning_model, test_inputs, y_test,
        test_adv_orig)
    epsilon_list = [epsilon_ref_20, epsilon_ref_50, epsilon_ref_100]

    # test accuracy of the vanilla, regularized, and fine-tuned models on FGS examples at these three epsilon values
    pert_test_orig = FGS(orig_model, test_inputs, y_test, 1)
    pert_test_regu = FGS(regu_model, test_inputs, y_test, 1, True,
                         test_adv_orig)
    pert_test_tuning = FGS(tuning_model, test_inputs, y_test, 1)

    FGS_orig_test_acc = [
        test(orig_model, eps * pert_test_orig + test_inputs, y_test)
        for eps in epsilon_list
    ]
    FGS_regu_test_acc = [
        test(regu_model, eps * pert_test_regu + test_inputs, y_test)
        for eps in epsilon_list
    ]
    FGS_tuning_test_acc = [
        test(tuning_model, eps * pert_test_tuning + test_inputs, y_test)
        for eps in epsilon_list
    ]

    acc_fgs = pd.DataFrame(
        {
            "Vanilla MLP": FGS_orig_test_acc,
            "Fine-Tune MLP": FGS_tuning_test_acc,
            "Deep Defense": FGS_regu_test_acc
        },
        index=["[email protected]", "[email protected]", "[email protected]"])
    result_table = pd.concat([benign_test_acc, rho2_all, acc_fgs],
                             ignore_index=False).transpose()
    print(result_table)
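
rho2 above is a DeepFool-style robustness score: the mean L2 norm of the adversarial perturbation relative to the norm of the corresponding input. A minimal sketch of such a metric (an assumption about the project's helper, not its actual code):

import numpy as np

def rho2(perturbations, inputs):
    # average ||r||_2 / ||x||_2 over the test set
    r = np.reshape(perturbations, (len(perturbations), -1))
    x = np.reshape(inputs, (len(inputs), -1))
    return float(np.mean(np.linalg.norm(r, axis=1) / np.linalg.norm(x, axis=1)))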
Example #9
            trainset_nm = hp.CIFARNegativeMining(
                indices,
                root="./data",
                train=True,
                download=True,
                transform=train_augment,
            )
            trainloader_nm = torch.utils.data.DataLoader(
                trainset_nm,
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=20,
                drop_last=True,
            )
        train_results = hp.train(
            model, trainloader_nm, optimizer, scheduler, args, device, log_temp
        )

    else:
        train_results = hp.train(
            model, dataloader_train_pairs, optimizer, scheduler, args, device, log_temp
        )

    scheduler.step(epoch)
    # scheduler.step(train_results["avg_loss"])
    total_train_time += train_results["compute_time"]

    optim_param = unsupervised_feature_model.get_lr(optimizer)
    norm_const = model.norm_const().item()

    # KNN accuracy
Example #10
File: nn.py  Project: avinik/Al
print(modelPackage.modelName)

model = createModel(modelPackage)
modelPackage.model = model
# plot_model(model, to_file='model.png')

precision = 0
recall = 0
f1 = 0
file = open("Results/"+str(datasetName)+str(samplingMethod) + str(modelName)+ "_Results.txt", "w+")

# Active Learning starts here

if datasetName == "Twitter":
    # Initial training of the model
    model = train(model, train_batch, epochs, modelPackage)
    pre_test, rec_test, f1_test = test(model, test_batch, idx2Label, modelPackage)
    file.write("Initial Precision: " + str(pre_test) + " Initial Recoil : "+str(rec_test) + " Initial F1_Score : " + str(f1_test)+ " Data size : " + str(len(train_batch))+ "\n\n")
    file.flush()
    l = len(learn_batch)
    flagged = []
    last = []
    for i in range(3):
        last.append(0)
    for i in range(l):
        flagged.append(0)
    iter = 0
    while(True):
        modelPackage.model = model

        # Finding the active data
Example #11
                                                   approximator_model,
                                                   target_model,
                                                   batch_size,
                                                   action_space,
                                                   gamma=discount_rate,
                                                   beta=1 -
                                                   (episode / n_episode))
    elif take_sample.__name__ == 'uniform_sampling':
        print("Uses Uniform Experience Replay Sampling")
        experience_batch = take_sample(D, batch_size)
    else:
        print("Uses Random Experience Replay Sampling")
        experience_batch = take_sample(D, batch_size)

    history = utils.train(approximator_model, target_model, experience_batch,
                          importance, batch_size, action_space, discount_rate,
                          tensorflow_callback)
    # Wrap up
    stats_nonzeros = (tf.math.count_nonzero(
        [exp[1] for exp in experience_batch]) / batch_size) * 100
    stats_loss = history.history.get("loss", [0])[0]
    stats_time_end = np.round(time.time() - start_time, 2)
    stats_memory_usage = np.round(process.memory_info().rss / (1024**3), 2)
    sample_exp = random.choice(experience_batch)

    print(f"Current memory consumption is {stats_memory_usage} GB's")
    print(f"Number of information yielding states: {stats_nonzeros}")
    print(
        f"Loss of episode {episode} is {stats_loss} and took {stats_time_end} seconds with {stats_frame_cnt}"
    )
    print(f"TOTAL REWARD: {stats_rewards}")
Example #12
int2char = dict(enumerate(chars))
char2int = {ch: ii for ii, ch in int2char.items()}
encoded = np.array([char2int[ch] for ch in text])

# define and print the net
net = CharRNN(chars, n_hidden=512, n_layers=2)
print(net)

n_seqs, n_steps = 128, 100

# you may change cuda to True if you plan on using a GPU!
# also, if you do, please INCREASE the epochs to 25
helper.train(net,
             encoded,
             epochs=1,
             n_seqs=n_seqs,
             n_steps=n_steps,
             lr=0.001,
             cuda=False,
             print_every=10)

print(helper.sample(net, 2000, prime='Anna', top_k=5, cuda=False))

# change the model name when saving multiple files
model_name = 'rnn_1_epoch.net'

checkpoint = {
    'n_hidden': net.n_hidden,
    'n_layers': net.n_layers,
    'state_dict': net.state_dict(),
    'tokens': net.chars
}
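
The checkpoint dictionary above is built but never written in this fragment; persisting it is a one-liner with torch.save (model_name is the file name defined earlier):

import torch

with open(model_name, 'wb') as f:
    torch.save(checkpoint, f)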
Example #13
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', action='store', help='image directory')
parser.add_argument('--save_dir', action='store', help='directory in which to save the checkpoint')
parser.add_argument('--in_file', type=str, default="label_map.json", help='input JSON file')
parser.add_argument('--arch', action='store', default='vgg19', help='architecture')
parser.add_argument('--epochs', action='store', type=int, default=6, help='number of epochs')
parser.add_argument('--learning_rate', action='store', type=float, default=0.03, help='learning rate')
parser.add_argument('--hidden_units', action='store', type=int, default=2900, help='number of hidden units')
parser.add_argument('--out_size', action='store', type=int, default=102, help='number of outputs')
parser.add_argument('--drop_p', type=float, default=0.5, help='dropout probability')
parser.add_argument('--gpu', action='store_true', help='use the GPU')
args = parser.parse_args()
json_path = args.in_file
label_map = helper.load_label_map(json_path)
data_dir = args.data_dir
train_data, validation_data, test_data, trainloader, validloader, testloader = helper.preprocess(data_dir)
model_ = classifier.build_model(args.hidden_units, len(label_map), args.drop_p, args.arch)
model = helper.premodel(args.arch)

for param in model.parameters():
    param.requires_grad = False
in_size = helper.get_size(model, args.arch)
model.classifier = helper.Network(in_size, args.out_size, [args.hidden_units], drop_p = 0.5)
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.classifier.parameters(), lr = args.learning_rate)
helper.train(model, trainloader, validloader, criterion, optimizer, args.epochs, 40, args.gpu)
test_accuracy, test_loss = helper.valid_loss_acc(model, testloader, criterion, args.gpu)
print("Test Accuracy: {:.4f} ".format(test_accuracy), "Test Loss: {:.4f}".format(test_loss))
helper.save_checkpoint(model, train_data, optimizer, args.save_dir, args.arch)
Example #14
        num_train_data = 2
        num_test_data = 2

    #######################################################################
    ### The original segnet (without pooling index)
    #######################################################################
    weights_file = "segnet_weights.h5"
    structure_file = "segnet_model.txt"
    output_folder = "./segnet_inference"
    loss_history_file = "segnet_loss.pkl"

    model = segnet.build_model(image_shape, input_shape, num_classes)
    helper.show_model(model, structure_file)

    helper.train(model, epochs, batch_size, learning_rate, class_colors,
                 train_data_folder, num_train_data, vali_data_folder,
                 num_vali_data, weights_file, loss_history_file)

    helper.output_prediction(model, image_shape, class_colors, batch_size,
                             test_data_folder, num_test_data, output_folder)

    #######################################################################
    ### The depthwise segnet
    #######################################################################
    weights_file = "depthwise_segnet_weights.h5"
    structure_file = "depthwise_segnet_model.txt"
    output_folder = "./depthwise_segnet_inference"
    loss_history_file = "depthwise_segnet_loss.pkl"

    model = depthwise_segnet.build_model(image_shape, input_shape, num_classes)
    helper.show_model(model, structure_file)
Example #15
    torch.manual_seed(args.seed)
    print(args)
    # Load the training data.
    train_dataset, data_loaders = loaders(args.batch_size, args.data_dir)
    print(data_loaders)

    classifier, model = run_the_model(args.arch, args.hidden_units,
                                      len(train_dataset.classes))

    if use_cuda:
        model = model.cuda()

    criterion = nn.CrossEntropyLoss()

    optimizer = optim.SGD(model.classifier.parameters(), lr=args.learning_rate)

    trained_model = train(args.epochs, data_loaders, model, optimizer,
                          criterion, use_cuda, 'outputs/model_results.pt')

    if args.save_dir is not None:
        checkpoint = {
            'arch': args.arch,
            'output_size': len(train_dataset.classes),
            'classifier': classifier,
            'model_state': trained_model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'class_to_index': train_dataset.class_to_idx
        }
        # os.path.join avoids a missing '/' between the directory and the file name
        torch.save(checkpoint, os.path.join(args.save_dir, "checkpoint.pth"))
        print("Checkpoint saved!")
Example #16
model = vgg16(pretrained=True).to(args.device)

mask = checkpoint['mask']

# Conv 5-3 [output]
model.features[-3] = conv_post_mask(model.features[-3], mask[0])
# FC 6 [input, output]
model.classifier[0] = linear_mask(model.classifier[0], mask[0], mask[1])
# FC 7 [input]
model.classifier[3] = linear_pre_mask(model.classifier[3], mask[1])

model.load_state_dict(checkpoint['state_dict'])

criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr, weight_decay=1e-4)

best_top1 = 0

for e in range(args.epoch):
    train(model, train_loader, criterion, optimizer,
          f"EPOCH : [{e + 1} / {args.epoch}]")

    top1, top5 = valid(model, valid_loader, criterion)

    print(f"top1 : {top1} / top5 : {top5}")

    if top1 > best_top1:
        best_top1 = top1

        torch.save({'state_dict': model.state_dict()}, args.save_path + '.tar')
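
conv_post_mask, linear_mask, and linear_pre_mask are pruning helpers from the surrounding project and are not shown here. As an illustration only, here is a sketch of what a linear_pre_mask-style helper could look like: it rebuilds a Linear layer keeping only the input features selected by a mask.

import torch
import torch.nn as nn

def linear_pre_mask(layer: nn.Linear, in_mask: torch.Tensor) -> nn.Linear:
    # indices of the input features that survive pruning
    idx = torch.nonzero(in_mask, as_tuple=False).flatten()
    new_layer = nn.Linear(len(idx), layer.out_features, bias=layer.bias is not None)
    # copy over the surviving weight columns and the bias
    new_layer.weight.data = layer.weight.data[:, idx].clone()
    if layer.bias is not None:
        new_layer.bias.data = layer.bias.data.clone()
    return new_layer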
Example #17
                                          shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64)
validloader = torch.utils.data.DataLoader(validate_data, batch_size=64)

if args.arch == 'vgg':
    input_size = 25088
    model = models.vgg16(pretrained=True)
elif args.arch == 'alexnet':
    # alexnet's classifier expects 256 * 6 * 6 = 9216 input features
    input_size = 9216
    model = models.alexnet(pretrained=True)

for param in model.parameters():
    param.requires_grad = False
model.classifier = nn.Sequential(nn.Linear(input_size, args.hidden_layers),
                                 nn.ReLU(), nn.Dropout(p=0.5),
                                 nn.Linear(args.hidden_layers, 102),
                                 nn.LogSoftmax(dim=1))
print(model)

criterion = nn.NLLLoss()
device = 'cuda' if args.gpu and torch.cuda.is_available() else 'cpu'
optimizer = optim.Adam(model.classifier.parameters(), args.lr)
loss, accuracy = helper.validate(model, criterion, testloader, device)
print(f"loss: {loss} \n Accuracy: {accuracy}")
epochs = args.epochs
model = helper.train(model, optimizer, criterion, epochs, trainloader,
                     validloader, device)
helper.accuracy(model, testloader, device)
helper.save(model, train_data, args.arch, input_size, args.hidden_layers,
            epochs, args.lr)