Code Example #1
File: test.py Project: Neclow/ee559-project2
def run_train(net,
              criterion,
              optimizer,
              eta,
              plot_data=False,
              plot_training=False,
              seed=42):
    torch.manual_seed(seed)

    # Load data
    trainX, trainY, testX, testY = load_data(plotting=plot_data)

    # Initialize weights
    net.weight_initialization()

    time.sleep(2)

    # Train model
    losses = train(net,
                   trainX,
                   trainY,
                   input_criterion=criterion,
                   input_optimizer=optimizer,
                   eta=eta,
                   verbose=True)

    # Compute accuracy
    net.eval()
    print('Train accuracy: %.4f' % compute_accuracy(net, trainX, trainY))
    print('Test accuracy: %.4f \n' % compute_accuracy(net, testX, testY))

    # Create plots (if flag is True)
    if plot_training:
        train_visualization(net, losses, testX, testY)
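
A minimal sketch of how this wrapper might be invoked. Net and the string
selectors 'mse'/'sgd' are illustrative assumptions (example #15 documents such
selectors), not this project's actual entry point:

net = Net()  # hypothetical model class (assumption)
run_train(net,
          criterion='mse',
          optimizer='sgd',
          eta=1e-3,
          plot_data=False,
          plot_training=True)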
Code Example #2
def train(model, train_loader, val_loader, criterion, opt, n_epochs,
          scheduler):
    val = []
    device = torch.device(
        "cuda") if torch.cuda.is_available() else torch.device("cpu")
    print("Starting Training Loop...")
    sys.stdout.flush()
    for epoch in range(n_epochs):
        model.train()
        n_iters = 0
        train_loss = []
        acc_train = []

        for batch in train_loader:
            model.zero_grad()
            image, labels = batch
            image, labels = image.to(device), labels.to(device)
            outputs = model(image)
            loss = criterion(outputs, labels)
            loss.backward()
            opt.step()
            acc = compute_accuracy(outputs, labels)
            train_loss.append(loss.item())
            acc_train.append(acc.item())
            n_iters += 1
        loss_train = np.mean(train_loss)
        train_acc = round(np.mean(acc_train), 3)

        model.eval()
        n_iters = 0
        loss_val = []
        acc_val = []
        for batch in val_loader:
            image, label = batch
            image, label = image.to(device), label.to(device)
            pred = model(image)
            val_loss = criterion(pred, label)

            loss_val.append(val_loss.item())
            acc = compute_accuracy(pred, label)
            acc_val.append(acc.item())

            n_iters += 1
        val_acc = round(np.mean(acc_val), 3)
        val_loss = np.mean(loss_val)
        if scheduler is not None:
            scheduler.step()
        # Save a checkpoint whenever validation accuracy improves on the previous epoch
        if epoch > 0:
            if val_acc > val[epoch - 1]:
                torch.save(model.state_dict(),
                           'mobilenet' + args.version + '.pth')
        val.append(val_acc)
        print(
            "Epoch {} | Training loss {} | Validation loss {} | Training Accuracy {} | Validation Accuracy {}"
            .format(epoch, loss_train, val_loss, train_acc, val_acc))
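
This loop assumes a compute_accuracy helper that takes raw logits and integer
labels and returns a 0-dim tensor (hence the .item() calls). A minimal sketch
consistent with that usage, not the project's actual implementation:

import torch

def compute_accuracy(outputs, labels):
    # outputs: (N, C) logits; labels: (N,) class indices
    preds = outputs.argmax(dim=1)
    return (preds == labels).float().mean()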
Code Example #3
File: test.py Project: Neclow/ee559-project1
def run_train(model, alpha, eta, decay, plotting=False, verbose=True, seed=14):
    '''
    Run a single training.

    Parameters
    -------
    model
        The neural network
    alpha
        The auxiliary loss coefficient
    eta
        Learning rate for training
    decay
        L2-regularization coefficient
    plotting
        If true, plots training loss and training accuracy at each epoch
    verbose
        If true, gives additional information during training (loss at each epoch)
    seed
        Random seed (for reproducibility)
    '''

    # Generate data
    torch.manual_seed(seed)  # For reproducibility
    train_loader, test_loader = load_data(seed=seed)

    # Apply training mode and weight initialization
    model.train()
    model.apply(weight_initialization)

    # Train model
    start = time.time()
    tr_loss, tr_acc = train(model,
                            train_loader,
                            alpha=alpha,
                            eta=eta,
                            decay=decay,
                            verbose=verbose,
                            plotting=plotting)

    print('\n Training ended. Training time: %.2f s \n' %
          (time.time() - start))

    model.eval()  # Disable dropout layers for testing
    final_train_accuracy = compute_accuracy(model, train_loader)
    final_test_accuracy = compute_accuracy(model, test_loader)

    # Visualize data if plotting
    if plotting:
        train_visualization(model, tr_loss, tr_acc, final_test_accuracy)

    print('Train accuracy: %.4f // Test accuracy: %.4f' %
          (final_train_accuracy, final_test_accuracy))
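
model.apply(weight_initialization) above expects a function that is called once
per submodule. A minimal sketch of such an initializer; the Xavier scheme is an
assumption, the project may use a different one:

import torch.nn as nn

def weight_initialization(m):
    # Called once per submodule by model.apply(...)
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)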
Code Example #4
def eval(model, val_loader, save_img, folder):
    val_accuracy_batch = []
    device = torch.device(
        "cuda") if torch.cuda.is_available() else torch.device("cpu")
    n = 0
    for X_batch, y_batch in tqdm(val_loader):
        X_batch_gpu, y_batch = X_batch.to(device), y_batch.to(device)
        logits = model(X_batch_gpu)

        accuracy = compute_accuracy(logits, y_batch, device=device)
        val_accuracy_batch.append(accuracy.item())
        if save_img:
            # Map class indices back to class names
            labels = val_loader.dataset.class_to_idx
            classes = list(labels.keys())
            pred_num = torch.argmax(logits, dim=1)
            for i in range(len(X_batch_gpu)):
                pred_label = classes[list(labels.values()).index(pred_num[i])]
                true_label = classes[list(labels.values()).index(y_batch[i])]
                img = X_batch_gpu[i] / 2 + 0.5  # unnormalize
                npimg = img.to('cpu').numpy().transpose(1, 2, 0)

                fig = plt.figure(figsize=(1, 1))
                fig.figimage(npimg,
                             xo=0,
                             yo=0,
                             origin='upper',
                             resize=True,
                             norm=True)
                if true_label == pred_label:
                    fig.suptitle(pred_label, color="green", fontsize="x-small")
                else:
                    fig.suptitle(pred_label, color="red", fontsize="x-small")


#                     plt.imsave(os.path.join('./{}/'.format(folder) + 'img{}_{}_{}.png'.format(n,true_label,pred_label)),npimg)
                plt.savefig(
                    os.path.join(
                        './{}/'.format(folder) +
                        'img{}_{}_{}.png'.format(n, true_label, pred_label)))
                plt.close(fig)
                n += 1
    val_accuracy_overall = np.mean(val_accuracy_batch) * 100
    return val_accuracy_overall
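
The img / 2 + 0.5 step undoes an input normalization with per-channel mean 0.5
and std 0.5, which would correspond to a transform like the sketch below (an
assumption inferred from the arithmetic, not confirmed by the snippet):

from torchvision import transforms

# With this normalization applied to inputs, x / 2 + 0.5 maps them back to [0, 1]
normalize = transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))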
Code Example #5
    with open(detection_data_fname, 'rb') as in_file:
        all_frames = pickle.load(in_file)
    detected_video_objects = util.smooth_objects(all_frames)
    util.align_objects_to_screen(video_idx, detected_video_objects)
    detected_objects.append(detected_video_objects)

participant_accuracies = []
for participant in participants:
    print('Running participant {}...'.format(participant.ID))
    participant_videos = [participant.videos[i - 1] for i in VIDEOS]
    video_accuracies = []
    for (experiment_video, video_objects) \
            in zip(participant_videos, detected_objects):

        mle = hmm.forwards_backwards(SIGMA, TAU, experiment_video,
                                     video_objects)
        ground_truth = [frame.target for frame in experiment_video.frames]
        video_accuracy = metrics.compute_accuracy(mle, ground_truth)
        print('Video {} accuracy: {}'.format(experiment_video.video_idx,
                                             video_accuracy))
        video_accuracies.append(video_accuracy)

    participant_accuracy_mean, participant_accuracy_ste = metrics.mean_and_ste(
        video_accuracies)
    print('Participant accuracy: {} +/- {}'.format(participant_accuracy_mean,
                                                   participant_accuracy_ste))
    participant_accuracies.append(participant_accuracy_mean)

accuracy_mean, accuracy_ste = metrics.mean_and_ste(participant_accuracies)
print('Overall accuracy: {} +/- {}'.format(accuracy_mean, accuracy_ste))
Code Example #6
def train(net, train_loader, alpha, eta, decay,
          n_epochs=25, verbose=False, plotting=False):
    '''
    Train a neural network

    Parameters
    -------
    model
        The neural network
    train_loader
        The training set (DataLoader)
    alpha
        Auxiliary loss coefficient for Siamese networks (0, 0.5 or 1)
        Not taken into account for non-Siamese networks
    eta
        Learning rate
    decay
        L2-regularization coefficient
    n_epochs
        Number of epochs
    verbose
        If true, print loss at each epoch
    plotting
        If true, collects training accuracy at each epoch for future plotting

    Returns
    -------
    tr_losses (tensor)
        Training losses collected at each epoch
    tr_accuracies (tensor)
        Training accuracies collected at each epoch
        If plotting is False, tr_accuracies will only consist of zeros.
    '''
    
    aux_crit = nn.CrossEntropyLoss()
    binary_crit = nn.BCELoss()
    optimizer = optim.Adam(net.parameters(), lr=eta, weight_decay=decay)

    tr_losses = torch.zeros(n_epochs)
    tr_accuracies = torch.zeros(n_epochs)

    for e in range(n_epochs):
        # Reset training/validation loss
        tr_loss = 0

        # Training mode
        net.train()

        for (trainX, trainY, trainC) in train_loader:
            # Forward pass
            out, aux = net(trainX)

            # Binary classification loss
            binary_loss = binary_crit(out, trainY.float())

            # Compute auxiliary loss for Siamese networks
            if aux is not None:
                # Separate outputs and target classes for each image
                aux1, aux2 = aux.unbind(1)
                c1, c2 = trainC.unbind(1)

                # Auxiliary loss
                aux_loss = aux_crit(aux1, c1) + aux_crit(aux2, c2)
            else:
                # No auxiliary loss for non-Siamese networks
                aux_loss = 0
                
            # Total loss = Binary loss + alpha*auxiliary loss
            total_loss = binary_loss + alpha*aux_loss
            tr_loss += total_loss.item()

            # Backward pass
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

        if plotting:
            # Collect accuracy data for later plotting
            tr_accuracies[e] = compute_accuracy(net, train_loader)

        # Collect loss data
        tr_losses[e] = tr_loss

        if verbose:
            print('Epoch %d/%d, Binary loss: %.3f, Auxiliary loss: %.3f' %
                  (e+1, n_epochs, binary_loss, aux_loss))

    return tr_losses, tr_accuracies
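
Note that compute_accuracy is called here with (net, train_loader), unlike the
(logits, labels) signature in example #2. A minimal sketch of this loader-based
variant, assuming batches of (X, Y, C) triplets and a sigmoid output in [0, 1]
as implied by the BCELoss above (not the project's actual implementation):

import torch

def compute_accuracy(net, loader):
    correct, total = 0, 0
    with torch.no_grad():
        for X, Y, _ in loader:
            out, _ = net(X)  # out: probabilities in [0, 1]
            preds = (out > 0.5).long()
            correct += (preds == Y).sum().item()
            total += Y.numel()
    return correct / total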
Code Example #7
def trial(net, alpha, eta, decay, n_trials=30, n_epochs=25, start_seed=0, verbose=False):
    '''
    Perform a trial on a network, i.e. several rounds of training.

    Parameters
    -------
    net
        The neural network
    alpha
        Auxiliary loss coefficient for Siamese networks (0, 0.5 or 1)
    eta
        Learning rate
    decay
        L2-regularization coefficient
    n_trials
        Number of trainings to perform (Default: 30)
    n_epochs
        Number of training epochs per trial (Default: 25)
    start_seed
        Indicates from where seeds are generated.
        start_seed = 0 with 20 trials means that seeds will be 0, ..., 19

        (This is useful to ensure that different datasets were used for
        hyperparameter optimization and trials)
    verbose
        If true, prints final loss, training accuracy and test accuracy for each trial

    Returns
    -------
    all_losses
        Training losses accumulated at each epoch for each trial
    tr_accuracies
        Final train accuracy reported at the end of each trial
    te_accuracies
        Final test accuracy reported at the end of each trial
    '''
    
    all_losses = torch.zeros((n_trials, n_epochs))
    tr_accuracies = torch.zeros(n_trials)
    te_accuracies = torch.zeros(n_trials)
    for i in range(n_trials):
        # Shuffle data (fresh seed per trial, offset by start_seed)
        torch.manual_seed(start_seed + i)
        train_loader, test_loader = load_data(seed=start_seed + i)

        # Reset weights
        net.train()
        net.apply(weight_initialization)

        # Train
        start = time.time()
        tr_loss, _ = train(net, train_loader, alpha=alpha, 
                           eta=eta, decay=decay, n_epochs=n_epochs)
        
        print('Trial %d/%d... Training time: %.2f s' % (i+1, n_trials, time.time()-start))

        # Collect data
        all_losses[i] = tr_loss

        # Compute train and test accuracy
        net.eval() # Disable dropout layers in eval mode
        with torch.no_grad():
            tr_accuracies[i] = compute_accuracy(net, train_loader)
            te_accuracies[i] = compute_accuracy(net, test_loader)

        if verbose:
            print('Loss: %.4f, Train acc: %.4f, Test acc: %.4f' %
                  (tr_loss[-1], tr_accuracies[i], te_accuracies[i]))

    # Print trial results
    print('Train accuracy - mean: %.4f, std: %.4f, median: %.4f' %
         (tr_accuracies.mean(), tr_accuracies.std(), tr_accuracies.median()))
    print('Test accuracy - mean: %.4f, std: %.4f, median: %.4f' %
         (te_accuracies.mean(), te_accuracies.std(), te_accuracies.median()))

    return all_losses, tr_accuracies, te_accuracies
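
A hypothetical call, assuming a project-local SiameseNet class and the
hyperparameter values named in the docstring:

net = SiameseNet()  # hypothetical model class (assumption)
all_losses, tr_acc, te_acc = trial(net, alpha=0.5, eta=1e-3, decay=1e-4,
                                   n_trials=30, n_epochs=25, verbose=True)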
Code Example #8
def train_pred_labels(model, train, val, auxiliary_weight=1., mini_batch_size=100,
                          lr=3e-4, nb_epochs=100, patience=20, **kwargs):
    """
        Train the PyTorch model on the training set.

        Parameters
        ----------
        model : PyTorch NN object
            PyTorch neural network model
        train : TensorDataset
            Dataset containing inputs, targets, classes for training (train_inner)
        val : TensorDataset
            Dataset containing inputs, targets, classes for validation
        auxiliary_weight: float
            Weight of auxiliary loss
        mini_batch_size : int
            The size of the batch processing size
        lr : float
            Learning rate for the model training
        nb_epochs : int
            The number of epochs used to train the model
        patience : int
            number of epochs without val improvement for early stopping (None to disable)

        Returns
        -------

        (NN object, train loss history, val accuracy history)
    """
    train_losses = []
    val_accs = []

    # Defining the optimizer for GD
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # Defining the criteria to calculate losses
    criterion = nn.BCEWithLogitsLoss()         # for Binary Classification
    criterion_digit = nn.CrossEntropyLoss()    # for MultiClass Classification

    # Defining the early stopping criterion
    early_stopping = EarlyStopping(patience)

    # Defining DataLoaders for better mini-batches handling
    # Shuffling makes batches differ between epochs and results in more robust training
    train_loader = DataLoader(train, mini_batch_size, shuffle=True)

    # Learning loop
    for e in range(nb_epochs):
        # Train the input dataset by dividing it into mini_batch_size small datasets
        for train_input, train_target, train_class in train_loader:
            output, output_first_digit, output_second_digit = model(train_input)
            loss_comparison = criterion(output, train_target)
            loss_digits = criterion_digit(output_first_digit, train_class[:, 0]) + \
                          criterion_digit(output_second_digit, train_class[:, 1])
            loss = loss_comparison + auxiliary_weight * loss_digits

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_losses.append(loss.item())
        val_accs.append(compute_accuracy(model, val, mini_batch_size))

        # If the validation accuracy has not improved enough in the last patience epochs
        # then stop training
        if early_stopping(val_accs[-1]):
            break

    return model, train_losses, val_accs
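
EarlyStopping is used above as a callable that returns True once validation
accuracy has stopped improving for patience epochs. A minimal sketch consistent
with that usage; the real project class may differ:

class EarlyStopping:
    # Hypothetical reconstruction: tracks the best validation accuracy seen so far
    def __init__(self, patience):
        self.patience = patience
        self.best = float('-inf')
        self.stale = 0

    def __call__(self, val_acc):
        # Returns True when training should stop
        if self.patience is None:
            return False
        if val_acc > self.best:
            self.best, self.stale = val_acc, 0
        else:
            self.stale += 1
        return self.stale >= self.patience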
Code Example #9
def main():
    bEntropy = True
    input_model_paths = ['/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/ensemble_B/BidirectionalLSTM_0.10_1200_1_250_01_0.50_2.0000/C/999/','/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/ensemble_B/BidirectionalLSTM_0.10_1200_1_250_01_0.50_2.0000/D/999','/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/ensemble_B/BidirectionalLSTM_0.10_1200_1_250_01_0.50_2.0000/E/999/','/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/ensemble_B/BidirectionalLSTM_0.10_1200_1_250_01_0.50_2.0000/F/999/','/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/ensemble_B/BidirectionalLSTM_0.10_1200_1_250_01_0.50_2.0000/G/999/','/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/ensemble_B/BidirectionalLSTM_0.10_1200_1_250_01_0.50_2.0000/H/999/','/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/ensemble_B/BidirectionalLSTM_0.10_1200_1_250_01_0.50_2.0000/I/999/']

    '''
    input_model_paths = ['/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_MCdropout/BidirectionalLSTM_0.10_1200_1_250_01_0.50_2.0000/D/999/']
    
    # For NaiveEnsemble
    input_model_paths = ['/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTM_0.50_1200_1_512_03_0.50_2.0000/B_C_D_E_F_G_H_I/1/',
                         '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTM_0.25_1200_1_512_03_0.50_2.0000/B_C_D_E_F_G_H_I/1/',
                         '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTM_0.15_1200_1_512_03_0.50_2.0000/B_C_D_E_F_G_H_I/1/',
                         '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTM_0.10_1200_1_512_03_0.50_2.0000/B_C_D_E_F_G_H_I/1/']
    '''
    '''
    #For Bootstrap Random Prior Ensemble
    input_model_paths=['/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTMWithRandomPrior_0.10_1200_1_250_01_0.50_2.0000/B/999/',
                       '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTMWithRandomPrior_0.10_1200_1_250_01_0.50_2.0000/C/999/',
                       '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTMWithRandomPrior_0.10_1200_1_250_01_0.50_2.0000/E/999/',
                       '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTMWithRandomPrior_0.10_1200_1_250_01_0.50_2.0000/F/999/',
                       '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTMWithRandomPrior_0.10_1200_1_250_01_0.50_2.0000/G/999/',
                       '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTMWithRandomPrior_0.10_1200_1_250_01_0.50_2.0000/H/999/']

    '''
    '''
    #For Random Prior
    input_model_paths=['/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTMWithRandomPrior_0.10_1200_1_250_01_0.50_2.0000/B_C_D_E_F_G_H_I/1/',
                     '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTMWithRandomPrior_0.25_1200_1_250_01_0.50_2.0000/B_C_D_E_F_G_H_I/1/',
                     '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTMWithRandomPrior_0.50_1200_1_250_01_0.50_2.0000/B_C_D_E_F_G_H_I/1/']
    '''

    data_dir ='/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/'
    data_filename ='standardized_data_translate_crop.pkl'
    model_type = 'BidirectionalLSTM' #'BidirectionalLSTMWithRandomPrior' #
    test_users = 'D'
    test_users = test_users.split(' ')
    test_trial = 1

    dataset = data.Dataset(data_dir, data_filename, model_type)
    train_raw_seqs, test_raw_seqs = dataset.get_splits(test_users, test_trial)
    test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]
    test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)
    input_size = dataset.input_size


    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    no_of_samples = sum([len(seq) for seq in test_input_seqs])
    if bEntropy:
        entropy_matrix = np.zeros((len(input_model_paths), no_of_samples, 10))  # batch_size
    else:
        entropy_matrix = np.zeros((len(input_model_paths), no_of_samples, 1))  # batch_size
    # Add ops to save and restore all the variables.
    for k, path in enumerate(input_model_paths):
        #path = '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/logs_translate_ensemble_/BidirectionalLSTM_0.50_1200_1_512_03_0.50_2.0000/B_C_D_E_F_G_H_I/1/'
        softmax_seqs = predict(sess,test_input_seqs,test_reset_seqs,path,bEntropy)
        entropy_matrix[k] = np.vstack(softmax_seqs)
        #print('softmax_seqs:',len(softmax_seqs))
    if bEntropy:
        measure,prediction_seqs = calculate_entropy(entropy_matrix,test_label_seqs)
    else:
        measure,prediction_seqs = calculate_variation_ratio(entropy_matrix,test_label_seqs)


    log_dir = '/volume/USERSTORE/kris_po/Suturing/features_Resnet_Imagenet/ensemble_B/'
    vis_filename = 'MCDropout_User_D_Trial_1_LOUO.png'
    vis_path = os.path.join(log_dir, vis_filename)

    accuracies = [metrics.compute_accuracy(pred_seq, label_seq)
                  for (pred_seq, label_seq) in zip(prediction_seqs, test_label_seqs)]
    accuracy_mean = np.mean(accuracies, dtype=float)
    edit_dists = [metrics.compute_edit_distance(pred_seq, label_seq)
                  for (pred_seq, label_seq) in zip(prediction_seqs, test_label_seqs)]
    edit_dist_mean = np.mean(edit_dists, dtype=float)
    line = '%.6f  %08.3f     ' % (accuracy_mean,
                                   edit_dist_mean)
    fig, axes = data.visualize_predictions(prediction_seqs,test_label_seqs,10, measure)
    axes[0].set_title(line)
    plt.tight_layout()
    plt.savefig(vis_path)
    plt.close(fig)
Code Example #10
File: train_new.py Project: TSLNIHAOGIT/CRNN.tf2
                     math.pow(0.99, (step + 1) / N_BATCH))  # epoch
            learning_rate.assign(lr)

        # Only run validation after a certain number of iteration steps
        if (step + 1) % min(30, N_BATCH) == 0:
            '''
                Every 30 iterations, add the loss to TensorBoard and save the model once
                '''
            ground_truth = [
                each.decode('utf8') for each in ground_truth.numpy()
            ]
            # print('y true',targ)#dense_shape=tf.Tensor([30 10],
            print('ground_truth=', ground_truth)
            decoded = decoder.decode(y_pred_logits, method='beam_search')
            print('decoded', decoded)  #len is batch_size
            acc = compute_accuracy(ground_truth, decoded)
            print('acc', acc)

            # Record the loss at every iteration
            with writer.as_default():
                tf.summary.scalar('loss', batch_loss, step=step)
                tf.summary.scalar('accuracy', acc, step=step)
                tf.summary.scalar('lr', learning_rate.numpy(), step=step)
                writer.flush()
            print('Epoch {} Batch {} Loss {:.4f}  '.format(
                epoch, batch, batch_loss.numpy()))

            # if optimizer.iterations.numpy() % 10 == 0:
            for i in range(3):
                print("real:{:s}  pred:{:s} acc:{:f}".format(
                    ground_truth[i], decoded[i],
Code Example #11
def get_test_metrics():
    out_gs = model(test_inputs)
    loss = tf.reduce_mean(loss_function(out_gs, test_targets))
    p, s = compute_accuracy(out_gs[-1], test_targets)
    return loss, p, s
Code Example #12
train_fraction_solved = []
test_fraction_solved = []

### Training
for it in range(NUM_TRAIN_ITERATIONS):
    train_inputs, train_targets, _ = generate_graphs(NUM_TEST_EXAMPLES,
                                                     NUM_NODES_TEST_RANGE,
                                                     THETA, NODE_SAMPLING_RATE,
                                                     MIN_SOLUTION_PATH_LENGTH,
                                                     generator)

    loss, grads, outs = train_step(train_inputs, train_targets)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

    if it % LOG_ITERATION == 0:
        p, s = compute_accuracy(outs[-1], train_targets)
        loss_, p_, s_ = get_test_metrics()
        train_losses.append(loss.numpy())
        test_losses.append(loss_.numpy())
        train_fraction_predicted.append(p)
        test_fraction_predicted.append(p_)
        train_fraction_solved.append(s)
        test_fraction_solved.append(s_)
        print(
            "Iter: {} | Train loss: {} | Train predict: {} | Train solved: {} |\n\tTest loss: {} | Test predicted: {} | Test solved: {}"
            .format(it, loss.numpy(), p, s, loss_.numpy(), p_, s_))

p_inp = concat_graphs([test_inputs.get_graph_by_index(i) for i in range(10)])
p_target = concat_graphs(
    [test_targets.get_graph_by_index(i) for i in range(10)])
p_raw = test_raw_graphs[:10]
Code Example #13
def main():
    # Reset graph
    tf.reset_default_graph()

    with open("config.json", "r") as f:
        config = json.load(f)

    data = DataLoader(config)

    # Create placeholders
    X = tf.placeholder(tf.float32, [None, 32, 32, 1])
    y = tf.placeholder(tf.float32, [None, 10])

    # Create model and logits
    LeNet = model.LeNet(config)
    logits = LeNet.forward(X)

    # Compute metrics
    cost = compute_loss_xent(logits, targets=y)
    accuracy = compute_accuracy(logits, targets=y)

    # Define optimizer
    optimizer = LeNet.train_optimizer(cost, learning_rate=config["learning_rate"],
                                      beta1=0.9, beta2=0.999, epsilon=1e-08)

    # Merging all summaries
    merged_summary = tf.summary.merge_all()

    # Create saver to save and restore model
    saver = tf.train.Saver(max_to_keep=config["max_to_keep"])

    ## Launching the execution graph for training
    with tf.Session() as sess:
        # Initializing all variables
        sess.run(tf.global_variables_initializer())
        # Visualizing the Graph
        writer = tf.summary.FileWriter("./tensorboard/" +
                                       config["experiment_name"])
        writer.add_graph(sess.graph)

        for i in range(config["num_epochs"]):
            for j in range(config["num_iter_per_epoch"]):
                # Yield batches of data
                batch_X, batch_y = next(data.next_batch(config["batch_size"]))
                # Run the optimizer
                sess.run(optimizer, feed_dict={X: batch_X, y: batch_y})
                # Compute train loss and accuracy
                loss, acc = sess.run([cost, accuracy],
                                     feed_dict={
                                         X: batch_X,
                                         y: batch_y
                                     })

            if (i % config["writer_step"] == 0):
                # Run the merged summary and write it to disk
                s = sess.run(merged_summary,
                             feed_dict={
                                 X: batch_X,
                                 y: batch_y
                             })
                writer.add_summary(s, (i + 1))

            if (i % config["save_step"] == 0):
                # Saving session
                saver.save(sess,
                           "./saver/" + config["experiment_name"] +
                           "/model_epoch",
                           global_step=(i + 1))

            # Evaluate the validation data
            loss_val, acc_val = sess.run([cost, accuracy],
                                         feed_dict={
                                             X: data.X_valid,
                                             y: data.y_valid
                                         })

            if (i % config["display_step"] == 0):
                print("Epoch:", "%03d," % (i + 1), \
                 "loss=", "%.5f," % (loss), \
                 "train acc=", "%.5f," % (acc), \
                 "val loss=", "%.5f," % (loss_val), \
                 "val acc=", "%.5f" % (acc_val)
                 )

        print("Training complete")

    ## Evaluate on test data by loading the saver
    with tf.Session() as sess:
        # Load the network from meta file created by saver
        new_saver = tf.train.import_meta_graph("./saver/" +
                                               config["experiment_name"] +
                                               "/model_epoch-" +
                                               str(config["num_epochs"]) +
                                               ".meta")
        # Restore the parameters
        new_saver.restore(
            sess,
            tf.train.latest_checkpoint("./saver/" + config["experiment_name"] +
                                       "/"))

        loss_test, acc_test = sess.run([cost, accuracy],
                                       feed_dict={
                                           X: data.X_test,
                                           y: data.y_test
                                       })

        print("test loss=", "%.5f," % (loss_test), "test accuracy=",
              "%.5f" % (acc_test))

        print("Testing complete")
Code Example #14
    # metrics_train_dataloader = None # get_train_dataloader(args.data_dir, eval_batch, dataset_version, shuffle=False, use_transforms=False)
    # metrics_test_dataloader = None # get_test_dataloader(args.data_dir, eval_batch, dataset_version, shuffle=False, use_transforms=False)

    model = dispatch_model(args, device)

    wandb.init(project=args.project_name, name=args.run_name, config=args)
    wandb.watch(model, log='all')
    config = wandb.config

    loss_function = CrossEntropyLoss(reduction='mean')
    optimizer = dispatch_optimizer(model, args)
    lr_scheduler = dispatch_lr_scheduler(optimizer, args)

    iteration = 0
    training_accuracy = compute_accuracy(model, train_dataloader, device)
    test_accuracy = compute_accuracy(model, test_dataloader, device)
    wandb.log({'training accuracy': training_accuracy}, step=iteration * bs)
    wandb.log({'test_accuracy': test_accuracy}, step=iteration * bs)

    for epoch in range(args.epochs):
        print(f'epoch {epoch}')
        if args.embedding:
            points = torch.arange(0, 100, dtype=torch.long).cuda()
            embedding = model.compute_embeddings(points).cpu().detach().numpy()
            embeddings.append(embedding)

        for x, y in train_dataloader:
            start_time = time.time()
            if args.test_random_truncate:
                if np.random.rand() < 0.25:
Code Example #15
def trial(net, n_trials = 30, input_criterion = 'mse', input_optimizer = 'sgd',
          n_epochs = 250, eta = 1e-3, start_seed = 0, verbose = False, save_data = False):
    '''
    Perform a trial on a network, i.e. several rounds of training.

    Parameters
    -------
    net
        The neural network
    n_trials
        Number of trainings to perform (Default: 30)
    input_criterion
        String to choose loss function
        'mse': MSE loss
        'cross': Cross-entropy loss
    input_optimizer
        String to choose optimizer
        'sgd': SGD
        'mom': SGD with momentum (0.9)
        'adam': Adam
    n_epochs
        Number of training epochs (Default: 250)
    eta
        Learning rate
    start_seed
        Indicates from where seeds are generated.
        start_seed = 0 with 20 trials means that seeds will be 0, ..., 19
    verbose
        If true, prints final loss, training accuracy and test accuracy for each trial
        Train verbose flag can be set to True to also log the loss every 10 epochs
    save_data
        If true, saves train and test accuracies as a tensor of size (n_trials,) in a .pt file
        Can be used to perform later statistical analyses (e.g. test differences of mean between configurations), if needed (not used for this project)

    Returns
    -------
    all_losses
        Training losses accumulated at each epoch for each trial
    tr_accuracies
        Final train accuracy reported at the end of each trial
    te_accuracies
        Final test accuracy reported at the end of each trial
    '''

    all_losses = torch.zeros((n_trials, n_epochs))
    tr_accuracies = torch.zeros(n_trials)
    te_accuracies = torch.zeros(n_trials)

    for i in range(n_trials):
        # Load data
        torch.manual_seed(start_seed+i)
        trainX, trainY, testX, testY = load_data()

        # Enable training mode and reset weights
        net.train()
        net.weight_initialization()

        # Train
        start = time.time()
        tr_loss = train(net, trainX, trainY, input_criterion,
                        input_optimizer, n_epochs, eta, verbose = False)
        print('Trial %d/%d... Training time: %.2f s' % (i+1, n_trials, time.time()-start))

        # Collect data
        all_losses[i] = tr_loss

        # Compute train and test accuracy
        net.eval() # Dropout layers are disabled in eval mode
        with torch.no_grad():
            tr_accuracies[i] = compute_accuracy(net, trainX, trainY)
            te_accuracies[i] = compute_accuracy(net, testX, testY)

        if verbose:
            print('Loss: %.4f, Train acc: %.4f, Test acc: %.4f' %
                  (tr_loss[-1], tr_accuracies[i], te_accuracies[i]))

    # Print trial results
    print('Train accuracy - mean: %.4f, std: %.4f, median: %.4f' %
         (tr_accuracies.mean(), tr_accuracies.std(), tr_accuracies.median()))
    print('Test accuracy - mean: %.4f, std: %.4f, median: %.4f' %
         (te_accuracies.mean(), te_accuracies.std(), te_accuracies.median()))

    if save_data:
        torch.save(tr_accuracies, f'train_{input_optimizer}_{input_criterion}_{len(net)}.pt')
        torch.save(te_accuracies, f'test_{input_optimizer}_{input_criterion}_{len(net)}.pt')
Code Example #16
train_fraction_predicted = []
test_fraction_predicted = []
train_fraction_solved = []
test_fraction_solved = []

for it in range(NUM_TRAIN_ITERATIONS):
    # Sample train data
    batch_graphs, batch_target_nodes, batch_target_edges, _, _ = sample_batch(
        NUM_TRAIN_EXAMPLES, NUM_ELEMENTS_TRAIN_RANGE, tf_generator)

    loss, grads, outs = train_step(batch_graphs, batch_target_nodes,
                                   batch_target_edges)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

    if it % LOG_ITERATION == 0:
        p, s = compute_accuracy(outs[-1], batch_target_nodes,
                                batch_target_edges)
        loss_, p_, s_ = get_test_metrics()
        train_losses.append(loss.numpy())
        test_losses.append(loss_.numpy())
        train_fraction_predicted.append(p)
        test_fraction_predicted.append(p_)
        train_fraction_solved.append(s)
        test_fraction_solved.append(s_)
        print(
            "Iter: {} | Train loss: {} | Train predict: {} | Train solved: {} |\n\tTest loss: {} | Test predicted: {} | Test solved: {}"
            .format(it, loss.numpy(), p, s, loss_.numpy(), p_, s_))

tg = test_graph.get_graph_by_index(0)

out_g = model(tg)[-1]
plot_graph_edges(tg,