Example #1
File: SGL.py Project: aahamed/SGL-DANN
def plot_stats(self):
    # save a combined loss/accuracy plot for each learner
    for i in range(self.N):
        prefix = f'Learner {i}'
        fname = os.path.join(self.exp_dir,
                             f'{self.stat_type}-loss-acc-{i}.png')
        plot_loss_acc(self.label_losses[i], self.domain_losses[i],
                      self.top1[i], self.tgt_top1[i], prefix, fname)
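The snippet above shows only the call site; the project's plot_loss_acc itself is not included. A minimal matplotlib sketch of a compatible implementation, assuming the four series are per-epoch lists and fname is a PNG path (the layout below is illustrative, not the project's actual plotting code):

import matplotlib.pyplot as plt

def plot_loss_acc(label_losses, domain_losses, top1, tgt_top1, prefix, fname):
    # two panels: losses on the left, top-1 accuracies on the right
    epochs = range(1, len(label_losses) + 1)
    fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(10, 4))
    ax_loss.plot(epochs, label_losses, label='label loss')
    ax_loss.plot(epochs, domain_losses, label='domain loss')
    ax_loss.set_xlabel('epoch')
    ax_loss.legend()
    ax_acc.plot(epochs, top1, label='source top-1')
    ax_acc.plot(epochs, tgt_top1, label='target top-1')
    ax_acc.set_xlabel('epoch')
    ax_acc.legend()
    fig.suptitle(prefix)
    fig.savefig(fname)
    plt.close(fig)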
def train_with_data_aug():
    # train the model on augmented data
    datagen, X_train, X_val, Y_train, Y_val = data_aug(X, Y)
    sgd = SGD(lr=LR, decay=1e-6, momentum=0.9, nesterov=True)
    adm = Adam(lr=0.001, decay=1e-6)  # alternative optimizer, defined but not used
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    # Keras 2 argument names: steps_per_epoch / epochs replace samples_per_epoch / nb_epoch
    history = model.fit_generator(datagen.flow(X_train,
                                               Y_train,
                                               batch_size=BATCH_SIZE),
                                  steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
                                  epochs=EPOCHS,
                                  validation_data=(X_val, Y_val),
                                  callbacks=[
                                      ReduceLROnPlateau('val_loss',
                                                        factor=0.2,
                                                        patience=20,
                                                        verbose=1,
                                                        mode='auto'),
                                      ModelCheckpoint('model_data_aug.h5',
                                                      save_best_only=True)
                                  ])
    plot_loss_acc(history)

    return model
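data_aug is defined elsewhere in this project; judging by the unpacking above it returns a fitted augmentation generator plus a train/validation split. A rough sketch under that assumption (the split ratio and augmentation parameters are placeholders, not the project's values):

from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator

def data_aug(X, Y, val_ratio=0.2):
    # hold out a validation set, then fit an augmenting generator on the rest
    X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=val_ratio)
    datagen = ImageDataGenerator(rotation_range=10,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 horizontal_flip=True)
    datagen.fit(X_train)  # only needed for featurewise statistics, harmless otherwise
    return datagen, X_train, X_val, Y_train, Y_val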
def train():
    # training configuration

    sgd = SGD(lr=LR, decay=1e-6, momentum=0.9, nesterov=True)
    adm = Adam(lr=0.001, decay=1e-6)  # alternative optimizer, defined but not used
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    history = model.fit(X,
                        Y,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS,
                        validation_split=0.2,
                        callbacks=[
                            LearningRateScheduler(lr_schedule),
                            ModelCheckpoint(MODEL, save_best_only=True)
                        ])
    plot_loss_acc(history)
    return model
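Both training functions hand the Keras History object to plot_loss_acc. The project's version is not shown; a minimal sketch that would also accept the DataFrame used in example #5 (whether the metric key is 'acc' or 'accuracy' depends on the Keras version, so both are tried):

import matplotlib.pyplot as plt

def plot_loss_acc(history):
    # accept either a Keras History object or a DataFrame with the same columns
    hist = history.history if hasattr(history, 'history') else history
    acc_key = 'accuracy' if 'accuracy' in hist else 'acc'
    fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(10, 4))
    ax_loss.plot(hist['loss'], label='train loss')
    if 'val_loss' in hist:
        ax_loss.plot(hist['val_loss'], label='val loss')
    ax_loss.set_xlabel('epoch')
    ax_loss.legend()
    ax_acc.plot(hist[acc_key], label='train acc')
    if 'val_' + acc_key in hist:
        ax_acc.plot(hist['val_' + acc_key], label='val acc')
    ax_acc.set_xlabel('epoch')
    ax_acc.legend()
    plt.show()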
Example #4
def train(img_size,
          device=torch.device('cpu'),
          learning_rate=1e-3,
          num_epochs=500,
          decay_step=5,
          gamma=0.98,
          num_classes=10,
          lambda_=0.5,
          m_plus=0.9,
          m_minus=0.1,
          checkpoint_folder=None,
          checkpoint_name=None,
          load_checkpoint=False,
          graphs_folder=None):
    '''
    Function to train the DeepCaps Model
    '''
    checkpoint_path = None
    if checkpoint_folder is not None and checkpoint_name is not None:
        checkpoint_path = os.path.join(checkpoint_folder, checkpoint_name)

    deepcaps = DeepCapsModel(num_class=num_classes,
                             img_height=img_size,
                             img_width=img_size,
                             device=device).to(device)  #initialize model

    # load the latest checkpoint if one exists
    if load_checkpoint and checkpoint_path is not None and os.path.exists(
            checkpoint_path):
        try:
            deepcaps.load_state_dict(torch.load(checkpoint_path))
            print("Checkpoint loaded!")
        except Exception as e:
            print(e)
            sys.exit()

    optimizer = torch.optim.Adam(deepcaps.parameters(), lr=learning_rate)
    # lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=decay_step, gamma=gamma)

    best_accuracy = 0

    training_loss_list = []
    training_acc_list = []
    testing_loss_list = []
    testing_acc_list = []

    #training and testing
    for epoch_idx in range(num_epochs):

        print(
            f"Training and testing for epoch {epoch_idx} began with LR : {get_learning_rate(optimizer)}"
        )
        #Training
        batch_loss = 0
        batch_accuracy = 0
        batch_idx = 0

        deepcaps.train()  #train mode
        for batch_idx, (train_data, labels) in tqdm(
                enumerate(train_loader)):  #from training dataset

            data, labels = train_data.to(device), labels.to(device)
            onehot_label = onehot_encode(
                labels, num_classes=num_classes,
                device=device)  #convert the labels into one-hot vectors.

            optimizer.zero_grad()

            outputs, _, reconstructed, indices = deepcaps(data, onehot_label)
            loss = deepcaps.loss(x=outputs,
                                 reconstructed=reconstructed,
                                 data=data,
                                 labels=onehot_label,
                                 lambda_=lambda_,
                                 m_plus=m_plus,
                                 m_minus=m_minus)

            loss.backward()
            optimizer.step()

            batch_loss += loss.item()
            batch_accuracy += accuracy_calc(predictions=indices, labels=labels)

        epoch_accuracy = batch_accuracy / (batch_idx + 1)
        avg_batch_loss = batch_loss / (batch_idx + 1)
        print(
            f"Epoch : {epoch_idx}, Training Accuracy : {epoch_accuracy}, Training Loss : {avg_batch_loss}"
        )

        training_loss_list.append(avg_batch_loss)
        training_acc_list.append(epoch_accuracy)

        #Testing
        batch_loss = 0
        batch_accuracy = 0
        batch_idx = 0

        deepcaps.eval()  # eval mode
        with torch.no_grad():  # no gradient tracking needed during evaluation
            for batch_idx, (test_data, labels) in tqdm(
                    enumerate(test_loader)):  # from the testing dataset

                data, labels = test_data.to(device), labels.to(device)
                onehot_label = onehot_encode(labels,
                                             num_classes=num_classes,
                                             device=device)

                outputs, _, reconstructed, indices = deepcaps(data, onehot_label)
                loss = deepcaps.loss(x=outputs,
                                     reconstructed=reconstructed,
                                     data=data,
                                     labels=onehot_label,
                                     lambda_=lambda_,
                                     m_plus=m_plus,
                                     m_minus=m_minus)

                batch_loss += loss.item()
                batch_accuracy += accuracy_calc(predictions=indices,
                                                labels=labels)

        epoch_accuracy = batch_accuracy / (batch_idx + 1)
        avg_batch_loss = batch_loss / (batch_idx + 1)
        print(
            f"Epoch : {epoch_idx}, Testing Accuracy : {epoch_accuracy}, Testing Loss : {avg_batch_loss}"
        )

        testing_loss_list.append(avg_batch_loss)
        testing_acc_list.append(epoch_accuracy)

        # lr_scheduler.step()

        if graphs_folder is not None and epoch_idx % 5 == 0:
            plot_loss_acc(path=graphs_folder,
                          num_epoch=epoch_idx,
                          train_accuracies=training_acc_list,
                          train_losses=training_loss_list,
                          test_accuracies=testing_acc_list,
                          test_losses=testing_loss_list)

            plot_reconstruction(path=graphs_folder,
                                num_epoch=epoch_idx,
                                original_images=data.detach(),
                                reconstructed_images=reconstructed.detach(),
                                predicted_classes=indices.detach(),
                                true_classes=labels.detach())

        # keep the checkpoint with the best test accuracy seen so far
        if epoch_accuracy > best_accuracy:
            best_accuracy = epoch_accuracy
            if checkpoint_path is not None:
                torch.save(deepcaps.state_dict(), checkpoint_path)
                print(f"Saved model at epoch {epoch_idx}")
Example #5
File: mlp.py Project: mihailmir/diplom
def predict_classes(model, data):
    # restore the label encoder fitted during training
    with open('encoders/class_encoder.pkl', 'rb') as f:
        class_encoder = pickle.load(f)
    # decode predicted class indices back to the original labels
    print(class_encoder.inverse_transform(model.predict_classes(data)))
    # plot the training history that was saved to CSV during training
    data = pd.read_csv('Training history.csv')
    plot_loss_acc(data)
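The 'Training history.csv' read here is presumably written during training; with Keras that is typically done by adding a CSVLogger callback, for example (the wiring below is an assumption, only the file name comes from the snippet):

from keras.callbacks import CSVLogger

history = model.fit(X, Y,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_split=0.2,
                    callbacks=[CSVLogger('Training history.csv')])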