Example #1
def test(test_dataset, checkpoints_path, batch_size, single_output_idx=None):
    # Build dataloader
    test_dataloader = Dataloader(test_dataset,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 single_output_idx=single_output_idx,
                                 multiple_inputs=TAB_INPUT,
                                 add_normal_cls=ADD_NORMAL_CLS)

    # Load model
    print("Loading best model...")
    model = tf.keras.models.load_model(
        filepath=checkpoints_path,
        compile=False)  # Loads best model automatically
    model.summary()

    # Compile the model
    model.compile(loss=get_losses(),
                  metrics=get_metrics(single_output_idx,
                                      add_normal=ADD_NORMAL_CLS))

    # Evaluate model
    print("Evaluating model...")
    scores = model.evaluate(test_dataloader)
    print("Evaluation results")
    print(scores)
    return scores
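# Example usage (hypothetical paths and values):
# scores = test(test_dataset, checkpoints_path='checkpoints/best_model', batch_size=32)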
Example #2
    def test_getitem(self):
        batch_size = 2
        dataloader = Dataloader(self.dataset, batch_size=batch_size)

        item = dataloader[0]
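        # item[0] holds the two model inputs; the assertions below check their batch and sequence dimensions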
        self.assertEqual(len(item[0]), 2)
        self.assertEqual(len(item[0][0]), batch_size)
        self.assertEqual(len(item[0][1]), batch_size)

        self.assertEqual(len(item[0][0][0]), self.max_len)
        self.assertEqual(len(item[0][0][1]), self.max_len)
Example #3
def train(model,
          train_dataset,
          val_dataset,
          batch_size,
          epochs1,
          epochs2,
          checkpoints_path=None,
          logs_path=None,
          plots_path=None,
          use_multiprocessing=False,
          workers=1,
          single_output_idx=None):
    # Build dataloaders
    train_dataloader = Dataloader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  single_output_idx=single_output_idx,
                                  multiple_inputs=TAB_INPUT,
                                  add_normal_cls=ADD_NORMAL_CLS)
    val_dataloader = Dataloader(val_dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                single_output_idx=single_output_idx,
                                multiple_inputs=TAB_INPUT,
                                add_normal_cls=ADD_NORMAL_CLS)

    # Callbacks
    model_callbacks = [
        tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                             factor=0.5,
                                             patience=5,
                                             min_lr=1e-7),
        CustomEarlyStopping(patience=PATIENCE,
                            minimum_epochs=WAIT_EPOCH_WARMUP),
        CustomModelCheckpoint(filepath=checkpoints_path,
                              save_best_only=True,
                              wait_epoch_warmup=WAIT_EPOCH_WARMUP),
        # It can make the end of an epoch extremely slow
        tf.keras.callbacks.TensorBoard(log_dir=logs_path),
        # WandbCallback(),
    ]

    # # Unfreezing layers
    # print("------------------------------------------")
    # unfreeze_base_model(model, n=UNFREEZE_N)
    # print("------------------------------------------")

    # Compile the model
    model.compile(optimizer=Adam(learning_rate=LR_EPOCH1),
                  loss=get_losses(),
                  metrics=get_metrics(single_output_idx,
                                      add_normal=ADD_NORMAL_CLS))

    # Print model
    model.summary()

    # train the model on the new data for a few epochs
    if epochs1 <= 0:
        print("Skipping training output layers")
    else:
        print("Training output layers...")
        history1 = model.fit(train_dataloader,
                             validation_data=val_dataloader,
                             epochs=epochs1,
                             callbacks=model_callbacks,
                             use_multiprocessing=use_multiprocessing,
                             workers=workers)
        print("Initial training results:")
        print(history1.history)
        if plots_path:
            plot_hist(history1,
                      title="Training output layers",
                      savepath=plots_path,
                      suffix="_initial",
                      show_plot=SHOW_PLOTS)

    # Fine-tune?
    if epochs2 <= 0:
        print("Skipping fine-tuning")
    else:
        # Unfreezing layers
        print("------------------------------------------")
        unfreeze_base_model(model, n=UNFREEZE_N)
        print("------------------------------------------")

        # we need to recompile the model for these modifications to take effect
        # we use SGD with a low learning rate
        print("Fine-tuning model...")
        model.compile(optimizer=SGD(learning_rate=LR_EPOCH2, momentum=0.9),
                      loss=get_losses(),
                      metrics=get_metrics(single_output_idx,
                                          add_normal=ADD_NORMAL_CLS))

        # Print model
        model.summary()

        # we train our model again (this time fine-tuning the top 2 inception blocks
        # alongside the top Dense layers)
        history2 = model.fit(train_dataloader,
                             validation_data=val_dataloader,
                             epochs=epochs2,
                             callbacks=model_callbacks,
                             use_multiprocessing=use_multiprocessing,
                             workers=workers)
        print("Fine-tuning results:")
        print(history2.history)
        if plots_path:
            plot_hist(history2,
                      title="Fine-tuning full model",
                      savepath=plots_path,
                      suffix="_finetuning",
                      show_plot=SHOW_PLOTS)
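# Example usage (hypothetical arguments; epochs1 trains only the new output layers,
# epochs2 fine-tunes the unfrozen base layers at a lower learning rate):
# train(model, train_dataset, val_dataset, batch_size=32, epochs1=10, epochs2=20,
#       checkpoints_path='checkpoints/', logs_path='logs/', plots_path='plots/')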
Example #4
    def test_new(self):
        dataloader = Dataloader(self.dataset)
        self.assertIsNotNone(dataloader)
Example #5
_SAMPLE_VIDEO_FRAMES = 24
_LABEL_MAP_PATH = '/home/pr606/python_vir/yuan/i3d-kinects/data/label_map.txt'
with open(_LABEL_MAP_PATH) as f2:
    kinetics_classes = [x.strip() for x in f2.readlines()]

validate_set = Dataset.DataSet(clip_length=_SAMPLE_VIDEO_FRAMES,
                               sample_step=2,
                               data_root='/home/pr606/Pictures/part_validate_kinetics',
                               annotation_path='/home/pr606/python_vir/yuan/EXTRA_DATA/kinetics_part.json',
                               spatial_transform=None,
                               mode='validation',
                               with_start=True,
                               multi_sample=True)

validate_generator = Dataloader.DataGenerator(
    validate_set,
    batch_size=batch_size,
    ordered_file_path='/home/pr606/python_vir/yuan/EXTRA_DATA/names_in_order.csv')


num_validate = len(validate_generator)  # 1005
print("Total validation samples: {}".format(num_validate))


inputs = tf.placeholder(shape=(batch_size, _SAMPLE_VIDEO_FRAMES, 112, 112, 3), dtype=tf.float32)

mean, variance = tf.nn.moments(inputs, axes=(0, 1, 2, 3), keep_dims=True, name="normalize_moments")

Gamma = tf.constant(1.0, name="scale_factor", shape=mean.shape, dtype=tf.float32)
Beta = tf.constant(0.0, name="offset_factor", shape=mean.shape, dtype=tf.float32)
data = tf.nn.batch_normalization(inputs, mean, variance, offset=Beta, scale=Gamma, variance_epsilon=1e-3)
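# Note: this is TF1 graph-mode code, so `data` above is a symbolic tensor; it would be
# evaluated inside a tf.Session, e.g. sess.run(data, feed_dict={inputs: frame_batch}),
# where `frame_batch` is a hypothetical NumPy array of shape (batch_size, 24, 112, 112, 3).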

Example #6
    train_set = Dataset.DataSet(
        clip_length=_SAMPLE_VIDEO_FRAMES,
        sample_step=2,
        data_root='/home/pr606/Pictures/UCF101DATASET/ucf101',
        annotation_path=
        '/home/pr606/Pictures/dataset_annotations/ucf101_json_file/ucf101_01.json',
        spatial_transform=None,
        mode='train')
    validate_set = Dataset.DataSet(
        clip_length=_SAMPLE_VIDEO_FRAMES,
        sample_step=2,
        data_root='/home/pr606/Pictures/UCF101DATASET/ucf101',
        annotation_path=
        '/home/pr606/Pictures/dataset_annotations/ucf101_json_file/ucf101_01.json',
        spatial_transform=None,
        mode='validation')
    train_generator = Dataloader.DataGenerator(train_set,
                                               batch_size=batch_size)
    validate_generator = Dataloader.DataGenerator(validate_set,
                                                  batch_size=batch_size)
    num_train = len(train_generator)
    num_validate = len(validate_generator)

    print("training data num is %d" % num_train)  # 733
    print("validation data num is %d" % num_validate)  # 291

    graph = tf.get_default_graph()
    with graph.as_default():
        data = tf.placeholder(shape=(batch_size, _SAMPLE_VIDEO_FRAMES,
                                     _IMAGE_SIZE, _IMAGE_SIZE, 1),
                              dtype=tf.float32)
        label = tf.placeholder(shape=(batch_size, NUM_CLASS), dtype=tf.int32)
        result = models(data, class_num=101, scope='at_model')
Example #7
def train():
    train_set = make_dataset('C1-P1_Train')
    train_loader = Dataloader(dataset=train_set, batch_size=opt.train_batch_size,
                              shuffle=True, num_workers=opt.num_workers)

    dev_set = make_dataset('C1-P1_Dev')
    dev_loader = Dataloader(dataset=dev_set, batch_size=opt.dev_batch_size,
                            shuffle=True, num_workers=opt.num_workers)

    net = get_net(opt.model)
    if opt.model.startswith('resnest'):
        model = net(opt.num_classes)  # ResNeSt nets are built with the class count
    else:
        model = net
    model = model.cuda(opt.cuda_devices)

    best_model_params_acc = copy.deepcopy(model.state_dict())
    best_model_params_loss = copy.deepcopy(model.state_dict())

    best_acc = 0.0
    best_loss = float('inf')

    training_loss_list = []
    training_acc_list = []
    dev_loss_list = []
    dev_acc_list = []

    criterion = nn.CrossEntropyLoss()
    
    # optimizer = adabound.AdaBound(model.parameters(), lr=opt.lr, final_lr=0.1)
    # optimizer = torch.optim.Adam(params=model.parameters(), lr=opt.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0001, amsgrad=True)
    optimizer = torch.optim.SGD(model.parameters(), lr=opt.lr, momentum=0.9, weight_decay=5e-4, nesterov=True)
    # optimizer = torch.optim.RMSprop(params=model.parameters(), lr=opt.lr, alpha=0.99, eps=1e-08, weight_decay=5e-4, momentum=0.9, centered=False)
    # optimizer = torch.optim.AdamW(params=model.parameters(), lr=opt.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=5e-4, amsgrad=True)
    step = 0

    # scheduler = StepLR(optimizer, step_size=10, gamma=0.5, last_epoch=-1)
    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=4, verbose=True, cooldown=1)
    record = open('record.txt', 'w')

    early_stopping = EarlyStopping(patience=20, verbose=True)

    for epoch in range(opt.epochs):
        print(f'Epoch: {epoch+1}/{opt.epochs}')
        print('-'*len(f'Epoch: {epoch+1}/{opt.epochs}'))

        training_loss = 0.0
        training_corrects = 0

        model.train()

        for i, (inputs, labels) in enumerate(tqdm(train_loader)):
            inputs = Variable(inputs.cuda(opt.cuda_devices))
            labels = Variable(labels.cuda(opt.cuda_devices))

            optimizer.zero_grad()
            outputs = model(inputs)

            _, preds = torch.max(outputs.data, 1)
            loss = criterion(outputs, labels)
            
            loss.backward()
            optimizer.step()

            training_loss += loss.item() * inputs.size(0)
            training_corrects += torch.sum(preds == labels.data)
        
        training_loss = training_loss / len(train_set)
        training_acc = float(training_corrects) / len(train_set)

        training_loss_list.append(training_loss)
        training_acc_list.append(training_acc)

        print(f'Training loss: {training_loss:.4f}\taccuracy: {training_acc:.4f}')

        model.eval()

        dev_loss = 0.0
        dev_corrects = 0

        for i, (inputs, labels) in enumerate(tqdm(dev_loader)):
            inputs = Variable(inputs.cuda(opt.cuda_devices))
            labels = Variable(labels.cuda(opt.cuda_devices))

            outputs = model(inputs)

            _, preds = torch.max(outputs.data, 1)
            loss = criterion(outputs, labels)

            dev_loss += loss.item() * inputs.size(0)
            dev_corrects += torch.sum(preds == labels.data)

        dev_loss = dev_loss / len(dev_set)
        dev_acc = float(dev_corrects) / len(dev_set)

        dev_loss_list.append(dev_loss)
        dev_acc_list.append(dev_acc)

        print(f'Dev loss: {dev_loss:.4f}\taccuracy: {dev_acc:.4f}\n')

        scheduler.step(dev_loss)
        early_stopping(dev_loss, model)
        if early_stopping.early_stop:
            print("Early Stopping")
            break

        if dev_acc > best_acc:
            best_acc = dev_acc
            best_acc_dev_loss = dev_loss

            best_train_acc = training_acc
            best_train_loss = training_loss

            best_model_params_acc = copy.deepcopy(model.state_dict())
        
        if dev_loss < best_loss:
            the_acc = dev_acc
            best_loss = dev_loss

            the_train_acc = training_acc
            the_train_loss = training_loss

            best_model_params_loss = copy.deepcopy(model.state_dict())

        if (epoch + 1) % 50 == 0:
            model.load_state_dict(best_model_params_loss)
            weight_path = Path(opt.checkpoint_dir).joinpath(
                f'model-{epoch+1}epoch-{best_loss:.02f}-loss-{the_acc:.02f}-acc.pth')
            torch.save(model, str(weight_path))

            model.load_state_dict(best_model_params_acc)
            weight_path = Path(opt.checkpoint_dir).joinpath(
                f'model-{epoch+1}epoch-{best_acc:.02f}-acc.pth')
            torch.save(model, str(weight_path))

            record.write(f'{epoch+1}\n')
            record.write(f'Best training loss: {best_train_loss:.4f}\tBest training accuracy: {best_train_acc:.4f}\n')
            record.write(f'Best dev loss: {best_acc_dev_loss:.4f}\tBest dev accuracy: {best_acc:.4f}\n\n')
            visualization(training_loss_list, training_acc_list, dev_loss_list, dev_acc_list, epoch + 1)


        """
        if (epoch+1) == 100:
            scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=4, verbose=True, cooldown=1)
            early_stopping = EarlyStopping(patience=10, verbose=True)"""
        
        """
        if (epoch+1) >= 50:
            early_stopping(dev_loss,model)
            if early_stopping.early_stop:
                print("Early Stoppping")
                break"""

    print('Based on best accuracy:')
    print(f'Best training loss: {best_train_loss:.4f}\t Best training accuracy: {best_train_acc:.4f}')
    print(f'Best dev loss: { best_acc_dev_loss:.4f}\t Best dev accuracy: {best_acc:.4f}\n')
        
    model.load_state_dict(best_model_params_acc)
    weight_path = Path(opt.checkpoint_dir).joinpath(f'model-{best_acc:.02f}-best_acc.pth')
    torch.save(model, str(weight_path))

    print('Based on best loss:')
    print(f'Best training loss: {the_train_loss:.4f}\t Best training accuracy: {the_train_acc:.4f}')
    print(f'Best dev loss: {best_loss:.4f}\t Best dev accuracy: {the_acc:.4f}\n')
        
    model.load_state_dict(best_model_params_loss)
    weight_path = Path(opt.checkpoint_dir).joinpath(f'model-{best_loss:.02f}-best_loss-{the_acc:.02f}-acc.pth')
    torch.save(model, str(weight_path))

    visualization(training_loss_list, training_acc_list, dev_loss_list, dev_acc_list, epoch+1)
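# Hypothetical entry point (assumes `opt` is an argparse/config namespace defined elsewhere):
# if __name__ == '__main__':
#     train()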
Example #8
                        {'f1':
                         '{0:1.5f}'.format(2 * A / (B + C))})  # pass in a dict to display the experiment metric
                    pbar.update(1)

            return 2 * A / (B + C), A / B, A / C

    evaluator = Evaluate()
    model.fit_generator(dataloader,
                        steps_per_epoch=len(dataloader),
                        epochs=100,
                        callbacks=[evaluator])
    # model.compile(optimizer=M_Nadam(cfg['lr'],multipliers=multipliers), loss=crf.loss_function)


if __name__ == '__main__':
    data = open('../datasets/train_data1.json', 'r',
                encoding='utf-8').readlines()
    val_data = open('../datasets/dev_data.json', 'r',
                    encoding='utf-8').readlines()
    id2char, char2id = json.load(
        open('../datasets/all_chars_me1.json', encoding='utf-8'))
    id2entity, entity2id = json.load(
        open('../datasets/all_entity_type.json', encoding='utf-8'))
    dataloader = Dataloader(data, char2id, entity2id, cfg)
    val_dataloader = Dataloader(val_data, char2id, entity2id, cfg)

    net = crf_model(cfg)
    net.summary()
    # net.load_weights('best_model1.weights')
    main(net, dataloader, val_dataloader)