Example #1
def Nonlinear_Test(models):
    print("Load the validation data...")
    start_time = time.time()
    val_imgs, val_idxs = load_val_data(data_dir)
    print("{:.4f} seconds".format(time.time() - start_time))

    # the raw images aren't needed below; scoring uses the precomputed features
    del val_imgs

    print("Extract the image features...")
    val_features = np.load('./val_bow.npy')

    print('Test the classifiers...')
    accuracy = 0
    for class_name in category:
        target_idxs = np.array([
            read_txt(os.path.join(data_dir, '{}_val.txt'.format(class_name)))
        ])
        target_labels = get_labels(val_idxs, target_idxs)

        val_accuracy = models[class_name].score(val_features, target_labels)
        print('{} Classifier validation accuracy:  {:.4f}'.format(
            class_name, val_accuracy))
        accuracy += val_accuracy

    del val_features, target_idxs, target_labels

    print('Average validation accuracy: {:.4f}'.format(accuracy /
                                                       len(category)))
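
The snippet assumes two helpers, read_txt and get_labels (plus globals category and data_dir), defined elsewhere in the repository. A minimal sketch of plausible implementations, under the assumption that each *_val.txt file lists one image index per line (hypothetical, not taken from the source):

import numpy as np

def read_txt(path):
    # one integer image index per line
    with open(path) as f:
        return [int(line) for line in f if line.strip()]

def get_labels(all_idxs, target_idxs):
    # +1 for images belonging to the class, -1 for the rest
    targets = set(np.asarray(target_idxs).ravel().tolist())
    return np.array([1 if idx in targets else -1 for idx in all_idxs])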
Example #2
def validate_model(classifier, date, num_epochs):
    """ Restore the classifier from given data and number of epochs, and run on validation set

    :param classifier: Object with class derived from BaseModel
    :param date: str, timestamp naming the folder that contains the classifier's data
    :param num_epochs: int, the epoch number from which the checkpoint is to be retrieved.
    :return: None
    """
    val_labels, val_ids, val_texts = load_val_data(
        load_texts=(classifier.model_type != ModelType.image_only))
    classifier.validation_test(date, num_epochs, val_ids, val_labels,
                               val_texts)
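
A typical call, with an illustrative checkpoint timestamp and a hypothetical BaseModel subclass (neither name comes from the source):

classifier = ImageTextClassifier()  # hypothetical subclass of BaseModel
validate_model(classifier, date='2019-08-01-14-30-00', num_epochs=25)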
Example #3
def main():
    global args, best_prec1
    best_prec1 = 1e6
    args = parser.parse_args()
    args.start_epoch = 0
    args.epochs = 50
    args.workers = 16
    args.seed = time.time()
    args.print_freq = 100

    # load the dronecrowd dataset
    root = '../dataset/'
    train_step = 1
    train_pair = load_train_data(root, train_step)
    val_step = 1
    val_pair = load_val_data(root, val_step)

    torch.cuda.manual_seed(args.seed)
    # load model
    use_loc = bool(args.use_loc)
    use_trk = bool(args.use_trk)
    model = STANet(use_loc, use_trk).cuda()

    if args.use_mGPUs:
        model = nn.DataParallel(model)

    # materialize the parameter list: a filter() generator would be exhausted
    # by the first optimizer, leaving the other two with no parameters
    model_parameters = [p for p in model.parameters() if p.requires_grad]
    optimizer1 = torch.optim.Adam(model_parameters,
                                  lr=0.000001,
                                  betas=(0.5, 0.999))
    optimizer2 = torch.optim.Adam(model_parameters,
                                  lr=0.00001,
                                  betas=(0.5, 0.999))
    optimizer3 = torch.optim.Adam(model_parameters,
                                  lr=0.000005,
                                  betas=(0.5, 0.999))
    if args.pre_train:
        if os.path.isfile(args.pre_train):
            print("=> loading checkpoint '{}'".format(args.pre_train))
            checkpoint = torch.load(args.pre_train)
            if checkpoint['epoch'] < 10:
                optimizer = optimizer1
            elif checkpoint['epoch'] < 30:
                optimizer = optimizer2
            else:
                optimizer = optimizer3
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.pre_train, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.pre_train))

    # size_average=False is deprecated; reduction='sum' is the equivalent
    criterion = nn.MSELoss(reduction='sum').cuda()

    for epoch in range(args.start_epoch, args.epochs):
        if epoch < 10:
            optimizer = optimizer1
        elif epoch < 30:
            optimizer = optimizer2
        else:
            optimizer = optimizer3

        train(train_pair, model, criterion, optimizer, epoch)
        with torch.no_grad():
            prec1 = validate(val_pair, model, criterion)
        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best MAE {mae:.3f} '.format(mae=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre_train,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.task)
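
The three Adam instances above encode a staged learning-rate schedule: 1e-6 before epoch 10, 1e-5 up to epoch 30, and 5e-6 afterwards. A sketch of an equivalent single-optimizer variant, which also keeps Adam's moment estimates across stage boundaries (the helper name is ours, not from the source):

def set_stage_lr(optimizer, epoch):
    # same schedule as the optimizer1/optimizer2/optimizer3 switch above
    lr = 1e-6 if epoch < 10 else (1e-5 if epoch < 30 else 5e-6)
    for group in optimizer.param_groups:
        group['lr'] = lr

Note that switching between separate optimizers, as the original does, starts each stage with fresh (empty) Adam moment statistics; reassigning the learning rate on one optimizer avoids that.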
Example #4
# train_features and svm_params are computed earlier in the original script
accuracy = 0
for class_name in category:
    target_idxs = np.array(
        [read_txt(os.path.join(data_dir, '{}_train.txt'.format(class_name)))])
    target_labels = get_labels(train_idxs, target_idxs)

    models[class_name] = train_classifier(train_features, target_labels,
                                          svm_params)
    train_accuracy = models[class_name].score(train_features, target_labels)
    print('{} Classifier train accuracy:	{:.4f}'.format(
        class_name, train_accuracy))
    accuracy += train_accuracy

print('Average train accuracy: {:.4f}'.format(accuracy / len(category)))
del train_features, target_labels, target_idxs

print("Load the validation data...")
start_time = time.time()
val_imgs, val_idxs = load_val_data(data_dir)
print("{:.4f} seconds".format(time.time() - start_time))

print("Extract the image features...")
val_features = np.load('./val_bow.npy')

print('Test the classifiers...')
accuracy = 0
for class_name in category:
    target_idxs = np.array(
        [read_txt(os.path.join(data_dir, '{}_val.txt'.format(class_name)))])
    target_labels = get_labels(val_idxs, target_idxs)

    val_accuracy = models[class_name].score(val_features, target_labels)
    print('{} Classifier validation accuracy:	{:.4f}'.format(
        class_name, val_accuracy))
    accuracy += val_accuracy

del val_features, target_idxs, target_labels
Example #5
lr_decay = LearningRateScheduler(PolyDecay(0.01, 0.9, epochs).scheduler)

# ==========
# Generators
# ==========
if model_type == "early_fusion":
    train_generator = utils.early_fusion_generator(
        df=utils.load_train_data(configs.label_depth_color_path),
        batch_size=batch_size,
        resize_shape=(configs.img_width, configs.img_height),
        crop_shape=(configs.img_width, configs.img_height),
        n_classes=34,
        training=True)

    val_generator = utils.early_fusion_generator(
        df=utils.load_val_data(configs.val_depth_color_path),
        batch_size=1,
        resize_shape=(configs.img_width, configs.img_height),
        crop_shape=(configs.img_width, configs.img_height),
        n_classes=34,
        training=False)
elif "cross_fusion" in model_type:
    train_generator = utils.fusion_generator(
        df=utils.load_train_data(configs.label_depth_color_path),
        batch_size=batch_size,
        resize_shape=(configs.img_width, configs.img_height),
        n_classes=34,
        training=True)

    val_generator = utils.fusion_generator(
        df=utils.load_val_data(configs.val_depth_color_path),
        batch_size=1,
        resize_shape=(configs.img_width, configs.img_height),
        n_classes=34,
        training=False)
Example #6
def __init__(self, encoder_layer_num, decoder_layer_num, hidden_dim,
             batch_size, learning_rate, dropout, init_train=True):
    self.encoder_layer_num = encoder_layer_num
    self.decoder_layer_num = decoder_layer_num
    self.hidden_dim = hidden_dim
    self.batch_size = batch_size
    self.learning_rate = learning_rate
    self.dropout = dropout
    self.init_train = init_train
    # ---------fix----------
    self.vocab_size = cfg.vocab_size
    self.max_length = cfg.max_length
    self.embedding_matrix = make_embedding_matrix(cfg.all_captions)
    self.SOS_token = cfg.SOS_token
    self.EOS_token = cfg.EOS_token
    self.idx2word_dict = load_dict()
    # ----------------------

    self.bleu = BLEU('BLEU', gram=[2, 3, 4, 5])
    # self.bleu.reset(test_text=gen_tokens, real_text=self.test_data.tokens)

    if init_train:
        self._init_train()
        (train_week_stock, train_month_stock, t_month_stock,
         train_input_cap_vector, train_output_cap_vector) = load_training_data()
        self.train_data = batch_generator(train_week_stock, train_month_stock,
                                          t_month_stock, train_input_cap_vector,
                                          train_output_cap_vector, self.batch_size)
        self.total_iter = len(train_input_cap_vector)

        self._init_eval()
        (val_week_stock, val_month_stock, val_t_month_stock,
         val_input_cap_vector, val_output_cap_vector) = load_val_data()
        self.val_data = batch_generator(val_week_stock, val_month_stock,
                                        val_t_month_stock, val_input_cap_vector,
                                        val_output_cap_vector, self.batch_size)
        self.val_total_iter = len(val_input_cap_vector)
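
For reference, constructing the model might look like this; the class name and hyperparameter values are illustrative only, not from the source:

captioner = StockCaptionModel(encoder_layer_num=2,
                              decoder_layer_num=2,
                              hidden_dim=256,
                              batch_size=64,
                              learning_rate=1e-3,
                              dropout=0.3,
                              init_train=True)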
Example #7
    batch_size=batch_size,
    log_dir="./logs/ICNet/" + model_type +
    "/{}/".format(strftime("%Y-%m-%d-%H-%M-%S", gmtime())))
lr_decay = LearningRateScheduler(PolyDecay(0.01, 0.9, epochs).scheduler)

# Generators
train_generator = utils.generator(df=utils.load_train_data(),
                                  batch_size=batch_size,
                                  resize_shape=(configs.img_width,
                                                configs.img_height),
                                  crop_shape=(configs.img_width,
                                              configs.img_height),
                                  n_classes=34,
                                  training=True)

val_generator = utils.generator(df=utils.load_val_data(configs.val_label_path),
                                batch_size=1,
                                resize_shape=(configs.img_width,
                                              configs.img_height),
                                crop_shape=(configs.img_width,
                                            configs.img_height),
                                n_classes=34,
                                training=False)

# Optimizer
optim = optimizers.SGD(lr=0.01, momentum=0.9)

# Model
net = ICNet(width=configs.img_width,
            height=configs.img_height,
            n_classes=34,