Example #1
def but5():
    # read the value posted from the form and run the test script on it
    newVal = int(request.form['newVal'])
    test_script.test_data(newVal, 10)
    # return whatever the run wrote to model_output.txt
    with open("model_output.txt", "r") as f:
        contents = f.read()
    return contents
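For context, a minimal sketch of how a handler like but5 could be wired into a Flask app; the app object, route path, and the test_script module are illustrative assumptions, not part of the original example.

from flask import Flask, request
import test_script  # hypothetical module exposing test_data()

app = Flask(__name__)

@app.route('/but5', methods=['POST'])
def but5_view():
    # same logic as but5 above, bound to a POST form field
    new_val = int(request.form['newVal'])
    test_script.test_data(new_val, 10)
    with open("model_output.txt") as f:
        return f.read()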
def main(args):
    cnn = CNN().to(device)

    cnn.train()
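    # MultiLabelSoftMarginLoss applies a per-class sigmoid followed by binary
    # cross-entropy, so each output unit is treated as an independent label.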
    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)

    if args.resume:
        cnn.load_state_dict(torch.load(args.model_path, map_location=device))

    max_acc = 0
    # Train the Model
    train_dataloader = datasets.get_train_data_loader()
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_dataloader):
            # move the batch to the same device as the model (Variable is deprecated)
            images = images.to(device)
            labels = labels.float().to(device)
            predict_labels = cnn(images)
            loss = criterion(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i + 1) % 2 == 0:
                print("epoch: %03g \t step: %03g \t loss: %.5f \t\r" %
                      (epoch, i + 1, loss.item()))
                torch.save(cnn.state_dict(), "./weights/cnn_%03g.pt" % epoch)
        print("epoch: %03g \t step: %03g \t loss: %.5f \t" %
              (epoch, i, loss.item()))
        torch.save(cnn.state_dict(), "./weights/cnn_%03g.pt" % epoch)
        acc = test.test_data("./weights/cnn_%03g.pt" % epoch)
        if max_acc < acc:
            print("update accuracy %.5f." % acc)
            max_acc = acc
            shutil.copy("./weights/cnn_%03g.pt" % epoch,
                        "./weights/cnn_best.pt")
        else:
            print("do not update %.5f." % acc)

    torch.save(cnn.state_dict(), "./weights/cnn_last.pt")
    print("save last model")
Example #3
def summary():
    print("summary")
    train_data()
    test_data()
    ans_data()
    print("train2emb模型运行完成")
Example #4
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.1),
                  metrics=['accuracy'])
    # start training
    model.fit(x_train, y_train, batch_size=1200, epochs=20)

    # save weights
    model.save_weights('weights.h5', overwrite=True)

    # validation
    result = model.evaluate(x_train, y_train, batch_size=240)
    print('\nTrain Acc', result[1])

    result = model.evaluate(x_test, y_test, batch_size=240)
    print('\nTest Acc', result[1])

else:
    # load weights
    model.load_weights('weights.h5')

    # predict
    files = [
        '0.BMP', '1.BMP', '2.BMP', '3.BMP', '4.BMP', '5.BMP', '6.BMP', '7.BMP',
        '8.BMP', '9.BMP'
    ]
    for i in range(10):

        array = list(model.predict(test_data(files[i]))[0])
        # print(array)
        print(array.index(max(array)))
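The same loop can be written with numpy's argmax, which avoids converting the prediction vector to a list; this assumes, as above, that test_data(name) returns a single preprocessed image batch.

import numpy as np

for name in files:
    probs = model.predict(test_data(name))[0]
    print(int(np.argmax(probs)))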
Example #5
def train_emotic(result_path, model_path, train_log_path, val_log_path, ind2cat, ind2vad, context_norm, body_norm,
                 context_mask_norm, context_seg_norm, context_depth_norm,
                 args):
    ''' Prepare the dataset, dataloaders and models, then run training followed by validation.
    :param result_path: Directory path to save the results (val_predictions mat object, val_thresholds npy object).
    :param model_path: Directory path to load pretrained base models and save the models after training.
    :param train_log_path: Directory path to save the training logs.
    :param val_log_path: Directory path to save the validation logs.
    :param ind2cat: Dictionary converting integer index to categorical emotion.
    :param ind2vad: Dictionary converting integer index to continuous emotion dimension (Valence, Arousal and Dominance).
    :param context_norm: List containing mean and std values for context images.
    :param body_norm: List containing mean and std values for body images.
    :param context_mask_norm: List containing mean and std values for context mask images.
    :param context_seg_norm: List containing mean and std values for context segmentation images.
    :param context_depth_norm: List containing mean and std values for context depth images.
    :param args: Runtime arguments.
    '''
    # Load preprocessed data from npy files
    train_context = np.load(os.path.join(args.data_path, 'train_context_arr.npy'))
    train_context_mask = np.load(os.path.join(args.data_path, 'train_context_mask_arr.npy'))
    train_context_depth = np.load(os.path.join(args.data_path, 'train_context_depth_arr.npy'))
    train_context_seg = np.load(os.path.join(args.data_path, 'train_context_seg_arr.npy'))
    train_body = np.load(os.path.join(args.data_path, 'train_body_arr.npy'))
    train_cat = np.load(os.path.join(args.data_path, 'train_cat_arr.npy'))
    train_cont = np.load(os.path.join(args.data_path, 'train_cont_arr.npy'))

    val_context = np.load(os.path.join(args.data_path, 'val_context_arr.npy'))
    val_context_mask = np.load(os.path.join(args.data_path, 'val_context_mask_arr.npy'))
    val_context_depth = np.load(os.path.join(args.data_path, 'val_context_depth_arr.npy'))
    val_context_seg = np.load(os.path.join(args.data_path, 'val_context_seg_arr.npy'))
    val_body = np.load(os.path.join(args.data_path, 'val_body_arr.npy'))
    val_cat = np.load(os.path.join(args.data_path, 'val_cat_arr.npy'))
    val_cont = np.load(os.path.join(args.data_path, 'val_cont_arr.npy'))

    print('train ', 'context ', train_context.shape, 'body', train_body.shape, 'cat ', train_cat.shape, 'cont',
          train_cont.shape)
    print('val ', 'context ', val_context.shape, 'body', val_body.shape, 'cat ', val_cat.shape, 'cont', val_cont.shape)

    # Initialize Dataset and DataLoader 
    train_transform = transforms.Compose([transforms.ToPILImage(), transforms.RandomHorizontalFlip(),
                                          transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                                          transforms.ToTensor()])
    test_transform = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
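    # Augmentation (random flip + color jitter) is applied only to the training split;
    # the validation transform keeps a plain ToPILImage -> ToTensor pipeline.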

    train_dataset = Emotic_PreDataset(train_context, train_body, train_context_mask, train_context_seg,
                                      train_context_depth, train_cat, train_cont, train_transform, context_norm,
                                      body_norm, context_mask_norm, context_seg_norm, context_depth_norm)
    val_dataset = Emotic_PreDataset(val_context, val_body, val_context_mask, val_context_seg, val_context_depth,
                                    val_cat, val_cont, test_transform, context_norm, body_norm, context_mask_norm,
                                    context_seg_norm, context_depth_norm)

    train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, args.batch_size, shuffle=False)

    print('train loader ', len(train_loader), 'val loader ', len(val_loader))

    # Prepare models 
    model_context, model_body, model_ABN, model_context_seg, model_context_depth = prep_models(
        context_model=args.context_model, body_model=args.body_model, model_dir=model_path)

    emotic_model = Emotic(list(model_context.children())[-1].in_features, list(model_body.children())[-1].in_features,
                          512, 512, args)
    if args.context_ABN:
        model_context = model_ABN
    else:
        model_context = nn.Sequential(*(list(model_context.children())[:-1]))
    model_body = nn.Sequential(*(list(model_body.children())[:-1]))
    model_context_seg = nn.Sequential(*(list(model_context_seg.children())[:-1]))
    model_context_depth = nn.Sequential(*(list(model_context_depth.children())[:-1]))
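    # Dropping the final child module strips the classification head from each backbone,
    # leaving a feature extractor whose output width matches the in_features read above.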

    for param in emotic_model.parameters():
        param.requires_grad = True
    for param in model_context.parameters():
        param.requires_grad = True
    for param in model_body.parameters():
        param.requires_grad = True

    device = torch.device("cuda:%s" % (str(args.gpu)) if torch.cuda.is_available() else "cpu")
    # device = torch.device("cpu")

    if args.context_depth and args.context_seg:
        for param in model_context_seg.parameters():
            param.requires_grad = True
        for param in model_context_depth.parameters():
            param.requires_grad = True
        opt = optim.Adam((list(emotic_model.parameters()) + list(model_context.parameters()) + list(
            model_body.parameters()) + list(model_context_depth.parameters()) + list(model_context_seg.parameters())),
                         lr=args.learning_rate, weight_decay=args.weight_decay)
    elif args.context_depth:
        for param in model_context_depth.parameters():
            param.requires_grad = True
        opt = optim.Adam((list(emotic_model.parameters()) + list(model_context.parameters()) + list(
            model_body.parameters()) + list(model_context_depth.parameters())),
                         lr=args.learning_rate, weight_decay=args.weight_decay)
    elif args.context_seg:
        for param in model_context_seg.parameters():
            param.requires_grad = True
        opt = optim.Adam((list(emotic_model.parameters()) + list(model_context.parameters()) + list(
            model_body.parameters()) + list(model_context_seg.parameters())),
                         lr=args.learning_rate, weight_decay=args.weight_decay)
    else:
        opt = optim.Adam((list(emotic_model.parameters()) + list(model_context.parameters()) + list(
            model_body.parameters())),
                         lr=args.learning_rate, weight_decay=args.weight_decay)
    scheduler = StepLR(opt, step_size=7, gamma=0.1)
    disc_loss = DiscreteLoss(args.discrete_loss_weight_type, device)
    if args.continuous_loss_type == 'Smooth L1':
        cont_loss = ContinuousLoss_SL1()
    else:
        cont_loss = ContinuousLoss_L2()

    train_writer = SummaryWriter(train_log_path)
    val_writer = SummaryWriter(val_log_path)

    # training
    train_data(opt, scheduler, [model_context, model_body, emotic_model, model_context_seg, model_context_depth],
               device, train_loader, val_loader, disc_loss, cont_loss, train_writer, val_writer, model_path, args)
    # validation
    test_data([model_context, model_body, emotic_model, model_context_seg, model_context_depth], device, val_loader,
              ind2cat, ind2vad, len(val_dataset), result_dir=result_path, test_type='val', args=args)
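A hedged sketch of how train_emotic might be invoked; every path, the [mean, std] lists, the index dictionaries and the parse_args helper below are placeholders for illustration, not values or helpers from the original project.

if __name__ == '__main__':
    args = parse_args()  # hypothetical argparse helper providing data_path, batch_size, gpu, ...
    # ind2cat, ind2vad and the *_norm lists are assumed to be built by earlier preprocessing
    train_emotic(result_path='./results', model_path='./models',
                 train_log_path='./logs/train', val_log_path='./logs/val',
                 ind2cat=ind2cat, ind2vad=ind2vad,
                 context_norm=context_norm, body_norm=body_norm,
                 context_mask_norm=context_mask_norm,
                 context_seg_norm=context_seg_norm,
                 context_depth_norm=context_depth_norm,
                 args=args)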
Example #6
        fp.write(line)
        fp.write(linesep)
    fp.close()


if mode == "train":
    if len(argv) != 5:
        print(
            "USAGE: python topics.py mode dataset-directory model-file [fraction]"
        )
        exit(1)
    fraction = float(argv[4])
    print("Training started.")
    print("Please wait as it could take a while to create model...")
    model = train.train_data(dataset_dir, fraction)
    write_top_words_to_file(model, "distinctive_words.txt")
    print("Top 10 words at each topic is written into distinctive_words.txt.")
    serialize_model(model, model_file)
    print("Model File created")
    exit(0)

if mode == "test":
    model = deserialize_model(model_file)
    print("Model loaded successfully.")
    result = test.test_data(model, dataset_dir)
    print(result)
    exit(0)

print("Operation not supported")
exit(1)
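The snippet uses mode, dataset_dir and model_file without showing where they come from; a plausible sketch of the argv handling that would precede it (an assumption, not the original code):

from sys import argv, exit
from os import linesep

if len(argv) < 4:
    print("USAGE: python topics.py mode dataset-directory model-file [fraction]")
    exit(1)
mode, dataset_dir, model_file = argv[1], argv[2], argv[3]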
Example #7
#percent = input("Enter the percentage of images to train against (0.5 for 50%): ")
#algorithm = input("Enter the algorithm to use ('p' for perceptron, 'n' for naive bayes, 'o' other algorithm TBD): ")

start = timeit.default_timer()
type = 'd'        # selects which dataset/classifier variant is used ('f' routes to the *_f functions below)
percent = 1.0     # fraction of the training images to use
algorithm = 'm'   # 'p' = perceptron, 'n' = naive bayes (per the prompt above)

num_x_regions = 7
num_y_regions = 7

labels = training_labels(type, 1.0)
data_regions = training_data(type, 1.0, num_x_regions, num_y_regions)

test_labels = test_labels(type, 1.0)
test_data_regions = test_data(type, 1.0, num_x_regions, num_y_regions)

sum = 0
vals = []
runs = 10
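# The score returned by each run is accumulated in `sum` and collected in `vals`,
# so an average over the `runs` repetitions can be reported afterwards.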
if algorithm == 'n':
    for i in range(runs):
        ind_start = timeit.default_timer()
        if type == 'f':
            val = naive_bayes_f(labels, data_regions, percent, test_labels,
                                test_data_regions)
            sum += val
            vals.append(val)
        else:
            val = naive_bayes_d(labels, data_regions, percent, test_labels,
                                test_data_regions)