Example #1
def get_train_and_test():
    """Load the RumourEval subtask-A train and dev splits."""
    import utils.datasets as ds
    datasets = ds.create_dataset()
    dev = ds.get_X_dataset(datasets,
                           "../dataset/traindev/rumoureval-subtaskA-dev.json")
    train = ds.get_X_dataset(
        datasets, "../dataset/traindev/rumoureval-subtaskA-train.json")
    return train, dev
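For context, a minimal usage sketch (assuming the relative ../dataset paths resolve from the working directory and the returned splits support len()):

train, dev = get_train_and_test()
print(len(train), len(dev))  # sizes of the train and dev splits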
Example #2
def predict_tests():
    # Test FashionAttrsDataset
    csv_file = '/home/wangx/datasets/fashionAI/rank/Tests/question.csv'
    root_dir = '/home/wangx/datasets/fashionAI/rank'
    fashion_dataset = FashionAttrsDataset(csv_file, root_dir, mode='?')
    assert_true(len(fashion_dataset) > 0)

    # Test create_dataset
    csv_file = '/home/wangx/project/torchfashion/questions/{}_{}.csv'
    root_dir = '/home/wangx/datasets/fashionAI/rank'
    for t in order:
        out = create_dataset(label_type=t,
                             csv_file=csv_file,
                             root_dir=root_dir,
                             phase=['test'],
                             label_mode='?')
        # Test image_datasets and dataset_sizes
        assert_true(
            len(out['image_datasets']['test']) == out['dataset_sizes']['test'])
        # Test dataloader
        loader = out['dataloaders']['test']
        batch = next(iter(loader))
        assert_equal(list(batch['image'].shape), [32, 3, 224, 224])

    # Test predict_model
    out = create_dataset('coat_length_labels',
                         csv_file=csv_file,
                         root_dir=root_dir,
                         phase=['test'],
                         label_mode='?')
    dataloader = out['dataloaders']['test']
    model_conv = torchvision.models.resnet34()
    # Parameters of newly constructed modules have requires_grad=True by default
    num_ftrs = model_conv.fc.in_features
    model_conv.fc = nn.Linear(num_ftrs, AttrKey['coat_length_labels'])

    use_gpu = torch.cuda.is_available()
    if use_gpu:
        model_conv = model_conv.cuda()

    saved_model = './log/resnet34-transfer/coat_length_labels.pth'
    result = predict_model(model_conv, saved_model, dataloader, use_gpu)
    assert_equal(len(result), 1453)
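The bare assert_true/assert_equal helpers and the torch calls imply module-level imports not shown in the snippet; a hedged sketch of what such a test file would likely import (the nose source is an assumption):

from nose.tools import assert_true, assert_equal  # assumed source of the assert helpers
import torch
import torch.nn as nn
import torchvision
# FashionAttrsDataset, create_dataset, predict_model, AttrKey and order
# come from the project's own modules.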
Example #3
def main():
    str0 = "Dataset created in:"
    chrono = Chronometer()
    datasets = ds.create_dataset()
    test = ds.get_X_dataset(
        datasets, "../dataset/traindev/rumoureval-subtaskA-dev.json")
    train = ds.get_X_dataset(
        datasets, "../dataset/traindev/rumoureval-subtaskA-train.json")
    train, all_words, freq = vc.vectorise_freq_train(train)
    test = vc.vectorise_freq_test(test, all_words, freq)
    train_vecs, train_labels = vc.get_vec_label(train)
    test_vecs, test_labels = vc.get_vec_label(test)
    str0 += chrono.str_flow_out()
    chrono.rinit()
    nb_model = nb.create_nb_model(train_vecs, train_labels)
    pred_label = nb.nb_predict(nb_model, test_vecs, test_labels)
    str0 += "\nModel Naive bayes:\n"
    str0 += ms.all_mesure(pred_label)
    str0 += chrono.str_flow_out() + "\n"
    chrono.rinit()
    list_label = sorted(list(set(train_labels)))  # needed below by convert_number_to_label
    # train_labels = nn.convert_label_in_number(train_labels, list_label)
    nn_model = nn.create_trained_nn(train_vecs, train_labels, epochs=50)
    # test_labels = nn.convert_label_in_number(test_labels, list_label)
    pred_label = nn.convert_number_to_label(
        nn.predict_nn(nn_model, test_vecs, test_labels), list_label)
    str0 += "\nModel Neural Network:\nEpochs: 50\n"
    str0 += ms.all_mesure(pred_label)
    str0 += chrono.str_flow_out()
    loss, acc = nn_model.evaluate(test_vecs, test_labels)
    steps = [[loss, acc, chrono.stop(), 50]]
    str0 += "\n\nAccuracy Keras:{} \nLoss Keras: {} \n".format(acc, loss)
    for i in range(100, 1050, 50):
        str0 += "\nEpochs: {}\n".format(i)
        nn_model = nn.retrain_model(nn_model, train_vecs, train_labels, 50)
        pred_label = nn.convert_number_to_label(
            nn.predict_nn(nn_model, test_vecs, test_labels), list_label)
        str0 += ms.all_mesure(pred_label)
        str0 += chrono.str_flow_out()
        loss, acc = nn_model.evaluate(test_vecs, test_labels)
        steps.append([loss, acc, chrono.stop(), i])
        str0 += "\n Accuracy Keras:{} \n Loss Keras: {} \n".format(acc, loss)

    with open('result_server.txt', 'w') as stream:
        stream.write(str0)  # plain text; json.dump would escape every newline
    with open('steps.json', 'w') as stream:
        json.dump(steps, stream)
    print(str0)
    print(chrono.str_flow_out())
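Each entry appended to steps is [loss, acc, elapsed time from chrono.stop(), epochs], so the dumped steps.json can be read back with a few lines (a small sketch, not part of the original script, assuming chrono.stop() returns seconds as a number):

import json

with open('steps.json') as stream:
    steps = json.load(stream)
for loss, acc, elapsed, epochs in steps:
    print('{} epochs: acc={:.3f}, loss={:.3f}, {:.1f}s'.format(epochs, acc, loss, elapsed))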
Example #4
def datasets_tests():
    csv_file = '/home/wangx/datasets/fashionAI/web/Annotations/skirt_length_labels_train.csv'
    root_dir = '/home/wangx/datasets/fashionAI/web/'

    # Test FashionAttrsDataset
    fashion_dataset = FashionAttrsDataset(csv_file, root_dir)
    assert_true(len(fashion_dataset) > 0)

    # Test create_dataset
    datasetd = create_dataset('skirt_length_labels')

    # Test image_datasets
    image_datasets = datasetd['image_datasets']
    for _, image_dataset in image_datasets.items():
        assert_true(len(image_dataset) > 0)

    # Test dataloaders
    dataloaders = datasetd['dataloaders']
    for _, dataloader in dataloaders.items():
        next_batch = next(iter(dataloader))
        assert_equal(list(next_batch['image'].shape), [32, 3, 224, 224])
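The asserted shape [32, 3, 224, 224] is batch size 32, 3 RGB channels, and the 224x224 resolution that ResNet-style models expect. A quick way to see what else each batch carries (only the 'image' key appears in the snippet; any other keys are unknown here):

loader = next(iter(dataloaders.values()))  # any phase's loader
batch = next(iter(loader))
print(batch['image'].shape)  # torch.Size([32, 3, 224, 224])
print(list(batch.keys()))    # reveals the remaining batch keys (labels, paths, ...)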
Example #5
    'skirt_length_labels': 'resnet18_c',
    'sleeve_length_labels': 'inceptionresnetv2_c',
}

saved_model = './log/{}/{}.pth'
question_file = './questions_b/{}_{}.csv'
root_dir = '/home/wangx/datasets/fashionAI/z_rank'
answer = './questions_b/' + args.answer + '.csv'

# Iterate over each attribute
for t in order:
    # Create dataloader for each attribute
    out = create_dataset(t,
                         csv_file=question_file,
                         root_dir=root_dir,
                         phase=['test'],
                         label_mode='?',
                         shuffle=False,
                         img_size=ImgSizeKey[t],
                         batch_size=8)
    dataloader = out['dataloaders']['test']

    # Create CNN model
    use_gpu = torch.cuda.is_available()
    model_conv = create_model(model_key=ModelKey[t],
                              pretrained=False,
                              num_of_classes=AttrKey[t],
                              use_gpu=use_gpu)

    print('start write {}...'.format(t))
    result = predict_model(model_conv, saved_model.format(SaveFolderKey[t], t),
                           dataloader, use_gpu)
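This snippet is truncated: the dict at its top is the tail of one of the per-attribute lookup tables (it maps attribute names to model names, so most likely ModelKey). A hedged sketch of the shapes these tables take, with values drawn from the other examples where visible and otherwise assumed:

# Illustrative shapes only; the real values live in the full script.
order = ['skirt_length_labels', 'sleeve_length_labels']     # attributes to iterate
ModelKey = {'skirt_length_labels': 'resnet18_c',
            'sleeve_length_labels': 'inceptionresnetv2_c'}  # from the visible tail
AttrKey = {'skirt_length_labels': 6,
           'sleeve_length_labels': 9}                       # class counts, as in Example #6
ImgSizeKey = {'skirt_length_labels': 224,
              'sleeve_length_labels': 299}                  # assumed: 299 for Inception-style nets
SaveFolderKey = ModelKey                                    # assumed: checkpoint folders named after models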
Example #6
    'coat_length_labels': 8,
    'collar_design_labels': 5,
    'lapel_design_labels': 5,
    'neck_design_labels': 5,
    'neckline_design_labels': 10,
    'pant_length_labels': 6,
    'skirt_length_labels': 6,
    'sleeve_length_labels': 9,
}

# Create dataloader
root_dir = '/home/wangx/datasets/fashionAI/base/'
csv_file = './data/' + args.csv_folder + '/{}_{}.csv'
out = create_dataset(args.attribute,
                     csv_file=csv_file,
                     root_dir=root_dir,
                     img_size=args.img_size,
                     batch_size=args.batch_size)
dataloaders = out['dataloaders']
dataset_sizes = out['dataset_sizes']

# Create CNN model
use_gpu = torch.cuda.is_available()
model_conv = create_model(
    model_key=args.model,
    pretrained=eval(args.pretrained),  # eval() turns the 'True'/'False' CLI string into a bool
    num_of_classes=AttrKey[args.attribute],
    use_gpu=use_gpu,
)

criterion = nn.CrossEntropyLoss()
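The snippet ends at the loss definition; in the PyTorch transfer-learning pattern this script follows, the usual continuation is an optimizer, an LR scheduler, and a training loop. A hedged sketch (train_model stands in for whatever loop the project actually defines):

import torch.optim as optim
from torch.optim import lr_scheduler

optimizer_conv = optim.SGD(model_conv.parameters(), lr=0.001, momentum=0.9)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)

# train_model is hypothetical here: a standard loop over dataloaders/dataset_sizes
# that optimizes against criterion and returns the best-performing weights.
model_conv = train_model(model_conv, criterion, optimizer_conv,
                         exp_lr_scheduler, num_epochs=25)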