Example 1
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=[get_categorical_accuracy_keras])
        # model.compile(loss='categorical_crossentropy',
        #               optimizer=opt,
        #               metrics=['accuracy'])
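        # get_categorical_accuracy_keras is defined elsewhere in this file;
        # a custom Keras metric of this name would typically look like the
        # following (an illustrative sketch, not the original definition):
        #     def get_categorical_accuracy_keras(y_true, y_pred):
        #         return K.mean(K.equal(K.argmax(y_true, axis=-1),
        #                               K.argmax(y_pred, axis=-1)))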
        """ Load data """
        print('dataset path', DATASET_PATH)
        output_path = ['./img_list.pkl', './label_list.pkl']
        train_dataset_path = DATASET_PATH + '/train/train_data'
        print('train_dataset_path', train_dataset_path)

        if nsml.IS_ON_NSML:
            # Caching file
            nsml.cache(train_data_loader,
                       data_path=train_dataset_path,
                       img_size=input_shape[:2],
                       output_path=output_path)
        else:
            # When running locally, enter the dataset's local path.
            train_data_loader(train_dataset_path,
                              input_shape[:2],
                              output_path=output_path)

        with open(output_path[0], 'rb') as img_f:
            img_list = pickle.load(img_f)
        with open(output_path[1], 'rb') as label_f:
            label_list = pickle.load(label_f)

        # add grayscale
        gray = []
        for j in range(0, len(img_list)):
Example 2
    # model
    model = Model(inputs=model.layers[0].input,
                  outputs=model.get_layer('block5_pool').output)

    # parameter
    input_shape = (224, 224, 3)  # input image shape

    output_path = ['mac_list_v1.pkl', 'label_info_v1.pkl']
    """ Load data """
    print('dataset path', DATASET_PATH)
    train_dataset_path = DATASET_PATH + '/train/train_data'

    nsml.cache(data_loader.train_data_mac_loader,
               data_path=train_dataset_path,
               img_size=input_shape,
               output_path=output_path,
               model=model)
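    # data_loader.train_data_mac_loader is not shown here; MAC (Maximum
    # Activations of Convolutions) descriptors are typically the spatial
    # max over each channel of the 'block5_pool' feature map, roughly
    # vec = feats.max(axis=(1, 2)) per image (illustrative, not the
    # original loader code).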

    with open(output_path[0], 'rb') as img_f:
        img_vecs = pickle.load(img_f)
    with open(output_path[1], 'rb') as label_f:
        label_info = pickle.load(label_f)

    # l2 norm
    img_vecs = img_vecs / np.linalg.norm(img_vecs, axis=1).reshape(-1, 1)

    # similarity
    sim = util.cal_cos_sim(img_vecs, img_vecs)

    # label_bound
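util.cal_cos_sim is not shown in this snippet; because img_vecs is
row-wise L2-normalized just above, cosine similarity reduces to pairwise
dot products. A minimal sketch of such a helper under that assumption
(illustrative, not the original util code):

    import numpy as np

    def cal_cos_sim(a, b):
        # rows of a and b are L2-normalized, so the pairwise cosine
        # similarity matrix is simply a @ b.T
        return np.dot(a, b.T)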
Example 3
    if config.pause:
        nsml.paused(scope=locals())

    bTrainmode = False
    if config.mode == 'train':
        bTrainmode = True
        """ Load data """
        print(DATASET_PATH)
        output_path = ['./img_list.pkl', './label_list.pkl']
        train_dataset_path = DATASET_PATH + '/train/train_data'

        if nsml.IS_ON_NSML:
            # Caching file
            nsml.cache(train_data_loader,
                       data_path=train_dataset_path,
                       output_path=output_path)
        else:
            train_dataset_path = config.debug_data
            train_data_loader(train_dataset_path, output_path=output_path)

        with open(output_path[0], 'rb') as img_f:
            img_list = pickle.load(img_f)
        with open(output_path[1], 'rb') as label_f:
            label_list = pickle.load(label_f)

        queries, references, queries_img, reference_img = \
            convert_to_query_db_data_for_generator(img_list,
                                                   label_list,
                                                   input_shape,
                                                   config.dev_querynum,
                                                   config.dev_referencenum)
        print("mAP devset : query(%d), reference(%d) " %
              (len(queries), len(references)))
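convert_to_query_db_data_for_generator splits the loaded data into a
query set and a reference set so that mean average precision (mAP) can
be tracked on a dev split. A minimal sketch of average precision for a
single query, assuming the reference labels are sorted by descending
similarity to the query (this helper is illustrative, not from the
original code):

    import numpy as np

    def average_precision(ranked_labels, query_label):
        # accumulate precision at each rank where a relevant reference
        # (same label as the query) appears
        hits, precisions = 0, []
        for rank, label in enumerate(ranked_labels, start=1):
            if label == query_label:
                hits += 1
                precisions.append(hits / rank)
        return float(np.mean(precisions)) if precisions else 0.0

mAP is then the mean of average_precision over all queries.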
Example 4
    model = LadderModule()
    if config.gpu:
        model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    class_to_save = ClassToSave()
    bind_model(model, class_to_save, optimizer=optimizer)
    # test mode
    if config.pause:
        nsml.paused(scope=locals())

    # training mode
    if mode == 'train':
        preprocessed_file = ['./processed.pt']
        nsml.cache(preprocess,
                   output_path=preprocessed_file,
                   data=data_loader(DATASET_PATH))
        dataset = torch.load(preprocessed_file[0])
        training_data = DataLoader(dataset['train'],
                                   batch_size,
                                   shuffle=True,
                                   num_workers=4)
        testing_data = DataLoader(dataset['test'],
                                  batch_size,
                                  shuffle=False,
                                  num_workers=4)

        for epoch in range(config.epochs):
            running_loss, running_acc = 0, 0
            num_runs = 0
            model.train()
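The epoch body is cut off here; a minimal sketch of how such a PyTorch
training loop typically continues, assuming the loader yields
(inputs, targets) batches and a cross-entropy objective (the loss choice
and the accuracy bookkeeping are assumptions, not the original code):

    import torch.nn as nn

    criterion = nn.CrossEntropyLoss()
    for inputs, targets in training_data:
        if config.gpu:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()   # backpropagate
        optimizer.step()  # update parameters
        running_loss += loss.item()
        running_acc += (outputs.argmax(dim=1) == targets).float().mean().item()
        num_runs += 1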