    hidden_dim = 150
    sentence_len = None
    train_file = os.path.join(DATA_DIR, TRAIN_FILE)
    test_file = os.path.join(DATA_DIR, TEST_FILE)
    with open(train_file, 'r') as fp_train:
        train_filenames = [
            os.path.join(TRAIN_DIR, line.strip()) for line in fp_train
        ]
    with open(test_file, 'r') as fp_test:
        test_filenames = [
            os.path.join(TEST_DIR, line.strip()) for line in fp_test
        ]
    # a shallow copy suffices for a list of strings; deepcopy is unnecessary
    filenames = list(train_filenames)
    filenames.extend(test_filenames)

    corpus = DP.Corpus(DATA_DIR, filenames)
    nlabel = 8

    # create model
    model = LSTMC.LSTMClassifier(embedding_dim=embedding_dim,
                                 hidden_dim=hidden_dim,
                                 vocab_size=len(corpus.dictionary),
                                 label_size=nlabel,
                                 batch_size=batch_size,
                                 use_gpu=use_gpu,
                                 attn_flag=attn_flag)
    if use_gpu:
        model = model.cuda()
    # data processing
    dtrain_set = DP.TxtDatasetProcessing(DATA_DIR, TRAIN_DIR, TRAIN_FILE,
                                         TRAIN_LABEL, sentence_len, corpus)
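The snippet is truncated here and relies on an LSTMC.LSTMClassifier defined elsewhere in the project. As a rough, hypothetical sketch of a module matching that constructor signature (the layer layout and the attn_flag handling are assumptions, not the original implementation):

import torch.nn as nn

class LSTMClassifier(nn.Module):
    # minimal sketch: embedding -> single-layer LSTM -> linear head
    def __init__(self, embedding_dim, hidden_dim, vocab_size, label_size,
                 batch_size, use_gpu, attn_flag):
        super().__init__()
        self.batch_size = batch_size  # kept for signature parity; unused here
        self.use_gpu = use_gpu
        self.attn_flag = attn_flag    # attention branch omitted in this sketch
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, label_size)

    def forward(self, x):
        # x: (batch, seq_len) token indices
        emb = self.embedding(x)
        _, (h_n, _) = self.lstm(emb)
        return self.fc(h_n[-1])  # (batch, label_size) logits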
Example 2
                    help='hyperparameter, default=50')
opt = parser.parse_args()
print(opt)

os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpuid)

transformations = transforms.Compose([
    transforms.Resize(opt.imagesize),
    transforms.CenterCrop(opt.imagesize),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # ImageNet mean/std
])

print('pre-process data...')
dset_database = DP.DatasetProcessingCIFAR_10(opt.dataroot, opt.database,
                                             opt.database_label,
                                             transformations)
dset_test = DP.DatasetProcessingCIFAR_10(opt.dataroot, opt.test_file,
                                         opt.test_label, transformations)

print('building data loaders...')
num_database, num_test = len(dset_database), len(dset_test)
database_loader = DataLoader(dset_database,
                             batch_size=opt.batchsize,
                             shuffle=False,
                             num_workers=4)
test_loader = DataLoader(dset_test,
                         batch_size=opt.batchsize,
                         shuffle=False,
                         num_workers=4)
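This and the remaining examples all construct DP.DatasetProcessingCIFAR_10, which is not shown. A plausible reconstruction, assuming the image-list file holds one filename per line and the label file one integer per line; the returned index is what the training loops below use to update per-sample code buffers (names and layout are guesses):

import os
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset

class DatasetProcessingCIFAR_10(Dataset):
    def __init__(self, data_path, img_filename, label_filename, transform=None):
        self.img_path = data_path
        self.transform = transform
        with open(os.path.join(data_path, img_filename), 'r') as fp:
            self.img_filename = [x.strip() for x in fp]
        labels = np.loadtxt(os.path.join(data_path, label_filename), dtype=np.int64)
        self.label = torch.from_numpy(labels)

    def __getitem__(self, index):
        img = Image.open(os.path.join(self.img_path, self.img_filename[index]))
        img = img.convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        # the index lets callers scatter results into per-sample buffers
        return img, self.label[index], index

    def __len__(self):
        return len(self.img_filename)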
Example 3
def DPSH_algo(bit, param, gpu_ind=0):
    # parameters setting
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_ind)

    DATA_DIR = 'data/CIFAR-10'
    DATABASE_FILE = 'database_img.txt'
    TRAIN_FILE = 'train_img.txt'
    TEST_FILE = 'test_img.txt'

    DATABASE_LABEL = 'database_label.txt'
    TRAIN_LABEL = 'train_label.txt'
    TEST_LABEL = 'test_label.txt'

    batch_size = 128
    epochs = 150
    learning_rate = 0.05
    weight_decay = 10**-5
    model_name = 'alexnet'
    nclasses = 10
    use_gpu = torch.cuda.is_available()

    filename = param['filename']

    lamda = param['lambda']
    param['bit'] = bit
    param['epochs'] = epochs
    param['learning rate'] = learning_rate
    param['model'] = model_name

    ### data processing
    transformations = transforms.Compose([
        transforms.Resize(256),  # transforms.Scale is deprecated in torchvision
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    dset_database = DP.DatasetProcessingCIFAR_10(DATA_DIR, DATABASE_FILE,
                                                 DATABASE_LABEL,
                                                 transformations)

    dset_train = DP.DatasetProcessingCIFAR_10(DATA_DIR, TRAIN_FILE,
                                              TRAIN_LABEL, transformations)

    dset_test = DP.DatasetProcessingCIFAR_10(DATA_DIR, TEST_FILE, TEST_LABEL,
                                             transformations)

    num_database, num_train, num_test = len(dset_database), len(
        dset_train), len(dset_test)

    database_loader = DataLoader(dset_database,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=4)

    train_loader = DataLoader(dset_train,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4)

    test_loader = DataLoader(dset_test,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=4)

    ### create model
    model = CreateModel(model_name, bit, use_gpu)
    optimizer = optim.SGD(model.parameters(),
                          lr=learning_rate,
                          weight_decay=weight_decay)

    ### training phase
    # B: current binary codes for all training samples; U: cached real-valued
    # network outputs used in the pairwise loss
    B = torch.zeros(num_train, bit)
    U = torch.zeros(num_train, bit)
    train_labels = LoadLabel(TRAIN_LABEL, DATA_DIR)
    train_labels_onehot = EncodingOnehot(train_labels, nclasses)
    test_labels = LoadLabel(TEST_LABEL, DATA_DIR)
    test_labels_onehot = EncodingOnehot(test_labels, nclasses)

    train_loss = []
    map_record = []

    totloss_record = []
    totl1_record = []
    totl2_record = []
    t1_record = []

    Sim = CalcSim(train_labels_onehot, train_labels_onehot)

    for epoch in range(epochs):
        epoch_loss = 0.0
        ## training epoch
        for iter, traindata in enumerate(train_loader, 0):
            train_input, train_label, batch_ind = traindata
            train_label = torch.squeeze(train_label)
            # both branches compute the same quantities; only the device differs
            train_label_onehot = EncodingOnehot(train_label, nclasses)
            S = CalcSim(train_label_onehot, train_labels_onehot)
            if use_gpu:
                train_input, train_label = Variable(
                    train_input.cuda()), Variable(train_label.cuda())
            else:
                train_input, train_label = Variable(train_input), Variable(
                    train_label)

            model.zero_grad()
            train_outputs = model(train_input)
            for i, ind in enumerate(batch_ind):
                U[ind, :] = train_outputs.data[i]
                B[ind, :] = torch.sign(train_outputs.data[i])

            Bbatch = torch.sign(train_outputs)
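            # DPSH pairwise loss below: theta_ij = u_i . u_j / 2, and the
            # log-likelihood sums s_ij * theta_ij - log(1 + e^theta_ij) over
            # pairs; Logtrick evaluates log(1 + e^x) in a numerically stable way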
            if use_gpu:
                theta_x = train_outputs.mm(Variable(U.cuda()).t()) / 2
                logloss = (Variable(S.cuda())*theta_x - Logtrick(theta_x, use_gpu)).sum() \
                        / (num_train * len(train_label))
                regterm = (Bbatch - train_outputs).pow(2).sum() / (
                    num_train * len(train_label))
            else:
                theta_x = train_outputs.mm(Variable(U).t()) / 2
                logloss = (Variable(S)*theta_x - Logtrick(theta_x, use_gpu)).sum() \
                        / (num_train * len(train_label))
                regterm = (Bbatch - train_outputs).pow(2).sum() / (
                    num_train * len(train_label))

            loss = -logloss + lamda * regterm
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()  # loss.data[0] fails on PyTorch >= 0.4

            # print('[Training Phase][Epoch: %3d/%3d][Iteration: %3d/%3d] Loss: %3.5f' % \
            #       (epoch + 1, epochs, iter + 1, np.ceil(num_train / batch_size),loss.data[0]))
        print('[Train Phase][Epoch: %3d/%3d][Loss: %3.5f]' %
              (epoch + 1, epochs, epoch_loss / len(train_loader)),
              end='')
        optimizer = AdjustLearningRate(optimizer, epoch, learning_rate)

        l, l1, l2, t1 = Totloss(U, B, Sim, lamda, num_train)
        totloss_record.append(l)
        totl1_record.append(l1)
        totl2_record.append(l2)
        t1_record.append(t1)

        print(
            '[Total Loss: %10.5f][total L1: %10.5f][total L2: %10.5f][norm theta: %3.5f]'
            % (l, l1, l2, t1),
            end='')

        ### testing during epoch
        qB = GenerateCode(model, test_loader, num_test, bit, use_gpu)
        tB = torch.sign(B).numpy()
        map_ = CalcHR.CalcMap(qB, tB, test_labels_onehot.numpy(),
                              train_labels_onehot.numpy())
        train_loss.append(epoch_loss / len(train_loader))
        map_record.append(map_)

        print('[Test Phase ][Epoch: %3d/%3d] MAP(retrieval train): %3.5f' %
              (epoch + 1, epochs, map_))
        print(len(train_loader))
    ### evaluation phase
    ## create binary code
    model.eval()
    database_labels = LoadLabel(DATABASE_LABEL, DATA_DIR)
    database_labels_onehot = EncodingOnehot(database_labels, nclasses)
    qB = GenerateCode(model, test_loader, num_test, bit, use_gpu)
    dB = GenerateCode(model, database_loader, num_database, bit, use_gpu)

    map = CalcHR.CalcMap(qB, dB, test_labels_onehot.numpy(),
                         database_labels_onehot.numpy())
    print('[Retrieval Phase] MAP(retrieval database): %3.5f' % map)

    result = {}
    result['qB'] = qB
    result['dB'] = dB
    result['train loss'] = train_loss
    result['map record'] = map_record
    result['map'] = map
    result['param'] = param
    result['total loss'] = totloss_record
    result['l1 loss'] = totl1_record
    result['l2 loss'] = totl2_record
    result['norm theta'] = t1_record
    result['filename'] = filename

    return result
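EncodingOnehot, CalcSim, Logtrick, and CalcHR.CalcMap are imported from elsewhere in the repository. The following are hedged reconstructions consistent with how the loop uses them; the stable log(1+e^x) identity and the Hamming-ranking mAP are standard, but the exact original code may differ:

import numpy as np
import torch

def EncodingOnehot(target, nclasses):
    # (N,) integer (long) labels -> (N, nclasses) one-hot float matrix
    onehot = torch.zeros(target.size(0), nclasses)
    onehot.scatter_(1, target.view(-1, 1), 1.0)
    return onehot

def CalcSim(batch_label, train_label):
    # S_ij = 1 when samples i and j share at least one class
    return (batch_label.mm(train_label.t()) > 0).float()

def Logtrick(x, use_gpu):
    # stable log(1 + exp(x)) = max(x, 0) + log(1 + exp(-|x|));
    # use_gpu is kept only for signature compatibility
    return x.clamp(min=0) + torch.log1p(torch.exp(-x.abs()))

def CalcMap(qB, rB, query_label, retrieval_label):
    # mean average precision over a Hamming-distance ranking of rB per query
    num_query = query_label.shape[0]
    total = 0.0
    for i in range(num_query):
        gnd = (query_label[i] @ retrieval_label.T > 0).astype(np.float32)
        hamm = 0.5 * (rB.shape[1] - qB[i] @ rB.T)  # Hamming distance of +-1 codes
        gnd = gnd[np.argsort(hamm)]
        tsum = int(gnd.sum())
        if tsum == 0:
            continue
        count = np.arange(1, tsum + 1)
        tindex = np.where(gnd == 1)[0] + 1.0
        total += float(np.mean(count / tindex))
    return total / num_query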
Example 4
def DPSH_algo(bit, param, gpu_ind=0):
    # parameters setting
    # os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_ind)

    DATA_DIR = '/home/gpuadmin/datasets/CIFAR-10'
    DATABASE_FILE = 'database_img.txt'
    TRAIN_FILE = 'train_img.txt'
    TEST_FILE = 'test_img.txt'

    DATABASE_LABEL = 'database_label.txt'
    TRAIN_LABEL = 'train_label.txt'
    TEST_LABEL = 'test_label.txt'
    batch_size = 64
    # batch_size = 256
    # batch_size = 150
    epochs = 200
    learning_rate = 0.05
    weight_decay = 10**-5
    # other backbones tried: APN, alexnet, resnet18, vgg11, resnet50, dpn92,
    # resnet18/34/50/101/152 with CBAM, and resnet18/34 with PC
    model_name = 'resnet34'
    print(model_name)
    nclasses = 10
    use_gpu = torch.cuda.is_available()

    filename = param['filename']

    lamda = param['lambda']
    param['bit'] = bit
    param['epochs'] = epochs
    param['learning rate'] = learning_rate
    param['model'] = model_name

    ### data processing
    transformations = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    dset_database = DP.DatasetProcessingCIFAR_10(DATA_DIR, DATABASE_FILE,
                                                 DATABASE_LABEL,
                                                 transformations)

    dset_train = DP.DatasetProcessingCIFAR_10(DATA_DIR, TRAIN_FILE,
                                              TRAIN_LABEL, transformations)

    dset_test = DP.DatasetProcessingCIFAR_10(DATA_DIR, TEST_FILE, TEST_LABEL,
                                             transformations)

    num_database, num_train, num_test = len(dset_database), len(
        dset_train), len(dset_test)

    database_loader = DataLoader(dset_database,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=4)

    train_loader = DataLoader(dset_train,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4)

    test_loader = DataLoader(dset_test,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=4)

    ### create model
    model = CreateModel(model_name, bit, use_gpu)
    model = torch.nn.DataParallel(model).cuda()
    # model = AlexNetPlusLatent(bit).cuda()
    print(model)
    optimizer = optim.SGD(model.parameters(),
                          lr=learning_rate,
                          weight_decay=weight_decay)

    ### training phase
    # B: binary codes for all training samples; U: cached real-valued outputs
    B = torch.zeros(num_train, bit)
    U = torch.zeros(num_train, bit)
    train_labels = LoadLabel(TRAIN_LABEL, DATA_DIR)
    train_labels_onehot = EncodingOnehot(train_labels, nclasses)
    test_labels = LoadLabel(TEST_LABEL, DATA_DIR)
    test_labels_onehot = EncodingOnehot(test_labels, nclasses)

    train_loss = []
    map_record = []

    totloss_record = []
    totl1_record = []
    totl2_record = []
    t1_record = []
    softmaxloss = torch.nn.CrossEntropyLoss().cuda()  # defined but not used below
    Sim = CalcSim(train_labels_onehot, train_labels_onehot)
    for epoch in range(epochs):
        epoch_loss = 0.0
        ## training epoch
        for iter, traindata in enumerate(train_loader, 0):
            train_input, train_label, batch_ind = traindata
            train_label = torch.squeeze(train_label)
            # both branches compute the same quantities; only the device differs
            train_label_onehot = EncodingOnehot(train_label, nclasses)
            S = CalcSim(train_label_onehot, train_labels_onehot)
            if use_gpu:
                train_input, train_label = Variable(
                    train_input.cuda()), Variable(train_label.cuda())
            else:
                train_input, train_label = Variable(train_input), Variable(
                    train_label)

            model.zero_grad()
            train_outputs = model(train_input)

            for i, ind in enumerate(batch_ind):
                U[ind, :] = train_outputs.data[i]
                B[ind, :] = torch.sign(train_outputs.data[i])

            Bbatch = torch.sign(train_outputs)
            # Bbatch1 = torch.sign(ym)
            if use_gpu:
                theta_x = train_outputs.mm(Variable(U.cuda()).t()) / 2
                logloss = (Variable(S.cuda())*theta_x - Logtrick(theta_x, use_gpu)).sum() \
                        / (num_train * len(train_label))
                regterm = (Bbatch - train_outputs).pow(2).sum() / (
                    num_train * len(train_label)
                )  #+(Bbatch1-ym).pow(2).sum() / (num_train * len(train_label))
            else:
                theta_x = train_outputs.mm(Variable(U).t()) / 2
                logloss = (Variable(S)*theta_x - Logtrick(theta_x, use_gpu)).sum() \
                        / (num_train * len(train_label))
                regterm = (Bbatch - train_outputs).pow(2).sum() / (
                    num_train * len(train_label))
            # (disabled experiments: softmax/BCE/L1/MSE heads on an auxiliary
            # classifier output, plus focal loss and center loss)

            # Qloss (a quantization penalty) is computed but not added to the
            # objective below; earlier variants appended it and other terms
            Qloss = torch.mean((torch.abs(train_outputs) - 1)**2) + torch.mean(
                (torch.abs(Bbatch) - 1)**2)
            loss = -logloss + lamda * regterm
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()

            # print('[Training Phase][Epoch: %3d/%3d][Iteration: %3d/%3d] Loss: %3.5f' % \
            #       (epoch + 1, epochs, iter + 1, np.ceil(num_train / batch_size),loss.data[0]))
        print('[Train Phase][Epoch: %3d/%3d][Loss: %3.5f]' %
              (epoch + 1, epochs, epoch_loss / len(train_loader)),
              end='')
        optimizer = AdjustLearningRate(optimizer, epoch, learning_rate)

        l, l1, l2, t1 = Totloss(U, B, Sim, lamda, num_train)
        totloss_record.append(l)
        totl1_record.append(l1)
        totl2_record.append(l2)
        t1_record.append(t1)

        print(
            '[Total Loss: %10.5f][total L1: %10.5f][total L2: %10.5f][norm theta: %3.5f]'
            % (l, l1, l2, t1),
            end='')

        ### testing during epoch
        qB = GenerateCode(model, test_loader, num_test, bit, use_gpu)
        #tB = torch.sign(B).numpy()
        tB = GenerateCode(model, train_loader, num_train, bit, use_gpu)
        map_ = CalcHR.CalcTopMap(qB, tB, test_labels_onehot.numpy(),
                                 train_labels_onehot.numpy(), 5000)
        train_loss.append(epoch_loss / len(train_loader))
        map_record.append(map_)

        print('[Test Phase ][Epoch: %3d/%3d] MAP(retrieval train): %3.5f' %
              (epoch + 1, epochs, map_))
        print(len(train_loader))
    ### evaluation phase
    ## create binary code
    model.eval()
    database_labels = LoadLabel(DATABASE_LABEL, DATA_DIR)
    database_labels_onehot = EncodingOnehot(database_labels, nclasses)
    qB = GenerateCode(model, test_loader, num_test, bit, use_gpu)
    dB = GenerateCode(model, database_loader, num_database, bit, use_gpu)

    map = CalcHR.CalcTopMap(qB, dB, test_labels_onehot.numpy(),
                            database_labels_onehot.numpy(), 5000)
    print('[Retrieval Phase] MAP(retrieval database): %3.5f' % map)
    test_binary = 0.5 * (qB + 1)
    database_binary = 0.5 * (dB + 1)
    test_labels = LoadLabel(TEST_LABEL, DATA_DIR)
    database_labels = LoadLabel(DATABASE_LABEL, DATA_DIR)
    sum_p, sum_r = precision(database_binary, database_labels, test_binary,
                             test_labels)
    result_dir = os.path.join('result', str(bit))
    os.makedirs(result_dir, exist_ok=True)  # os.mkdir fails if 'result' itself is missing
    np.savetxt(os.path.join(result_dir, 'sum_p.txt'), sum_p, fmt='%3.5f')
    np.savetxt(os.path.join(result_dir, 'sum_r.txt'), sum_r, fmt='%3.5f')
    np.savetxt(os.path.join(result_dir, 'map'), map.reshape(1, -1), fmt='%3f')
    result = {}
    result['qB'] = qB
    result['dB'] = dB
    result['train loss'] = train_loss
    result['map record'] = map_record
    result['map'] = map
    result['param'] = param
    result['total loss'] = totloss_record
    result['l1 loss'] = totl1_record
    result['l2 loss'] = totl2_record
    result['norm theta'] = t1_record
    result['filename'] = filename
    # (earlier versions also saved qB, dB, and map under ./result here)

    return result
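GenerateCode backs every evaluation call above but is not shown. A sketch consistent with the call sites in Examples 3 and 4, which expect an (N, bit) array of +-1 codes; Example 5's variant additionally returns a feature tensor. The internals here are assumed:

import numpy as np
import torch

def GenerateCode(model, data_loader, num_data, bit, use_gpu):
    # forward every sample once and collect sign-quantized codes row by row
    B = np.zeros((num_data, bit), dtype=np.float32)
    model.eval()
    with torch.no_grad():
        for data_input, _, data_ind in data_loader:
            if use_gpu:
                data_input = data_input.cuda()
            output = model(data_input)
            B[data_ind.numpy(), :] = torch.sign(output).cpu().numpy()
    return B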
Example 5
def DenseHash_RF_algo(bit, param, gpu_ind=0):
    # parameters setting
    # os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_ind)
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'

    #DATA_DIR = 'data/CIFAR-10'
    DATA_DIR = '/home/zhangjingyi/res/DPSH-pytorch/data/CIFAR-10'
    DATABASE_FILE = 'database_img.txt'
    TRAIN_FILE = 'train_img.txt'
    TEST_FILE = 'test_img.txt'

    DATABASE_LABEL = 'database_label.txt'
    TRAIN_LABEL = 'train_label.txt'
    TEST_LABEL = 'test_label.txt'

    batch_size = 64
    epochs = 30
    learning_rate = 0.003
    weight_decay = 10**-5
    model_name = 'vgg16'
    #model_name = 'alexnet'
    nclasses = 10
    use_gpu = torch.cuda.is_available()

    filename = param['filename']
    print('pkl file name %s' % filename)

    lamda = param['lambda']
    param['bit'] = bit
    param['epochs'] = epochs
    param['learning rate'] = learning_rate
    param['model'] = model_name
    print('**********************************')
    print('Solver Settings:')
    print(param)

    ### data processing
    transformations = transforms.Compose([
        transforms.Resize(256),
        #transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    dset_database = DP.DatasetProcessingCIFAR_10(DATA_DIR, DATABASE_FILE,
                                                 DATABASE_LABEL,
                                                 transformations)

    dset_train = DP.DatasetProcessingCIFAR_10(DATA_DIR, TRAIN_FILE,
                                              TRAIN_LABEL, transformations)

    dset_test = DP.DatasetProcessingCIFAR_10(DATA_DIR, TEST_FILE, TEST_LABEL,
                                             transformations)

    num_database, num_train, num_test = len(dset_database), len(
        dset_train), len(dset_test)

    database_loader = DataLoader(dset_database,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=4)

    train_loader = DataLoader(dset_train,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4)

    test_loader = DataLoader(dset_test,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=4)

    ### create model
    model = CreateModel(model_name, bit, use_gpu)

    optimizer = optim.SGD(model.parameters(),
                          lr=learning_rate,
                          weight_decay=weight_decay)

    ### training phase
    # B: random +-1 initial codes; U: running copy of the network outputs
    B = torch.sign(torch.randn(num_train, bit))
    U = torch.sign(torch.randn(num_train, bit))
    train_labels = LoadLabel(TRAIN_LABEL, DATA_DIR)
    #print(train_labels)

    train_labels_onehot = EncodingOnehot(train_labels, nclasses)
    test_labels = LoadLabel(TEST_LABEL, DATA_DIR)
    test_labels_onehot = EncodingOnehot(test_labels, nclasses)

    Y = train_labels_onehot
    #Sim = CalcSim(train_labels_onehot, train_labels_onehot)
    #file = open(filename.replace('snapshot/','log/').replace('.pkl','.log'),'a')
    for epoch in range(epochs):
        model.train()
        start_time = time.time()
        epoch_loss = 0.0
        # D step: closed-form ridge-regression solve, D = (Y^T Y + I)^-1 Y^T B
        temp1 = Y.t().mm(Y) + torch.eye(nclasses)
        temp1 = temp1.inverse()
        temp1 = temp1.mm(Y.t())
        D = temp1.mm(B)

        # B step: re-quantize codes from the label fit, pulled slightly toward U

        B = torch.sign(Y.mm(D) + 1e-5 * U)
        print('[Epoch %3d B step time cost: %3.5fs]' %
              (epoch + 1, time.time() - start_time))

        # F step
        ## training epoch
        ave_iter_loss = 0.0
        for iter, traindata in enumerate(train_loader, 0):
            iter_timer = time.time()
            train_input, train_label, batch_ind = traindata
            train_input = Variable(train_input.cuda())
            model.zero_grad()
            train_outputs, f = model(train_input)
            #print(train_outputs.size())

            temp = torch.zeros(train_outputs.data.size())
            for i, ind in enumerate(batch_ind):
                temp[i, :] = B[ind, :]
                U[ind, :] = train_outputs.data[i]

            temp = Variable(temp.cuda())

            loss = (temp - train_outputs).pow(2).sum() / (batch_size)

            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()  # loss.data[0] fails on PyTorch >= 0.4
            ave_iter_loss += loss.item()

            if iter % 20 == 0:
                print('[Iteration %d][%3.2fs/iter][Iter Loss: %3.5f]' %
                      (iter + epoch * len(train_loader),
                       time.time() - iter_timer, ave_iter_loss / 20))
                ave_iter_loss = 0

        print('[Train Phase][Epoch: %3d/%3d][Loss: %3.5f]' %
              (epoch + 1, epochs, epoch_loss / len(train_loader)))
        optimizer = AdjustLearningRate(optimizer, epoch, learning_rate)

        ### testing during epoch
        test_timer = time.time()
        model.eval()
        qB, f = GenerateCode(model, test_loader, num_test, bit, use_gpu)
        tB = torch.sign(U).numpy()
        f = f.cpu().data.numpy()
        np.save('f.npy', f)
        map_ = CalcHR.CalcMap(qB, tB, test_labels_onehot.numpy(),
                              train_labels_onehot.numpy())
        print('[Test Phase ][Epoch: %3d/%3d] MAP(retrieval train): %3.5f' %
              (epoch + 1, epochs, map_))
        map_topk = CalcHR.CalcTopMap(qB, tB, test_labels_onehot.numpy(),
                                     train_labels_onehot.numpy(), 50)
        print(
            '[Test Phase ][Epoch: %3d/%3d] MAP@top50(retrieval train): %3.5f' %
            (epoch + 1, epochs, map_topk))
        map_topk = CalcHR.CalcTopMap(qB, tB, test_labels_onehot.numpy(),
                                     train_labels_onehot.numpy(), 500)
        print(
            '[Test Phase ][Epoch: %3d/%3d] MAP@top500(retrieval train): %3.5f'
            % (epoch + 1, epochs, map_topk))
        acc_topk = CalcHR.CalcTopAcc(qB, tB, test_labels_onehot.numpy(),
                                     train_labels_onehot.numpy(), 50)
        print(
            '[Test Phase ][Epoch: %3d/%3d] Precision@top50(retrieval train): %3.5f'
            % (epoch + 1, epochs, acc_topk))
        acc_topk = CalcHR.CalcTopAcc(qB, tB, test_labels_onehot.numpy(),
                                     train_labels_onehot.numpy(), 500)
        print(
            '[Test Phase ][Epoch: %3d/%3d] Precision@top500(retrieval train): %3.5f'
            % (epoch + 1, epochs, acc_topk))
        print('[Test time cost: %d]' % (time.time() - test_timer))

        print('[Epoch %3d time cost: %ds]' %
              (epoch + 1, time.time() - start_time))

        ### evaluation phase (full database retrieval runs only on the final epoch)
        ## create binary code
        if (epoch + 1) % epochs == 0:
            eval_timer = time.time()
            model.eval()
            database_labels = LoadLabel(DATABASE_LABEL, DATA_DIR)
            database_labels_onehot = EncodingOnehot(database_labels, nclasses)
            qB, _ = GenerateCode(model, test_loader, num_test, bit, use_gpu)
            dB, _ = GenerateCode(model, database_loader, num_database, bit,
                                 use_gpu)

            map = CalcHR.CalcMap(qB, dB, test_labels_onehot.numpy(),
                                 database_labels_onehot.numpy())
            print('[Retrieval Phase] MAP(retrieval database): %3.5f' % map)
            map_topk = CalcHR.CalcTopMap(qB, dB, test_labels_onehot.numpy(),
                                         database_labels_onehot.numpy(), 500)
            print('[Retrieval Phase] MAP@500(retrieval database): %3.5f' %
                  map_topk)
            map_topk = CalcHR.CalcTopMap(qB, dB, test_labels_onehot.numpy(),
                                         database_labels_onehot.numpy(), 5000)
            print('[Retrieval Phase] MAP@5000(retrieval database): %3.5f' %
                  map_topk)
            acc_topk = CalcHR.CalcTopAcc(qB, dB, test_labels_onehot.numpy(),
                                         database_labels_onehot.numpy(), 500)
            print(
                '[Retrieval Phase] Precision@500(retrieval database): %3.5f' %
                acc_topk)
            acc_topk = CalcHR.CalcTopAcc(qB, dB, test_labels_onehot.numpy(),
                                         database_labels_onehot.numpy(), 5000)
            print(
                '[Retrieval Phase] Precision@5000(retrieval database): %3.5f' %
                acc_topk)
            print('[Eval time: %ds]' % (time.time() - eval_timer))

    ## save trained model
    torch.save(model.state_dict(), filename)

    result = {}
    result['qB'] = qB
    result['dB'] = dB
    result['map'] = map

    return result
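All three training loops call AdjustLearningRate once per epoch, but its schedule is not shown anywhere in these snippets. A minimal step-decay sketch; the decay factor and interval are assumptions:

def AdjustLearningRate(optimizer, epoch, init_lr, decay=0.5, step=50):
    # step decay: scale the initial learning rate by `decay` every `step` epochs
    lr = init_lr * (decay ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return optimizer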

if __name__ == '__main__':
    # parser = argparse.ArgumentParser()
    # parser.add_argument("--datapath", type=str, default="../data", help="path to the dataset")
    # parser.add_argument("--file", type=str, default="combined2.csv")
    # args = parser.parse_args()
    # file_name = args.datapath + '/' + args.file

    path = '../data/*.csv'

    for fname in glob.glob(path):
        plotter = Plotter(env_name='Algo Trading Project')
        print(fname)
        data = DataProcessing(fname, train_size)

        train_data, train_target = data.trainingData()

        net = Train()
        n = fname.split('/')[-1].split('.')[0]

        for epoch in range(num_epochs):
            net = trainNetwork(net, train_data, train_target, epoch, n)

        os.makedirs('../saved_models', exist_ok=True)

        torch.save(net.network.state_dict(),
                   '../saved_models/model_' + n + '.pt')
        del net