Example #1
def test(loader, model, epoch, totals):
    """Test for one epoch on the test sets (one loader per set)."""
    model.eval()
    accuracy = []
    # test 1: score-average evaluation, fusing the chunk-level outputs of each sample
    for i in range(len(totals)):
        output, target, ind = test_core(loader[i], model)
        _output = []
        _target = []
        for j in range(totals[i]):
            sum_ind = np.where(ind == j)  # rows whose sample index equals j
            _output.append(np.average(output[sum_ind[0]], 0)[np.newaxis, :])  # average their scores
            _target.append(target[sum_ind[0][0]])  # the rows share one label; take the first
        output = np.concatenate(_output, 0)
        target = np.array(_target)
        total = totals[i]
        _accuracy = testPerform(output, target, total)
        accuracy.append(_accuracy)
    accuracy1 = np.mean(accuracy)
    strings = "[==================test(score average)==================] | epoch:" + str(
        epoch) + " | mean accuracy:" + str(accuracy1) + " | accuracy:" + str(
            accuracy) + "\n"
    with ut.Log(strings):
        pass
    # test 2: plain per-example evaluation on the last loader
    output, target, _ = test_core(loader[-1], model)
    total = len(target)
    # measure accuracy and record loss
    accuracy2 = testPerform(output, target, total)
    strings = "[==============================test(usual)==============================] | epoch:%d | accuracy:%f\n" % (
        epoch, accuracy2)
    with ut.Log(strings):
        pass
    return accuracy1, accuracy2
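`test_core` and `testPerform` are project helpers that are not shown here. Below is a minimal sketch of what they might look like, inferred purely from how they are called above; the exact signatures, the single-output model, and the `tc.no_grad()` usage are assumptions.

import numpy as np
import torch as tc

def test_core(loader, model):
    # Run the model over every (input, target, ind) batch and stack the
    # results as numpy arrays: scores [N, C], labels [N], sample indices [N].
    outputs, targets, inds = [], [], []
    with tc.no_grad():
        for input, target, ind in loader:
            output = model(input.cuda())
            outputs.append(output.cpu().numpy())
            targets.append(target.numpy())
            inds.append(ind.numpy())
    return (np.concatenate(outputs, 0), np.concatenate(targets, 0),
            np.concatenate(inds, 0))

def testPerform(output, target, total):
    # Top-1 accuracy: fraction of rows whose argmax matches the label.
    return float((output.argmax(1) == target).sum()) / total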
Example #2
def learning_rate_step(scheduler, epoch):
    scheduler.step(epoch)
    lr = scheduler.get_lr()
    string = 'Learning rate:{0}\n'.format(lr)
    # print('Epoch:{0}, ' 'Learning rate:{1} |'.format(epoch, lr))
    with ut.Log(string):
        pass
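A hypothetical usage of `learning_rate_step`, assuming a standard `torch.optim.lr_scheduler` schedule and that the `ut.Log` helper is importable; the model, optimizer, and step sizes are placeholders.

import torch as tc

model = tc.nn.Linear(10, 2)  # placeholder model
optimizer = tc.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
scheduler = tc.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
for epoch in range(30):
    learning_rate_step(scheduler, epoch)  # logs e.g. "Learning rate:[0.1]"
    # ... train(train_loader, model, criterion, optimizer, epoch) ...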
def train(train_loader, model, criterion, optimizer, epoch):
    # type: (object, object, object, object, object) -> object
    """Train for one epoch on the training set"""
    # losses = AverageMeter()
    # top1 = AverageMeter()

    # switch to train mode
    model.train()

    for i, (input, target) in enumerate(train_loader):

        target = target.cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+
        input = input.cuda()
        input_var = tc.autograd.Variable(input)
        target_var = tc.autograd.Variable(target)

        # compute output
        (output, embedding1, embedding2) = model(input_var)

        loss = criterion(output, target_var)

        # measure accuracy and record loss
        # prec1 = accuracy(output.data, target, topk=(1,))[0]
        # losses.update(loss.data[0], input.size(0))
        # top1.update(prec1[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % 50 == 0:
            pred_y = tc.max(output, 1)[1].data.squeeze()
            accuracy = (pred_y == target).sum().item() / float(target.size(0))
            with ut.Log("[train] epoch:%d | Loss:%f | accuracy:%f\n" %
                        (epoch, loss.item(), accuracy)):
                pass
        # if i % 100 == 0:
        #     print('Training\n Loss {loss.val:.4f} ({loss.avg:.4f})\t'
        #           'accuracy@1 {top1.val:.3f} ({top1.avg:.3f})'.format(loss=losses, top1=top1))

        # measure elapsed time

    # batch_time.update(time.time() - end)
    with ut.Log("[train] epoch:%d finished | Loss:%f | accuracy:%f\n" %
                (epoch, loss.item(), accuracy)):
        pass
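The `with ut.Log(...): pass` idiom used throughout suggests a context manager that writes its message to a log file on entry. The `ut` module is not shown, so the following is a hypothetical reconstruction; the file path and exact behavior are assumptions.

class Log(object):
    LOG_FILE = "./train.log"  # assumed log path

    def __init__(self, string):
        self.string = string

    def __enter__(self):
        # append the message to the log file as soon as the block is entered
        with open(self.LOG_FILE, "a") as f:
            f.write(self.string)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return False  # never suppress exceptions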
class DataGet(tud.Dataset):  # class wrapper restored; `DataGet(root)` is instantiated in extract_feature below
    def __init__(self, root, stdLen=250):

        files = os.listdir(root)
        files.sort()
        files = [root + i for i in files]  # assumes `root` ends with a path separator
        self.train_data, dataSize, self.train_labels = ut.multiReadProc(files)
        self.count = len(self.train_data)
        self.maxR = max([i[1] for i in dataSize])  # longest time axis over all records
        self.stdLen = stdLen
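    # Hypothetical completion of the torch Dataset protocol; the real
    # __len__/__getitem__ are not shown. Assumes each record is a 2-D
    # feature array that is cropped or zero-padded along the time axis
    # to exactly `stdLen` frames.
    def __len__(self):
        return self.count

    def __getitem__(self, index):
        data = self.train_data[index]
        if data.shape[1] >= self.stdLen:
            data = data[:, :self.stdLen]  # crop long utterances
        else:
            pad = np.zeros((data.shape[0], self.stdLen - data.shape[1]),
                           dtype=data.dtype)
            data = np.concatenate((data, pad), 1)  # zero-pad short ones
        return data[np.newaxis, :, :].astype("float32"), self.train_labels[index]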
Example #5
def loadData(pklDir, files):
    # cache the parsed data as a .npy file; the commented-out pickle fallback
    # stores binary data, so its file must be opened in 'b' mode
    if os.path.exists(pklDir + ".npy"):
        data = np.load(pklDir + ".npy")
        # with open(pklDir,'rb') as f:
        #     data = pk.load(f)
    else:
        data = ut.multiReadProc(files)
        np.save(pklDir, data)
        # with open(pklDir,'wb') as f:
        #     pk.dump(data,f)
    return data
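A hypothetical usage of the caching behavior (all paths are placeholders): the first call parses the raw files and writes a `.npy` cache next to `pklDir`; subsequent calls load the cache directly.

import os

files = sorted(os.path.join("./data/train/", f) for f in os.listdir("./data/train/"))
data = loadData("./cache/train", files)  # first call: parses the files, writes ./cache/train.npy
data = loadData("./cache/train", files)  # later calls: loads the cached array directly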
Example #6
def train(loader, model, criterion, optimizer, epoch, set):
    # type: (object, object, object, object, object, object) -> object
    """Train for one epoch on the training set"""
    # losses = AverageMeter()
    # top1 = AverageMeter()

    # switch to train mode
    model.train()
    for i, (input, target, _) in enumerate(loader):

        target = target.cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+
        input = input.cuda()
        input_var = tc.autograd.Variable(input)
        target_var = tc.autograd.Variable(target)

        # compute output
        output = model(input_var)

        loss = criterion(output, target_var)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % P.PERFORM_BATCH_FREQUENCY == 0:
            pred_y = tc.max(output, 1)[1].data.squeeze()
            accuracy = (pred_y == target).sum().item() / float(target.size(0))
            string = "[train] epoch:%d | set:%d | Loss:%f | accuracy:%f\n" % (
                epoch, set, loss.item(), accuracy)
            with ut.Log(string):
                pass
    # batch_time.update(time.time() - end)
    string = "[=========train finished=========] epoch:%d | Loss:%f | accuracy:%f\n" % (
        epoch, loss.item(), accuracy)
    with ut.Log(string):
        pass
def test(train_loader, model, epoch):
    """Evaluate for one epoch; if `epoch` is a directory path string, extract embeddings instead."""
    # batch_time = AverageMeter()
    # losses = AverageMeter()
    # top1 = AverageMeter()

    # switch to test mode
    model.eval()
    right = 0
    total = 0
    ss = []
    for i, (input, target) in enumerate(train_loader):
        target = target.cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+
        input = input.cuda()
        input_var = tc.autograd.Variable(input)
        target_var = tc.autograd.Variable(target)

        # compute output
        (output, embedding1, embedding2) = model(input_var)
        if isinstance(epoch, str):
            # extract data: prepend the label column to the two embedding matrices
            tar = tc.autograd.Variable(
                target.type(tc.FloatTensor).resize_((len(target), 1))).cuda()
            ss.append(tc.cat((tar, embedding1, embedding2), 1).data.cpu().numpy())
        else:
            output = functional.log_softmax(output, 1)
            # measure accuracy and record loss
            pred_y = tc.max(output, 1)[1].data.squeeze()
            right = right + (pred_y == target).sum().item()  # count correct predictions
            total = total + target.size(0)
    if isinstance(epoch, str):
        name = re.findall(".*mfcc/([a-z]*)/", epoch)[0]
        np.savetxt("./" + name + ".txt", np.concatenate(ss, 0))
    else:
        accuracy = float(right) / total  # float() avoids integer division under Python 2
        strings = "[=========test=========] | epoch:%d | accuracy:%f\n" % (
            epoch, accuracy)
        with ut.Log(strings):
            print(strings)
def learning_rate_step(scheduler, epoch):
    scheduler.step(epoch)
    lr = scheduler.get_lr()
    print('Learning rate:{0}\n'.format(lr))
    # print('Epoch:{0}, ' 'Learning rate:{1} |'.format(epoch, lr))
    with ut.Log('Learning rate:{0}\n'.format(lr)):
        pass


@ut.timing("extract_feature")
def extract_feature():
    # assumes: import torch.utils.data as tud; from tqdm import tqdm;
    # `network` is the project's model-definition module
    model = network.net(P.net_kernel_sizes, P.net_channels, P.net_num_classes,
                        P.net_in_channel)
    model.cuda()
    model.load_state_dict(tc.load(P.SAVE))
    kwargs = {'num_workers': 16, 'pin_memory': True}
    for i in [P.TRAIN_DIR, P.VALIDATION_DIR, P.ENROLL_DIR, P.TEST_DIR]:
        data_set = DataGet(i)
        val_loader = tud.DataLoader(data_set,
                                    batch_size=64,
                                    shuffle=False,
                                    **kwargs)
        # passing the directory path as `epoch` selects the embedding-extraction branch of test()
        test(tqdm(val_loader), model, i)
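The `P` module referenced throughout (P.SAVE, P.PERFORM_BATCH_FREQUENCY, the data directories, and the network hyperparameters) is not shown. A hypothetical stand-in with the names used above; every value is a placeholder, and the directory paths merely mirror the `.*mfcc/([a-z]*)/` pattern that test() parses.

# params.py -- hypothetical stand-in for the P config module
net_kernel_sizes = [5, 5, 3]      # placeholder conv kernel sizes
net_channels = [64, 128, 256]     # placeholder channel widths
net_num_classes = 1000            # placeholder speaker/class count
net_in_channel = 1                # single-channel (e.g. MFCC) input
PERFORM_BATCH_FREQUENCY = 50      # log every N batches
SAVE = "./model.pth"              # checkpoint path used by tc.load(P.SAVE)
TRAIN_DIR = "./data/mfcc/train/"
VALIDATION_DIR = "./data/mfcc/validation/"
ENROLL_DIR = "./data/mfcc/enroll/"
TEST_DIR = "./data/mfcc/test/"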


if __name__ == '__main__':
    with ut.Log(
            "================================================================================\n"
    ):
        # main()
        extract_feature()