Esempio n. 1
0
def predict(emotion, from_dir, to_dir):
    """Score every image in *from_dir* with the ResNet model for *emotion*.

    Loads the '<emotion>' checkpoint from ./checkpoints/ResNet50, runs it
    over the images (batch size 1, original order preserved), and writes a
    CSV with columns (filename, score) to *to_dir*/<emotion>_score.csv.

    Args:
        emotion: name of the emotion checkpoint to load.
        from_dir: directory containing the images to score.
        to_dir: output directory for the score CSV.
    """
    custom_data = CustomData(from_dir, isTrain=False)
    # batch_size defaults to 1, so data['name'][0] / score[0] index the
    # single sample in each batch.
    dataset = DataLoader(custom_data, shuffle=False)

    rows = []
    with torch.no_grad():
        model, _, _ = loadModel(emotion,
                                model_type='ResNet',
                                save_dir='./checkpoints/ResNet50',
                                isTrain=False)

        for i, data in enumerate(dataset):
            score, _ = model(Variable(data['img']).cuda())
            rows.append([data['name'][0], score.cpu().numpy()[0]])
            if (i + 1) % 100 == 0:
                print('Score %d pictures' % (i + 1))

    print('Score %d pictures' % (len(dataset)))
    # Build the DataFrame once from the accumulated rows instead of the
    # original per-row `df.loc[i] = ...`, which is quadratic in row count.
    df = pd.DataFrame(rows, columns=['filename', 'score'])
    to_file = os.path.join(to_dir, '%s_score.csv' % emotion)
    df.to_csv(to_file, index=False)
Esempio n. 2
0
def test_ResNet(emotion):
    """Evaluate the '<emotion>' ResNet checkpoint on the test split.

    Computes the per-sample RMSE over ./data/test, prints the sample count
    and the average RMSE, and appends the result to ./results/test_loss.txt.
    """
    csv_file = os.path.join('./data/test', '%s.csv' % emotion)
    loader = DataLoader(
        CustomData('./data/test/img_files', csv_file, isTrain=False),
        batch_size=1,
        shuffle=False)
    model, _, _ = loadModel(emotion,
                            model_type='ResNet',
                            save_dir='./checkpoints/ResNet50',
                            isTrain=False)
    with torch.no_grad():
        per_sample = []
        for batch in loader:
            _, batch_loss = model(
                Variable(batch['img']).cuda(),
                Variable(batch['label']).float().cuda())
            per_sample.extend(batch_loss.cpu().numpy())

        # Square-root each per-sample squared loss, then average.
        rmse = np.sqrt(np.array(per_sample))

        test_result = emotion + ': %f' % (np.sum(rmse) / len(rmse))
        print(len(rmse))
        print(test_result)
        log('./results/test_loss.txt', test_result + '\n')
Esempio n. 3
0
def train_ResNet(emotion, load=False):
    """Train the ResNet regressor for *emotion*.

    Runs up to `all_epoch` epochs, logging average train/test loss per
    epoch to checkpoints/ResNet50/log_<emotion>.txt and saving a
    checkpoint every `save_delta` epochs.

    Args:
        emotion: target emotion name; selects the label CSV and the
            checkpoint/log file names.
        load: when True, resume from the latest checkpoint in
            ./checkpoints/ResNet50 instead of starting fresh.

    Relies on module-level hyper-parameters: batch_size, init_lr,
    lr_decay, all_epoch, adjust, save_delta.
    """
    csv_file = os.path.join('./data/train', emotion + '.csv')
    train_data = CustomData('./data/train/img_files', csv_file)
    train_dataset = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    # NOTE(review): the test split reuses the *train* csv_file here —
    # confirm whether './data/test/<emotion>.csv' was intended instead.
    test_data = CustomData('./data/test/img_files', csv_file, isTrain=False)
    test_dataset = DataLoader(test_data, batch_size=batch_size)

    if load:
        model, start_epoch, total_step = loadModel(
            emotion, model_type='ResNet', save_dir='./checkpoints/ResNet50')
        print('continue to train. start from epoch %s' % start_epoch)
    else:
        total_step = 0
        start_epoch = 0
        model = ResNet().cuda()
    print(list(model.children()))

    # Reconstruct the already-decayed learning rate when resuming past
    # the lr_decay epoch.
    if start_epoch > lr_decay:
        lr = init_lr - (start_epoch - lr_decay) * init_lr / 100.0
    else:
        lr = init_lr

    for epoch in range(start_epoch, all_epoch):

        # Linear decay: lose 1% of init_lr per epoch after lr_decay.
        if epoch > lr_decay:
            lr = lr - init_lr / 100.0

        train_losses = []
        test_losses = []
        for data in train_dataset:
            total_step += 1
            result, loss = model(
                Variable(data['img']).cuda(),
                Variable(data['label']).float().cuda())
            # NOTE(review): the optimizer is re-created on every step,
            # which discards any momentum/adaptive state between steps.
            # Kept as-is to preserve training behaviour; consider
            # creating it once per epoch instead.
            if (epoch < adjust):
                optimizer = model.get_optimizer(lr)
            else:
                optimizer = model.get_optimizer(lr, True)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_losses.append(loss.detach().cpu().numpy())

        with torch.no_grad():
            for data in test_dataset:
                result, loss = model(
                    Variable(data['img']).cuda(),
                    Variable(data['label']).float().cuda())
                test_losses.append(loss.cpu().numpy())

        # Renamed from `str`, which shadowed the builtin; also dropped
        # the redundant list.clear() calls (both lists are re-bound at
        # the top of each epoch).
        now = datetime.datetime.now()
        msg = 'current time: {}\nepoch: {}\ntrain_loss: {}\ntest_loss: {}\n----------------\n' \
            .format(now, epoch, np.average(train_losses), np.average(test_losses))
        print(msg)
        log('checkpoints/ResNet50/log_%s.txt' % emotion, msg)

        if (epoch + 1) % save_delta == 0:
            saveModel(model,
                      emotion,
                      epoch,
                      total_step,
                      save_dir='./checkpoints/ResNet50')
Esempio n. 4
0
    # assuming that shuffle=True in keras.fit is shuffling correctly
    # NOTE(review): this fragment is the tail of a larger if/elif over
    # `mode`; the TRAIN branch begins above this excerpt.
    history = model.fit(trainX,
                        trainY,
                        shuffle=True,
                        validation_data=(testX, testY),
                        batch_size=batchSize,
                        epochs=nrOfEpochs)
    model.summary()
    # Persist the trained model under a unix-timestamp filename.
    model.save('models/model_' + str(int(round(time.time()))) + '.h5')
    # saving acc and loss graph as png
    plotAndSaveHistory(history)
    # predicting classes for 1% of the dataset mX
    # predict(model, mX, mY, 0.01)
elif mode is Mode.PREDICT:
    # Reload and preprocess the full dataset, then evaluate a saved model.
    X, Y = loadData()
    (mX, mY) = preProcess(X, Y, numberOfChannels, numberOfClasses,
                          cropWindowSize, cropWindowShift, fs)
    # visualize one sample spectrogram before evaluating
    visualizeSpectrogram(mX,
                         spectroWindowSize,
                         spectroWindowShift,
                         fs,
                         nrOfSpectrogram=1)
    model = loadModel(modelPath)

    print("keras.evaluate() result: ", model.evaluate(mX, mY))
    # predicting classes for 100% of the dataset mX
    predict(model, mX, mY, amount=1, verbose=0)
elif mode is Mode.LIVE:
    # Real-time inference is a placeholder for now.
    raise NotImplementedError("Live mode not implemented yet")
Esempio n. 5
0
# Section marker: bare string statement, a no-op at runtime.
'----dev----'

# cd = ClassiferDev()
# cd.name = 'GRU_128_3-30'
# cd.setModel(loadModel(cd.name))

# Section marker: bare string statement, a no-op at runtime.
'====prediction===='

# from Classifier import ClassifierDev
# cd = ClassifierDev()
# cd.trainingdata.all_categories
from Classifier import Classifier
# from Classifier import Mapper
# Load the pretrained GRU model and sanity-check a single mention.
model_name = 'GRU_128_3-30'
classifier = Classifier(category_names=['chem', 'COMMON'],
                        model=loadModel(model_name))
# classifer.model.to('cpu')
classifier.mentionToType('ethanol')

# for t in [0.01, 0.16, 0.2, 0.25, 0.33, 0.4, 0.5, 0.6, 0.66, 0.75, 0.8, 0.83, 1]:
#     print('threshold:{}'.format(t))
#     classifer.mapper = Mapper(dict_point = 3, pred_point = 1, threshold = t)
#     pred = copy.deepcopy(backoff_1)
#     classifer.predict(pred)
#     conf = evaluation_sens(dev, pred)

from Classifier import MapperNone

# Swap in the no-op mapper before predicting.
classifier.mapper = MapperNone()

# NOTE(review): `copy` and `backoff_1` are defined outside this excerpt;
# the script continues past this point.
pred = copy.deepcopy(backoff_1)