Example #1
0
def test_process(model, pre_file, data_type, epochs=20, dataset='ucf101', 
    classes=101, cross_index=1, seq_len=3, batch_size=16, split_sequence=False):

    model.load_weights('weights/{}_{}e_cr{}.h5'.format(pre_file,epochs,cross_index))

    out_file = r'{}database/{}-test{}-split{}.pickle'.format(data_output_path,dataset,seq_len,cross_index)
    with open(out_file,'rb') as f2:
        keys = pickle.load(f2)
    
    len_samples = len(keys)

    print('-'*40)
    print('{} testing'.format(pre_file))
    print('Number samples: {}'.format(len_samples))
    print('-'*40)

    Y_test = gd.getClassData(keys)
    steps = int(np.ceil(len_samples*1.0/batch_size))

    time_start = time.time()

    y_pred = model.predict_generator(
        gd.getTrainData(
            keys=keys,batch_size=batch_size,dataset=dataset,classes=classes,train='test',data_type=data_type,split_sequence=split_sequence), 
        max_queue_size=20, 
        steps=steps)

    run_time = time.time() - time_start

    with open('results/{}_{}e_cr{}.pickle'.format(pre_file,epochs,cross_index),'wb') as fw3:
        pickle.dump([y_pred, Y_test],fw3)

    y_classes = y_pred.argmax(axis=-1)
    print(classification_report(Y_test, y_classes, digits=6))
    print('Run time: {}'.format(run_time))
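These snippets all depend on a project-specific gd.getTrainData generator. As a rough guide to the batch format that predict_generator and fit_generator expect from it, here is a minimal stand-in generator; the name dummy_train_data, the random data, and the frame shape are illustrative assumptions, not the project's real implementation.

import numpy as np

def dummy_train_data(keys, batch_size, classes, seq_len=3, frame_shape=(224, 224, 3)):
    # Illustrative stand-in for gd.getTrainData: yields (inputs, one-hot targets)
    # batches and loops forever, which is what fit_generator/predict_generator expect.
    while True:
        for start in range(0, len(keys), batch_size):
            batch_keys = keys[start:start + batch_size]
            X = np.random.rand(len(batch_keys), seq_len, *frame_shape).astype('float32')
            labels = np.random.randint(0, classes, size=len(batch_keys))
            yield X, np.eye(classes)[labels]

Because only steps (or steps_per_epoch) batches are drawn per call, the endless while True loop is what keeps repeated epochs from exhausting the generator.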
Example #2
0
    print('MobileNet Optical #{} stream only: Training'.format(opt_size))
    print('-' * 40)
    print('Number samples: {}'.format(len_samples))
    if server:
        print('Number valid: {}'.format(len_valid))
    histories = []

    for e in range(epochs):
        print('-' * 40)
        print('Epoch', e + 1)
        print('-' * 40)

        random.shuffle(keys)
        if server:
            history = result_model.fit_generator(
                gd.getTrainData(keys, batch_size, classes, 2, 'train',
                                opt_size),
                verbose=1,
                max_queue_size=2,
                steps_per_epoch=len_samples // batch_size,
                epochs=1,
                validation_data=gd.getTrainData(keys_valid, batch_size,
                                                classes, 2, 'test', opt_size),
                validation_steps=len_valid // batch_size)
            histories.append([
                history.history['acc'], history.history['val_acc'],
                history.history['loss'], history.history['val_loss']
            ])
        else:
            history = result_model.fit_generator(gd.getTrainData(
                keys, batch_size, classes, 2, 'train', opt_size),
                                                 verbose=1,
Example #3
0
        validation_steps = 5

    for e in range(epochs):
        print('Epoch', e + 1)
        print('-' * 40)

        if server:
            random.shuffle(keys)

        time_start = time.time()

        history = result_model.fit_generator(
            gd.getTrainData(keys=keys,
                            batch_size=batch_size,
                            classes=classes,
                            mode=1,
                            train='train',
                            opt_size=[opt_size],
                            seq=True),
            verbose=1,
            max_queue_size=3,
            steps_per_epoch=steps,
            epochs=1,
            validation_data=gd.getTrainData(keys=keys_valid,
                                            batch_size=batch_size,
                                            classes=classes,
                                            mode=1,
                                            train='test',
                                            opt_size=[opt_size],
                                            seq=True),
            validation_steps=validation_steps)
Example #4
0
def test_process(model,
                 pre_file,
                 data_type,
                 epochs=20,
                 dataset='ucf101',
                 classes=101,
                 cross_index=1,
                 seq_len=3,
                 batch_size=1,
                 split_sequence=False):

    print(
        glob.glob('weights/' + pre_file + '-{:02d}-*.hdf5'.format(epochs))[0])
    model.load_weights(
        glob.glob('weights/' + pre_file + '-{:02d}-*.hdf5'.format(epochs))[0])

    y_pred = np.zeros((3783, 101))

    for i in range(9):
        out_file = r'{}database/{}-test{}-split{}-test0'.format(
            data_output_path, dataset, seq_len,
            cross_index) + str(i) + '.pickle'
        with open(out_file, 'rb') as f2:
            keys = pickle.load(f2)
        len_samples = len(keys)

        print('-' * 40)
        print('{} testing'.format(pre_file))
        print('Number samples: {}'.format(len_samples))
        print('-' * 40)

        Y_test = gd.getClassData(keys)
        steps = int(np.ceil(len_samples * 1.0 / batch_size))

        time_start = time.time()

        y_ = model.predict_generator(gd.getTrainData(
            keys=keys,
            batch_size=batch_size,
            dataset=dataset,
            classes=classes,
            train='test',
            data_type=data_type,
            split_sequence=split_sequence),
                                     max_queue_size=20,
                                     steps=steps)

        run_time = time.time() - time_start

        #y_ = y[2]
        # Average the 10 predictions that belong to each clip; use a separate
        # loop variable so the outer file index i is not shadowed.
        y_p = []
        for j in range(0, 10 * len_samples, 10):
            temp = np.sum(y_[j:j + 10], axis=0) * 1.0 / 10
            y_p.append(temp)
        y_pred = y_pred + y_p

    with open(
            'results/{}_{}e_cr{}.pickle'.format(pre_file, epochs, cross_index),
            'wb') as fw3:
        pickle.dump([y_pred, Y_test], fw3)

    print(np.array(y_pred).shape)
    y_classes = np.array(y_pred).argmax(axis=-1)
    print(classification_report(Y_test, y_classes, digits=6))
    confusion_mtx = confusion_matrix(Y_test, y_classes)
    np.savetxt('results/{}_{}e_cr{}.csv'.format(pre_file, epochs, cross_index),
               confusion_mtx,
               delimiter=",")
    print('Run time: {}'.format(run_time))
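Assuming the intent of the inner loop in this example is to average all ten consecutive predictions that belong to each clip, the same result can be obtained with a single reshape; a minimal sketch (the shapes are assumptions based on the 3783 x 101 buffer above):

import numpy as np

# y_ is assumed to hold 10 consecutive predictions per clip, shape (10 * len_samples, classes).
y_p = y_.reshape(len_samples, 10, -1).mean(axis=1)
y_pred = y_pred + y_p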
Example #5
0
def train_process(model,
                  pre_file,
                  data_type,
                  epochs=20,
                  dataset='ucf101',
                  retrain=False,
                  classes=101,
                  cross_index=1,
                  seq_len=3,
                  old_epochs=0,
                  batch_size=16,
                  split_sequence=False,
                  fine=True):

    out_file = r'{}database/{}-train{}-split{}-new.pickle'.format(
        data_output_path, dataset, seq_len, cross_index)
    valid_file = r'{}database/{}-test{}-split{}-test3.pickle'.format(
        data_output_path, dataset, seq_len, cross_index)

    with open(out_file, 'rb') as f1:
        keys = pickle.load(f1)
    len_samples = len(keys)

    with open(valid_file, 'rb') as f2:
        keys_valid = pickle.load(f2)
    len_valid = len(keys_valid)

    print('-' * 40)
    print('{} training'.format(pre_file))
    print('Number samples: {}'.format(len_samples))
    print('Number valid: {}'.format(len_valid))
    print('-' * 40)

    histories = []
    if server:
        steps = len_samples // batch_size
        validation_steps = int(np.ceil(len_valid * 1.0 / batch_size))
    else:
        steps = len_samples // batch_size
        validation_steps = int(np.ceil(len_valid * 1.0 / batch_size))

    lrate = LearningRateScheduler(step_decay)
    filepath = "weights/" + pre_file + "-{epoch:02d}-{val_acc:.4f}.hdf5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 mode='max')
    csv_logger = CSVLogger('histories/{}_{}_{}_{}e_cr{}.csv'.format(
        pre_file, seq_len, old_epochs, epochs, cross_index),
                           append=True,
                           separator=';')
    callbacks_list = [checkpoint, csv_logger]

    for e in range(epochs):
        print('Epoch', old_epochs + e + 1)
        random.shuffle(keys)
        model.fit_generator(gd.getTrainData(keys=keys,
                                            batch_size=batch_size,
                                            dataset=dataset,
                                            classes=classes,
                                            train='train',
                                            data_type=data_type,
                                            split_sequence=split_sequence,
                                            epochs=1),
                            verbose=1,
                            max_queue_size=20,
                            steps_per_epoch=steps,
                            epochs=1,
                            shuffle=True,
                            validation_data=gd.getTrainData(
                                keys=keys_valid,
                                batch_size=batch_size,
                                dataset=dataset,
                                classes=classes,
                                train='valid',
                                data_type=data_type,
                                split_sequence=split_sequence),
                            validation_steps=validation_steps,
                            callbacks=callbacks_list)
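This example builds LearningRateScheduler(step_decay) but step_decay itself is not shown (and lrate is never added to callbacks_list as written). A conventional step-decay schedule looks like the sketch below; the starting rate, drop factor, and interval are assumptions, not values taken from this project.

import math

def step_decay(epoch):
    # Multiply the starting rate by `drop` once every `epochs_drop` epochs.
    initial_lr = 0.001
    drop = 0.5
    epochs_drop = 10
    return initial_lr * math.pow(drop, math.floor(epoch / epochs_drop))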
Example #6
0
def train_process(model, pre_file, data_type, epochs=20, dataset='ucf101', 
    retrain=False, classes=101, cross_index=1, seq_len=3, old_epochs=0, batch_size=16, split_sequence=False, fine=True):

    out_file = r'{}database/{}-train{}-split{}.pickle'.format(data_output_path,dataset,seq_len,cross_index)
    valid_file = r'{}database/{}-test{}-split{}.pickle'.format(data_output_path,dataset,seq_len,cross_index)

    with open(out_file,'rb') as f1:
        keys = pickle.load(f1)
    len_samples = len(keys)

    with open(valid_file,'rb') as f2:
        keys_valid = pickle.load(f2)
    len_valid = len(keys_valid)

    print('-'*40)
    print('{} training'.format(pre_file))
    print('Number samples: {}'.format(len_samples))
    print('Number valid: {}'.format(len_valid))
    print('-'*40)

    histories = []
    if server:
        steps = len_samples//batch_size
        validation_steps = int(np.ceil(len_valid*1.0/batch_size))
    else:
        steps = len_samples//batch_size
        validation_steps = int(np.ceil(len_valid*1.0/batch_size))
    
    for e in range(epochs):
        print('Epoch', e+1)
        print('-'*40)

        random.shuffle(keys)

        # def exp_decay(epoch, lr):
        #     print ('Index',epoch, e)
        #     if (e % 3 == 0) & (e != 0): 
        #         lr = lr * 0.9
        #     return lr

        # lrate = LearningRateScheduler(exp_decay, verbose=1)

        time_start = time.time()

        history = model.fit_generator(
            gd.getTrainData(
                keys=keys,batch_size=batch_size,dataset=dataset,classes=classes,train='train',data_type=data_type,split_sequence=split_sequence), 
            verbose=1, 
            max_queue_size=20, 
            steps_per_epoch=steps, 
            epochs=1,
            validation_data=gd.getTrainData(
                keys=keys_valid,batch_size=batch_size,dataset=dataset,classes=classes,train='valid',data_type=data_type,split_sequence=split_sequence),
            validation_steps=validation_steps,
            # callbacks=[lrate]
        )
        run_time = time.time() - time_start

        histories.append([
            history.history['acc'],
            history.history['val_acc'],
            history.history['loss'],
            history.history['val_loss'],
            run_time
        ])
        model.save_weights('weights/{}_{}e_cr{}.h5'.format(pre_file,old_epochs+1+e,cross_index))

        with open('histories/{}_{}_{}_{}e_cr{}'.format(pre_file,seq_len,old_epochs,epochs,cross_index), 'wb') as file_pi:
            pickle.dump(histories, file_pi)
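The commented-out exp_decay block in this example keys the decay off the outer loop index e, because each fit_generator call runs only a single Keras epoch. A working version of that idea is sketched below; the make_exp_decay wrapper is an assumption of mine, not part of the project, and the two-argument (epoch, lr) schedule signature needs a reasonably recent Keras 2.x.

from keras.callbacks import LearningRateScheduler

def make_exp_decay(outer_epoch):
    # Returns a schedule that cuts the current rate by 10% on every 3rd outer epoch.
    def exp_decay(epoch, lr):
        if outer_epoch % 3 == 0 and outer_epoch != 0:
            lr = lr * 0.9
        return lr
    return exp_decay

# Inside the training loop:
#   lrate = LearningRateScheduler(make_exp_decay(e), verbose=1)
#   model.fit_generator(..., epochs=1, callbacks=[lrate])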
Example #7
0
    if server:
        print('Number valid: {}'.format(len_valid))
    histories = []
    
    for e in range(epochs):
        print('-'*40)
        print('Epoch', e+1)
        print('-'*40)

        random.shuffle(keys)
        if server:
            history = result_model.fit_generator(
                gd.getTrainData(
                    keys,
                    batch_size,
                    classes,
                    5,
                    'train', 
                    opt_size), 
                verbose=1, 
                max_queue_size=2, 
                steps_per_epoch=len_samples//batch_size,
                epochs=1,
                validation_data=gd.getTrainData(
                    keys_valid,
                    batch_size,
                    classes,
                    5,
                    'test',
                    opt_size),
                validation_steps=len_valid//batch_size)
Example #8
0
        validation_steps = int(np.ceil(len_valid*1.0/batch_size))
    else:
        steps = 5
        validation_steps = 5
    
    for e in range(epochs):
        print('Epoch', e+1)
        print('-'*40)

        if server:
            random.shuffle(keys)

        time_start = time.time()

        history = result_model.fit_generator(
            gd.getTrainData(
                keys=keys,batch_size=batch_size,classes=classes,mode=3,train='train',opt_size=multi_opt_size), 
            verbose=1, 
            max_queue_size=3, 
            steps_per_epoch=steps, 
            epochs=1,
            validation_data=gd.getTrainData(
                keys=keys_valid,batch_size=batch_size,classes=classes,mode=3,train='test',opt_size=multi_opt_size),
            validation_steps=validation_steps
        )
        run_time = time.time() - time_start

        histories.append([
            history.history['acc'],
            history.history['val_acc'],
            history.history['loss'],
            history.history['val_loss'],
Example #9
0
z = Reshape((1, 1, 2048))(z)
z = Dropout(0.5)(z)
z = Flatten()(z)
z = Dense(classes, activation='softmax')(z)
# Final touch
result_model = Model(inputs=[input_x, input_y1, input_y2, input_y3], outputs=z)
result_model.summary()

result_model.compile(loss='categorical_crossentropy',
                     optimizer=optimizers.SGD(lr=0.001,
                                              decay=1e-6,
                                              momentum=0.9,
                                              nesterov=True),
                     metrics=['accuracy'])

opt_size = 2
batch_size = int(sys.argv[1])
out_file = r'/home/oanhnt/thainh/data/database/train-opt{}.pickle'.format(
    opt_size)
with open(out_file, 'rb') as f1:
    keys = pickle.load(f1)
len_samples = len(keys)

result_model.fit_generator(
    gd.getTrainData(keys, batch_size, classes, 4, 'train', opt_size),
    verbose=1,
    max_queue_size=2,
    steps_per_epoch=len_samples // batch_size,
    epochs=1,
)
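Example #9 stops right after the single training pass. The other snippets reload saved weights for testing, so a natural follow-up is to persist the trained model; the file name below is an assumption, not one used by the project.

# Keras model API: save only the weights so a test-time script can reload them later.
result_model.save_weights('weights/mobilenet_opt{}_1e.h5'.format(opt_size))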