Example No. 1
def main(opt):
    if opt.mode == "video":
        assert opt.video != '', "No video path given"
        camera = cv2.VideoCapture(opt.video)
    else:
        camera = cv2.VideoCapture(0)
    opt.heads['depth'] = opt.num_output
    if opt.load_model == '':
        opt.load_model = '../models/model_conv3d_last.pth'
    if opt.gpus[0] >= 0:
        opt.device = torch.device('cuda:{}'.format(opt.gpus[0]))
    else:
        opt.device = torch.device('cpu')

    timestep = 4

    if opt.task == "conv3d":
        model, _, _ = create_conv3d(opt, timestep)
    else:
        model, _, _ = create_lstm(opt, timestep)
    model = model.to(opt.device)
    model.eval()

    debugger = Dcam()
    k = 0
    input_imgs = []

    while debugger.loop_on:
        ret, frame = camera.read()
        if not ret or frame is None:
            print("*** No camera connected ***")
            break

        # keep a sliding window of the most recent timestep frames
        if len(input_imgs) < timestep:
            input_imgs.append(frame)
        elif len(input_imgs) == timestep:
            input_imgs.append(frame)
            input_imgs.pop(0)

            image, pred, pred_3d, ignore_idx = demo_image(
                input_imgs, model, opt, timestep)
            debugger.add_img(image)
            debugger.add_point_2d(pred, (255, 0, 0))
            debugger.add_point_3d(pred_3d, 'b', ignore_idx=ignore_idx)
            debugger.realtime_show(k)
            debugger.destroy_loop()
            debugger.show_all_imgs()

            k = cv2.waitKey(10)
            if k == 27:
                debugger.loop_on = 0
        else:
            # unreachable: the buffer never grows past timestep frames
            raise ValueError("frame buffer exceeded timestep")

    cv2.destroyAllWindows()
    camera.release()
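The core of this demo loop is a fixed-length frame buffer: once input_imgs holds timestep frames, every new frame pushes the oldest one out. A minimal stand-alone sketch of the same idea using collections.deque, which handles the eviction automatically (a hypothetical snippet, not part of the original repository):

from collections import deque

import cv2

timestep = 4
buffer = deque(maxlen=timestep)  # oldest frame is evicted automatically

camera = cv2.VideoCapture(0)
try:
    while True:
        ret, frame = camera.read()
        if not ret or frame is None:
            break
        buffer.append(frame)
        if len(buffer) == timestep:
            # a model would consume the last timestep frames here
            pass
        if cv2.waitKey(10) == 27:  # Esc key
            break
finally:
    camera.release()
    cv2.destroyAllWindows()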
Example No. 2
def train_generator(generator, epochs=1):
    """
    TODO: use model.create_lstm and add dynamic shape params
    """
    features_shape = list(generator.dataset.features.values())[0][0].shape
    captions_shape = (generator.dataset.vocabulary.caption_max_length, )
    output_size = len(generator.dataset.vocabulary.tokenizer.word_index) + 1
    steps_per_epoch = len(generator.dataset.captions)
    lstm = create_lstm(features_shape, captions_shape, output_size)
    yielder = generator.generate()
    for i in range(epochs):
        # train a single epoch per iteration, then save a checkpoint
        lstm.fit_generator(yielder,
                           epochs=1,
                           steps_per_epoch=steps_per_epoch,
                           verbose=1)
        lstm.save('model_' + str(i) + '.h5')
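The loop above trains one epoch at a time and writes a checkpoint after each pass. A self-contained toy version of that pattern, with a dummy model and generator standing in for create_lstm and generator.generate (all shapes and sizes below are made up for illustration):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

def toy_generator(batch_size=8):
    # endless stream of random (features, label) batches
    while True:
        x = np.random.rand(batch_size, 10)
        y = np.random.randint(0, 2, size=(batch_size, 1))
        yield x, y

model = Sequential([Dense(1, activation='sigmoid', input_shape=(10,))])
model.compile(optimizer='adam', loss='binary_crossentropy')

for i in range(3):  # epochs
    model.fit_generator(toy_generator(), steps_per_epoch=5, epochs=1, verbose=1)
    model.save('toy_model_{}.h5'.format(i))  # checkpoint after every epoch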
Example No. 3
def train_model(x_tr,
                y_tr,
                x_val,
                y_val,
                max_length,
                size_voc,
                output_dim=100,
                batch_size=50):
    # preprocess data
    x_tr, y_tr, x_val, y_val = preprocess_pad(x_tr, y_tr, x_val, y_val,
                                              max_length)
    print(x_tr.shape)
    print(y_tr.shape)
    print(x_val.shape)
    print(y_val.shape)
    model = create_lstm(input_dim=size_voc,
                        max_length=max_length,
                        output_dim=output_dim,
                        n_class=2)
    # defined but unused below; the model is compiled with 'adam'
    sgd = keras.optimizers.SGD(lr=0.01,
                               momentum=0.9,
                               decay=1e-5,
                               nesterov=True)
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc'])
    generator_train = generator_shuffle(x_tr,
                                        y_tr,
                                        2,
                                        batch_size=batch_size,
                                        input_shape=(max_length,))  # 1-tuple: (max_length) alone is just an int
    generator_valid = generator(x_val,
                                y_val,
                                2,
                                batch_size=batch_size,
                                input_shape=(max_length,))

    step_train = int(len(x_tr) / batch_size) - 1
    step_val = int(len(x_val) / batch_size) - 1
    print('train size:', len(x_tr))
    print('val size:', len(x_val))
    print('steps per epoch (train):', step_train)
    print('steps per epoch (val):', step_val)

    # callback
    callback_tensorboard = keras.callbacks.TensorBoard(
        log_dir='./logs',
        histogram_freq=0,
        batch_size=32,
        write_graph=True,
        write_grads=False,
        write_images=False,
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None)
    checkpoints = ModelCheckpoint('CBOW_keras_model.hdf5',
                                  verbose=1,
                                  save_best_only=True,
                                  period=1)  # -{epoch:02d}
    callbacks_list = [callback_tensorboard, checkpoints]

    # train
    model.fit_generator(generator_train,
                        steps_per_epoch=step_train,
                        epochs=10,
                        verbose=1,
                        validation_data=generator_valid,
                        validation_steps=step_val,
                        callbacks=callbacks_list)
    return model
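This example and Example No. 5 both rely on generator_shuffle and generator helpers that are not shown. A plausible minimal sketch of the shuffling variant, assuming x and y are NumPy arrays; it draws a fresh random order on every pass (hypothetical, and the real helpers may differ, e.g. this example also passes a class-count argument):

import numpy as np

def generator_shuffle(x, y, batch_size=50, input_shape=None):
    # endless generator: new random order each pass, fixed-size batches
    n = len(x)
    while True:
        order = np.random.permutation(n)
        for start in range(0, n - batch_size + 1, batch_size):
            idx = order[start:start + batch_size]
            yield x[idx], y[idx]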
Example No. 4
def main(opt):
    if opt.disable_cudnn:
        torch.backends.cudnn.enabled = False
        print('Cudnn is disabled.')

    timestep = 4

    logger = Logger(opt)
    opt.device = torch.device('cuda:{}'.format(opt.gpus[0]))

    Dataset = dataset_factory[opt.dataset]
    LstmData = SeqH36m(Dataset(opt, 'train', 1), timestep)
    train, val = task_factory[opt.task]

    if opt.task == "conv3d":
        model, optimizer, start_epoch = create_conv3d(opt, timestep)
    else:
        model, optimizer, start_epoch = create_lstm(opt, timestep)

    if len(opt.gpus) > 1:
        model = torch.nn.DataParallel(model,
                                      device_ids=opt.gpus).cuda(opt.device)
    else:
        model = model.cuda(opt.device)

    val_loader = torch.utils.data.DataLoader(SeqH36m(Dataset(opt, 'val', 1),
                                                     timestep),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True)

    if opt.test:
        log_dict_val, preds = val(0, opt, val_loader, model, timestep)
        sio.savemat(os.path.join(opt.save_dir, 'preds.mat'),
                    mdict={'preds': preds})
        return

    train_loader = torch.utils.data.DataLoader(
        LstmData,
        batch_size=opt.batch_size * len(opt.gpus),
        shuffle=True,  # if opt.debug == 0 else False,
        num_workers=opt.num_workers,
        pin_memory=True)

    best = -1
    for epoch in range(start_epoch, opt.num_epochs + 1):
        mark = epoch if opt.save_all_models else 'last'
        log_dict_train, _ = train(epoch, opt, train_loader, model, optimizer,
                                  timestep)
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(
                os.path.join(opt.save_dir, 'model_lstm_{}.pth'.format(mark)),
                epoch, model, optimizer)
            log_dict_val, preds = val(epoch, opt, val_loader, model, timestep)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] > best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_lstm_best.pth'),
                           epoch, model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_lstm_last.pth'),
                       epoch, model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    # logger.close()
    return log_dict_train['loss']
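The manual learning-rate schedule at the end of the loop multiplies the base rate by 0.1 for every milestone in opt.lr_step that has been reached; PyTorch ships the same schedule as torch.optim.lr_scheduler.MultiStepLR. A minimal self-contained sketch (the model and milestone epochs here are placeholders):

import torch

model = torch.nn.Linear(4, 2)  # stand-in model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[90, 120], gamma=0.1)  # multiply LR by 0.1 at each milestone

for epoch in range(140):
    # ... one epoch of training would go here ...
    optimizer.step()   # placeholder update so the scheduler has something to follow
    scheduler.step()   # applies the step decay after the epoch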
Example No. 5
def train_model(x_tr,
                y_tr,
                x_val,
                y_val,
                max_length,
                size_voc,
                dic_word,
                output_dim=100,
                batch_size=50,
                embedding_glove=False,
                lemmatisation=False,
                output_name='temp'):
    '''
        Train the model on training and validation data.
        max_length: maximum length of a sequence (tweet)
        size_voc: size of the vocabulary in the training data
        dic_word: dictionary mapping each word to its number
        output_name: name given to the model when it is saved
        output_dim: output dimension of the embedding layer
        batch_size: number of samples per training batch
        embedding_glove: whether to load pretrained GloVe embedding weights
        lemmatisation: passed through to load_GloveEmbedding
        (see model.py for the model definition)
    '''
    if not embedding_glove:
        emb = None
    else:
        print('loading glove ....')
        emb = load_GloveEmbedding(output_dim,
                                  dic_word,
                                  lemmatisation=lemmatisation)
    # create model

    model = create_lstm(input_dim=size_voc,
                        max_length=max_length,
                        output_dim=output_dim,
                        n_class=2,
                        embedding=emb)

    # load a previously trained model and copy its weights (transfer learning)
    old = load_model('lstm_glove_bigDataset.hdf5')
    model = copy_weight(model, old)
    del old

    sgd = keras.optimizers.SGD(lr=0.001,
                               momentum=0.9,
                               decay=1e-5,
                               nesterov=True)
    # alternative optimizers, defined but unused; the model compiles with SGD below
    adam = keras.optimizers.Adam(lr=0.001,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08,
                                 decay=0.0)
    adagrad = keras.optimizers.Adagrad(lr=0.001, epsilon=None, decay=0.0)
    model.compile(optimizer=sgd,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    # create generator
    generator_train = generator_shuffle(x_tr,
                                        y_tr,
                                        batch_size=batch_size,
                                        input_shape=(max_length,))  # 1-tuple: (max_length) alone is just an int
    generator_valid = generator(x_val,
                                y_val,
                                batch_size=batch_size,
                                input_shape=(max_length,))

    step_train = int(len(x_tr) / batch_size) - 1
    step_val = int(len(x_val) / batch_size) - 1
    print('train size:', len(x_tr))
    print('val size:', len(x_val))
    print('steps per epoch (train):', step_train)
    print('steps per epoch (val):', step_val)

    # callback
    callback_tensorboard = keras.callbacks.TensorBoard(
        log_dir='./logs/' + output_name,
        histogram_freq=0,
        batch_size=16,
        write_graph=True,
        write_grads=False,
        write_images=False,
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None)
    lr_decay = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                 factor=0.5,
                                                 patience=2,
                                                 verbose=1,
                                                 mode='auto',
                                                 epsilon=0.0001,
                                                 cooldown=0,
                                                 min_lr=0.0)
    checkpoints = ModelCheckpoint(output_name + '.hdf5',
                                  verbose=1,
                                  save_best_only=True,
                                  period=1)  # -{epoch:02d}
    callbacks_list = [callback_tensorboard, checkpoints, lr_decay]

    # train
    model.fit_generator(generator_train,
                        steps_per_epoch=step_train,
                        epochs=15,
                        verbose=1,
                        validation_data=generator_valid,
                        validation_steps=step_val,
                        callbacks=callbacks_list)

    return model
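The transfer-learning step above depends on a copy_weight helper that is not shown in these examples. A hypothetical sketch of what such a helper might do, copying weights layer by layer wherever the shapes line up:

def copy_weight(new_model, old_model):
    # copy pretrained weights into the new model where layer shapes match
    for new_layer, old_layer in zip(new_model.layers, old_model.layers):
        old_w = old_layer.get_weights()
        new_w = new_layer.get_weights()
        if old_w and len(old_w) == len(new_w) and all(
                o.shape == n.shape for o, n in zip(old_w, new_w)):
            new_layer.set_weights(old_w)
    return new_model

# usage: model = copy_weight(model, load_model('pretrained.hdf5'))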