Example #1
def main():
    # load midi
    dirpath = '../'
    pieces = loadPieces(dirpath)

    # divide train valid
    valid_pieces = pieces[:num_valid]
    train_pieces = pieces[num_valid:]
    valid_gen = BatchGenerator(pieces[0], valid_batch_size, 1)
    train_gen = MixedGenarator(pieces, batch_size, num_unrolling)

    # create model and start training
    model = LSTM_model(layer_size, batch_size, num_unrolling)
    model.train(train_gen, valid_gen, train_step=10000, summary_frequency=100)
Example #2
def main(args):
    test_path = get_dset_path(args.dataset_name, 'test')

    logger.info("Initializing test dataset")
    test_dset, test_loader = data_loader(args, test_path)

    net = LSTM_model(args)
    net = net.cuda()

    checkpoint_path = ".\model\lstm767.tar"
    checkpoint = torch.load(checkpoint_path)
    net.load_state_dict(checkpoint['state_dict'])
    net.eval()

    batch_error = 0
    batch_fde = 0
    for idx, batch in enumerate(test_loader):

        (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
         loss_mask, seq_start_end) = batch
        num_ped = obs_traj.size(1)   # obs_traj: (8, num_ped, 2)
        pred_traj_gt = pred_traj_gt.cuda()
        pred_traj = net(obs_traj.cuda(), num_ped, pred_traj_gt)

        ade_1 = get_mean_error(pred_traj, pred_traj_gt)
        ade_2 = displacement_error(pred_traj, pred_traj_gt) / (pred_traj.size(1) * 12)
        fde = final_displacement_error(pred_traj, pred_traj_gt) / pred_traj.size(1)

        batch_error += ade_2
        batch_fde += fde
    ade = batch_error / (idx+1)
    fin_fde = batch_fde / (idx+1)
    logger.info("ade is {:.2f}".format(ade))
    logger.info("ade is {:.2f}".format(fin_fde))
Example #3
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    device_ids = [0, 1]
    test_path = get_dset_path(args.dataset_name, 'test')

    logger.info("Initializing test dataset")
    test_dset, test_loader = data_loader(args, test_path)

    net = LSTM_model(args)
    #net = net.cuda(device_ids[1])

    checkpoint_path = "./model/lstm348.tar"
    checkpoint = torch.load(checkpoint_path)
    net.load_state_dict(checkpoint['state_dict'])
    net.eval()

    count = 0
    total_ade = 0
    total_fde = 0
    for batch in test_loader:
        (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
         loss_mask, seq_start_end) = batch
        num_ped = obs_traj.size(1)   # obs_traj: (8, num_ped, 2)
        #pred_traj_gt = pred_traj_gt.cuda(device_ids[1])
        pred_traj = net(obs_traj, num_ped, pred_traj_gt, seq_start_end)
        ade = get_mean_error(pred_traj, pred_traj_gt)
        total_ade += ade
        fde = final_displacement_error(pred_traj[-1], pred_traj_gt[-1])
        total_fde += (fde / num_ped)
        #logger.info("ade is {:.2f}".format(ade))
        count += 1

    ade_fin = total_ade / count
    fde_fin = total_fde / count
    logger.info("ade is {:.2f}".format(ade_fin))
    logger.info("fde is {:.2f}".format(fde_fin))
Example #4
def main(args):
    train_path = get_dset_path(args.dataset_name, 'train')
    val_path = get_dset_path(args.dataset_name, 'val')

    # random seed
    # torch.manual_seed(2)
    # np.random.seed(2)
    # if args.use_gpu:
    #     torch.cuda.manual_seed_all(2)

    logger.info("Initializing train dataset")
    train_dset, train_loader = data_loader(args, train_path)
    logger.info("Initializing val dataset")
    _, val_loader = data_loader(args, val_path)

    log_path = './log/'
    log_file_curve = open(os.path.join(log_path, 'log_loss.txt'), 'w+')
    log_file_curve_val = open(os.path.join(log_path, 'log_loss_val.txt'), 'w+')
    log_file_curve_val_ade = open(
        os.path.join(log_path, 'log_loss_val_ade.txt'), 'w+')

    net = LSTM_model(args)
    if args.use_gpu:
        net = net.cuda()

    optimizer = torch.optim.Adam(net.parameters(), lr=args.learning_rate)
    #scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.98)

    # resume training from the previous checkpoint
    # restore_path = '.\model\lstm294.tar'
    # logger.info('Restoring from checkpoint {}'.format(restore_path))
    # checkpoint = torch.load(restore_path)
    # net.load_state_dict(checkpoint['state_dict'])
    # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    #
    # for i_epoch in range(checkpoint['epoch']+1):
    #     if (i_epoch + 1) % 100 == 0:
    #         args.learning_rate *= 0.98

    epoch_loss_min = 160
    epoch_smallest = 0
    #for epoch in range(checkpoint['epoch']+1, args.num_epochs):
    for epoch in range(args.num_epochs):
        count = 0
        batch_loss = 0

        for batch in train_loader:
            # Zero out gradients
            net.zero_grad()
            optimizer.zero_grad()

            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch
            num_ped = obs_traj.size(1)

            #model_teacher.py
            pred_traj = net(obs_traj, num_ped, pred_traj_gt, seq_start_end)
            loss = displacement_error(pred_traj, pred_traj_gt)
            #loss = get_mean_error(pred_traj, pred_traj_gt)

            # Compute gradients
            loss.backward()
            # Clip gradients
            torch.nn.utils.clip_grad_norm_(net.parameters(), args.grad_clip)
            # Update parameters
            optimizer.step()

            batch_loss += loss
            count += 1

            #print(loss / num_ped)
        if (epoch + 1) % 6 == 0:
            pass
            #scheduler.step()
        logger.info('epoch {} train loss is {}'.format(epoch,
                                                       batch_loss / count))
        log_file_curve.write(str(batch_loss.item() / count) + "\n")

        batch_loss = 0
        val_ade = 0
        total_ade = 0
        for idx, batch in enumerate(val_loader):
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end) = batch
            num_ped = obs_traj.size(1)

            # model_teacher.py
            pred_traj = net(obs_traj, num_ped, pred_traj_gt, seq_start_end)
            loss = displacement_error(pred_traj, pred_traj_gt)

            batch_loss += loss
            val_ade = loss / (num_ped * 12)
            total_ade += val_ade

            count += 1

        fin_ade = total_ade / (idx + 1)
        log_file_curve_val_ade.write(str(fin_ade.item()) + "\n")

        epoch_loss = batch_loss / (idx + 1)
        if epoch_loss_min > epoch_loss:
            epoch_loss_min = epoch_loss
            epoch_smallest = epoch

            logger.info('Saving model')
            torch.save(
                {
                    'epoch': epoch,
                    'state_dict': net.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict()
                }, checkpoint_path(epoch))
        logger.info('epoch {} val loss is {}'.format(epoch, epoch_loss))
        log_file_curve_val.write(str(epoch_loss.item()) + "\n")
        logger.info('epoch {} has the smallest val loss: {}'.format(
            epoch_smallest, epoch_loss_min))
        logger.info('val ade for this epoch is {}'.format(fin_ade))
        logger.info("-" * 50)
Example #5
train_df = pd.read_csv(path + "train.csv",
                       dtype=utilities.dtypes,
                       usecols=[
                           'ip', 'app', 'device', 'os', 'channel',
                           'click_time', 'is_attributed'
                       ])
# train_df['hour'] = utilities.date_unpacker(train_df)[0]
# train_df['day'] = utilities.date_unpacker(train_df)[1]
# train_df['wday']  = utilities.date_unpacker(train_df)[2]
train_df.drop(['click_time'], axis=1, inplace=True)
y_train = train_df['is_attributed']
train_df.drop(['is_attributed'], axis=1, inplace=True)

train_df = utilities.data_stream_molder(train_df)

lstm_model = LSTM_model()
model = lstm_model.get_model()

tensorboard = TensorBoard(log_dir='./logs_lstm_no_time',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=False)

model.fit(train_df,
          y_train,
          batch_size=100000,
          epochs=2,
          validation_split=0.2,
          shuffle=True,
          callbacks=[tensorboard],
          verbose=2)
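The LSTM_model wrapper used above is not shown. Purely as a hypothetical sketch (layer sizes, vocabulary size, and the architecture are assumptions, not the project's code), a Keras binary classifier over the integer-coded categorical columns could look like this:

from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

class LSTM_model:
    def get_model(self):
        # Treat the categorical columns (ip, app, device, os, channel) as a
        # short integer sequence: embed, run an LSTM, and predict the binary
        # is_attributed target. All sizes below are placeholders.
        model = Sequential()
        model.add(Embedding(input_dim=400000, output_dim=50))
        model.add(LSTM(64))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
        return model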
Example #6
def main(args):

    sequence_length = 28
    input_size = 28
    num_layers = 2
    num_classes = 10
    learning_rate = args.learning_rate
    batch_size = args.batch_size
    hidden_size = args.hidden_size
    num_epochs = args.max_epoch
    mask = args.mask
    alpha = args.alpha

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    log_dir = args.log_root
    data_dir = args.data_root

    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    def repackage_hidden(h):
        """Wraps hidden states in new Tensors, to detach them from their history."""
        if isinstance(h, torch.Tensor):
            return h.detach()
        else:
            return tuple(repackage_hidden(v) for v in h)

    ###############################################################################
    # Logging
    ###############################################################################
    model_config_name = 'nhid:{}-nlayer:{}-epoch:{}-mask:{}-alpha:{}'.format(
        hidden_size, num_layers, num_epochs, mask, alpha)

    log_file_name = "{}.log".format(model_config_name)
    log_file_path = os.path.join(log_dir, log_file_name)
    logging.basicConfig(
        filename=log_file_path,
        level=logging.INFO,
        datefmt='%Y/%m/%d %H:%M:%S',
        format='%(asctime)s - %(levelname)s - %(message)s',
    )
    logger = logging.getLogger()
    logger.addHandler(logging.StreamHandler(stream=sys.stdout))

    ###############################################################################
    # Loading data
    ###############################################################################

    train_dataset = torchvision.datasets.MNIST(root=data_dir,
                                               train=True,
                                               download=True,
                                               transform=transforms.ToTensor())
    test_dataset = torchvision.datasets.MNIST(root=data_dir,
                                              train=False,
                                              download=True,
                                              transform=transforms.ToTensor())

    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              shuffle=False)

    model = LSTM_model(input_size,
                       hidden_size,
                       num_layers,
                       num_classes,
                       mask=mask).to(device)
    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Train the model
    total_step = len(train_loader)
    for epoch in range(num_epochs):

        hidden = model.init_hidden(batch_size)

        for i, (images, labels) in enumerate(train_loader):
            images = images.reshape(-1, sequence_length, input_size).to(device)
            labels = labels.to(device)
            hidden = repackage_hidden(hidden)
            # Forward pass
            outputs, hidden = model(images, hidden)
            loss = criterion(outputs, labels)
            sparse_regularization = torch.tensor(0.).to(device)
            for name, param in model.named_parameters():
                if name.find('threshold') != -1:
                    sparse_regularization += torch.sum(torch.exp(-param))
            loss = loss + alpha * sparse_regularization
            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (i + 1) % 100 == 0:
                logger.info(
                    '|Epoch [{}/{}] | Step [{}/{}] | Loss: {:.4f}'.format(
                        epoch + 1, num_epochs, i + 1, total_step, loss.item()))
                if mask:
                    logger.info("Lstm 1, keep ratio {:.4f}".format(
                        model.lstm1.cell.keep_ratio))
                    logger.info("Lstm 2, keep ratio {:.4f}".format(
                        model.lstm2.cell.keep_ratio))
                    logger.info("Model keep ratio {:.4f}".format(
                        model.keep_ratio))

    logger.info("Training process finish")

    # Test the model
    with torch.no_grad():
        correct = 0
        total = 0

        hidden = model.init_hidden(batch_size)
        for images, labels in test_loader:
            images = images.reshape(-1, sequence_length, input_size).to(device)
            labels = labels.to(device)
            hidden = repackage_hidden(hidden)
            outputs, hidden = model(images, hidden)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

        logger.info(
            'Test Accuracy of the model on the 10000 test images: {:.5f}'.
            format(100.0 * correct / total))
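model.init_hidden(batch_size) belongs to the custom LSTM_model and is not shown here. For a standard stacked LSTM it is typically a zero-initialized (h, c) pair; a sketch under that assumption (the real class may differ):

import torch

def init_hidden(num_layers, batch_size, hidden_size, device):
    # Zero-initialized hidden and cell states in the layout nn.LSTM expects:
    # (num_layers, batch_size, hidden_size). A tuple like this is what
    # repackage_hidden() above detaches between batches.
    h0 = torch.zeros(num_layers, batch_size, hidden_size, device=device)
    c0 = torch.zeros(num_layers, batch_size, hidden_size, device=device)
    return (h0, c0)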
Example #7
test_video_path = './Datasets/YawDD/test/test_videos'
clip_length = model_config.lstm_step
batch_size = model_config.test_sample_batch
labels = model_config.labels
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# load data
clips = read_data(test_video_path, test_frame_path, clip_length=clip_length)
sample_num = np.sum(np.array([len(clip_list) for cls, clip_list in clips.items()]))
clip_gen = clip_generator(clips, test_frame_path, labels, batch_size=batch_size)

# model build
InceptionModel = Inception_model()
InceptionModel.load_weights('./out/Inception_weight.h5')
feature_model = Model(inputs=InceptionModel.input, outputs=InceptionModel.get_layer('feature_out').output)
lstmModel = LSTM_model(model_config.lstm_step)
lstmModel.load_weights('./out/lstm_weight.h5')

# test
test_step = 0
tp_sample = 0
p_sample = 0
sample_count = 0
total_time = 0
start_time = time.time()
while sample_count <= sample_num:
    clip_batch, clip_batch_label = next(clip_gen)
    clip_feature_batch = []

    batch_start_time = time.time()
    for clip in clip_batch:
Example #8
    X1[i] = panier

print(panier)
print(X1)

X = X1[:, :-1, :]
Y = X1[:, -1, :]

#print(X.shape)
#print(Y.shape)

#Train test split to train and test model
x_train, x_test, y_train, y_test = train_test_split(X, Y)

#model
model_instacart = LSTM_model(X.shape[1], X.shape[2])

model_instacart.summary()

callback_1 = keras.callbacks.TensorBoard(log_dir='trainings/t2')
callback_2 = keras.callbacks.EarlyStopping(monitor='val_loss',
                                           min_delta=0.0005,
                                           patience=5)
callback_3 = keras.callbacks.ModelCheckpoint(filepath='weights.hdf5',
                                             verbose=1,
                                             save_best_only=True)

model_instacart.fit(x_train,
                    y_train,
                    epochs=50,
                    batch_size=256,
Example #9
df = pd.io.gbq.read_gbq(query, project_id=project_id)

dataset = dataset(df,
                  table='snp',
                  min_date=dt.datetime(2010, 1, 1, 0, 0, 0, tzinfo=dt.timezone.utc),
                  max_date=dt.datetime(2020, 6, 1, 0, 0, 0, tzinfo=dt.timezone.utc))
dataset.prepare_dataset()
model = LSTM_model(1, 'snp')
train_data_generator = datagen(df=dataset.traindata,
                               gen_length=model.model_params['SEQ_LEN'],
                               batch_size=model.model_params['batch_size'],
                               shuffle=False)
test_data_generator = datagen(df=dataset.testdata,
                              gen_length=model.model_params['SEQ_LEN'],
                              batch_size=model.model_params['batch_size'],
                              shuffle=False)
model.train(train_gen=train_data_generator, validation_gen=test_data_generator)
Example #10
    file.close()
with open(val_feature_path, 'rb') as file:
    val_feature_data = pickle.load(file)
    file.close()
labels = model_config.labels
train_feature_generator = feature_generator(
    train_feature_data,
    labels=labels,
    batch_size=feature_batch_size,
    clip_feature_shape=clip_feature_size)
val_feature_generator = feature_generator(val_feature_data,
                                          labels,
                                          batch_size=feature_batch_size,
                                          clip_feature_shape=clip_feature_size)
# LSTM model
lstmModel = LSTM_model(model_config.lstm_step)
lstmModel.summary()
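# Assumed workaround: stub out this attribute so Keras' TensorBoard callback
# does not fail when it looks for a distribution strategy on the model.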
lstmModel._get_distribution_strategy = lambda: None
lstmModel.compile(optimizer=adam(lr=0.001, decay=1e-6),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

# lstm train
lstmModel_history = lstmModel.fit_generator(
    train_feature_generator,
    steps_per_epoch=4,
    validation_data=val_feature_generator,
    validation_steps=1,
    epochs=64,
    callbacks=callback_list)
# lstmModel.save('./out/lstm_weight_final_2.h5')