@torch.no_grad()  # inference only; replaces the deprecated Variable(..., volatile=True) pattern
def fit_norm_distribution_param(args, model, train_dataset, endPoint=10000):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    pasthidden = model.init_hidden(1)
    predictions = []
    organized = []
    errors = []
    for t in range(endPoint):
        out, hidden = model.forward(train_dataset[t].unsqueeze(0), pasthidden)
        predictions.append([])
        organized.append([])
        errors.append([])
        predictions[t].append(out.data.cpu()[0][0][0])
        pasthidden = model.repackage_hidden(hidden)
        for prediction_step in range(1,args.prediction_window_size):
            out, hidden = model.forward(out, hidden)
            predictions[t].append(out.data.cpu()[0][0][0])

        if t >= args.prediction_window_size:
            for step in range(args.prediction_window_size):
                organized[t].append(predictions[step+t-args.prediction_window_size][args.prediction_window_size-1-step])
            errors[t] = torch.FloatTensor(organized[t]) - train_dataset[t][0][0]
            if args.cuda:
                errors[t] = errors[t].cuda()
            errors[t] = errors[t].unsqueeze(0)

    errors_tensor = torch.cat(errors[args.prediction_window_size:],dim=0)
    mean = errors_tensor.mean(dim=0)
    cov = errors_tensor.t().mm(errors_tensor)/errors_tensor.size(0) - mean.unsqueeze(1).mm(mean.unsqueeze(0))
    # cov: positive-semidefinite and symmetric.

    return mean, cov
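Note: the fitted mean and cov parameterize a multivariate Gaussian over prediction-error vectors; downstream (see anomalyScore below) each error vector is scored by its Mahalanobis distance under that Gaussian. A minimal sketch of that scoring step, assuming a single [1 x prediction_window_size] error tensor and using torch.linalg.solve instead of an explicit inverse:

import torch

def mahalanobis_score(error, mean, cov):
    # error: [1 x w] prediction errors, mean: [w], cov: [w x w] from fit_norm_distribution_param
    centered = error - mean.unsqueeze(0)            # [1 x w]
    solved = torch.linalg.solve(cov, centered.t())  # cov^{-1} (x - mu), shape [w x 1]
    return torch.mm(centered, solved)[0, 0]         # squared Mahalanobis distance (scalar)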
Example #2
def train(args, model, train_dataset, epoch):

    with torch.enable_grad():
        # Turn on training mode which enables dropout.
        model.train()
        total_loss = 0
        start_time = time.time()
        hidden = model.init_hidden(args.batch_size)
        for batch, i in enumerate(range(0, train_dataset.size(0) - 1, args.bptt)):
            inputSeq, targetSeq = get_batch(args,train_dataset, i)
            # inputSeq: [ seq_len * batch_size * feature_size ]
            # targetSeq: [ seq_len * batch_size * feature_size ]

            # Starting each batch, we detach the hidden state from how it was previously produced.
            # If we didn't, the model would try backpropagating all the way to start of the dataset.
            hidden = model.repackage_hidden(hidden)
            hidden_ = model.repackage_hidden(hidden)
            optimizer.zero_grad()

            '''Loss1: Free running loss'''
            outVal = inputSeq[0].unsqueeze(0)
            outVals=[]
            hids1 = []
            for i in range(inputSeq.size(0)):
                outVal, hidden_, hid = model.forward(outVal, hidden_,return_hiddens=True)
                outVals.append(outVal)
                hids1.append(hid)
            outSeq1 = torch.cat(outVals,dim=0)
            hids1 = torch.cat(hids1,dim=0)
            loss1 = criterion(outSeq1.contiguous().view(args.batch_size,-1), targetSeq.contiguous().view(args.batch_size,-1))

            '''Loss2: Teacher forcing loss'''
            outSeq2, hidden, hids2 = model.forward(inputSeq, hidden, return_hiddens=True)
            loss2 = criterion(outSeq2.contiguous().view(args.batch_size, -1), targetSeq.contiguous().view(args.batch_size, -1))

            '''Loss3: Simplified Professor forcing loss'''
            loss3 = criterion(hids1.contiguous().view(args.batch_size,-1), hids2.contiguous().view(args.batch_size,-1).detach())

            '''Total loss = Loss1+Loss2+Loss3'''
            loss = loss1+loss2+loss3
            
            loss.backward()

            # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()

            total_loss += loss.item()

            if batch % args.log_interval == 0 and batch > 0:
                cur_loss = total_loss / args.log_interval
                elapsed = time.time() - start_time
                print('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.4f} | '
                      'loss {:5.5f} '.format(
                    epoch, batch, len(train_dataset) // args.bptt,
                                  elapsed * 1000 / args.log_interval, cur_loss))
                total_loss = 0
                start_time = time.time()
def train(args, model, train_dataset,epoch):

    with torch.enable_grad():
        # Turn on training mode which enables dropout.
        model.train()
        total_loss = 0
        start_time = time.time()
        hidden = model.init_hidden(args.batch_size)
        for batch, i in enumerate(range(0, train_dataset.size(0) - 1, args.bptt)):
            inputSeq, targetSeq = get_batch(args,train_dataset, i)
            # inputSeq: [ seq_len * batch_size * feature_size ]
            # targetSeq: [ seq_len * batch_size * feature_size ]

            # Starting each batch, we detach the hidden state from how it was previously produced.
            # If we didn't, the model would try backpropagating all the way to start of the dataset.
            hidden = model.repackage_hidden(hidden)
            hidden_ = model.repackage_hidden(hidden)
            optimizer.zero_grad()

            '''Loss1: Free running loss'''
            outVal = inputSeq[0].unsqueeze(0)
            outVals=[]
            hids1 = []
            for i in range(inputSeq.size(0)):
                outVal, hidden_, hid = model.forward(outVal, hidden_,return_hiddens=True)
                outVals.append(outVal)
                hids1.append(hid)
            outSeq1 = torch.cat(outVals,dim=0)
            hids1 = torch.cat(hids1,dim=0)
            loss1 = criterion(outSeq1.view(args.batch_size,-1), targetSeq.view(args.batch_size,-1))

            '''Loss2: Teacher forcing loss'''
            outSeq2, hidden, hids2 = model.forward(inputSeq, hidden, return_hiddens=True)
            loss2 = criterion(outSeq2.view(args.batch_size, -1), targetSeq.view(args.batch_size, -1))

            '''Loss3: Simplified Professor forcing loss'''
            loss3 = criterion(hids1.view(args.batch_size,-1), hids2.view(args.batch_size,-1).detach())

            '''Total loss = Loss1+Loss2+Loss3'''
            loss = loss1+loss2+loss3
            loss.backward()

            # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()

            total_loss += loss.item()

            if batch % args.log_interval == 0 and batch > 0:
                cur_loss = total_loss / args.log_interval
                elapsed = time.time() - start_time
                print('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.4f} | '
                      'loss {:5.2f} '.format(
                    epoch, batch, len(train_dataset) // args.bptt,
                                  elapsed * 1000 / args.log_interval, cur_loss))
                total_loss = 0
                start_time = time.time()
def train(args, model, train_dataset):
    # Turn on training mode which enables dropout.
    model.train()
    total_loss = 0
    start_time = time.time()
    hidden = model.init_hidden(args.batch_size)
    for batch, i in enumerate(range(0, train_dataset.size(0) - 1, args.bptt)):
        inputSeq, targetSeq = get_batch(train_dataset, i)
        # inputSeq: [ seq_len * batch_size * feature_size ]
        # targetSeq: [ seq_len * batch_size * feature_size ]

        # Starting each batch, we detach the hidden state from how it was previously produced.
        # If we didn't, the model would try backpropagating all the way to start of the dataset.
        hidden = model.repackage_hidden(hidden)
        optimizer.zero_grad()
        USE_TEACHER_FORCING = random.random() < args.teacher_forcing_ratio
        if USE_TEACHER_FORCING:
            outSeq, hidden = model.forward(inputSeq, hidden)
        else:
            outVal = inputSeq[0].unsqueeze(0)
            outVals = []
            for i in range(inputSeq.size(0)):
                outVal, hidden = model.forward(outVal, hidden)
                outVals.append(outVal)
            outSeq = torch.cat(outVals, dim=0)

        #print('outSeq:',outSeq.size())

        #print('targetSeq:', targetSeq.size())

        loss = criterion(outSeq.view(args.batch_size, -1),
                         targetSeq.view(args.batch_size, -1))
        loss.backward()

        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()

        # for p in model2_for_timeDiff.parameters():
        #    p.data.add_(-lr, p.grad.data)

        total_loss += loss.item()

        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss / args.log_interval
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.4f} | '
                  'loss {:5.2f} '.format(epoch, batch,
                                         len(train_dataset) // args.bptt,
                                         elapsed * 1000 / args.log_interval,
                                         cur_loss))
            total_loss = 0
            start_time = time.time()
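The get_batch helper used by the training loops above is not included in this listing; the older variant takes (dataset, i) or an evaluation flag, the newer ones take (args, dataset, i). A minimal sketch consistent with the shape comments (length-args.bptt windows with one-step-shifted targets) could look like this; treat it as an assumption, not the repository's exact code:

def get_batch(args, source, i):
    # source: [ total_len * batch_size * feature_size ]
    seq_len = min(args.bptt, source.size(0) - 1 - i)
    inputSeq = source[i:i + seq_len]           # [ seq_len * batch_size * feature_size ]
    targetSeq = source[i + 1:i + 1 + seq_len]  # next-step targets, same shape
    return inputSeq, targetSeq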
Example #5
def predict_step(model, inputs):
    inputs = inputs.to(device)
    src_mask = model.generate_square_subsequent_mask(inputs.size(0)).to(device)
    return model.forward(
        inputs,
        src_mask=src_mask,
    )
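predict_step relies on generate_square_subsequent_mask, the standard causal mask for autoregressive Transformer decoding (zeros on and below the diagonal, -inf above, so position t can only attend to positions <= t). For reference, a minimal sketch of that construction:

import torch

def square_subsequent_mask(sz):
    # Float mask: 0.0 where attention is allowed, -inf where it is blocked
    return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)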
Example #6
def evaluate_model(val_loader, model, criterion, config):
    """Evaluate the model"""

    validation_loss = AverageMeter()
    validation_accuracy = AverageMeter()
    if config["precision_recall"]:
        validation_precision = AverageMeter()
        validation_recall = AverageMeter()
    model.eval()
    with torch.no_grad():
        for i, (images, targets) in enumerate(val_loader):
            targets = targets.cuda()
            images = images.cuda()
            output = model.forward(images)
            loss = criterion(output, targets)
            validation_loss.update(loss.item(), images.size(0))
            y_true = targets.detach().cpu().numpy()
            y_score = torch.topk(output, 1).indices.reshape(
                output.size(0)).detach().cpu().numpy()
            acc = accuracy_score(y_true, y_score)
            validation_accuracy.update(acc, images.size(0))
            if config["precision_recall"]:
                rec = recall_score(y_true, y_score)
                prec = precision_score(y_true, y_score)
                validation_precision.update(prec, images.size(0))
                validation_recall.update(rec, images.size(0))
    if config["precision_recall"]:
        return mean(validation_loss.history), mean(
            validation_accuracy.history), mean(
                validation_precision.history), mean(validation_recall.history)
    else:
        return mean(validation_loss.history), mean(validation_accuracy.history)
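AverageMeter and mean are helpers assumed by evaluate_model above (along with accuracy_score, recall_score and precision_score from sklearn.metrics); a minimal AverageMeter consistent with the .update(value, n) and .history usage could look like this sketch:

from statistics import mean  # mean(meter.history) as used above

class AverageMeter:
    """Tracks per-batch values; .history keeps every recorded value."""
    def __init__(self):
        self.history = []
        self.sum = 0.0
        self.count = 0

    def update(self, value, n=1):
        self.history.append(value)
        self.sum += value * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)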
Example #7
def evaluate(args, model, test_dataset):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    with torch.no_grad():
        total_loss = 0
        hidden = model.init_hidden(args.eval_batch_size)
        nbatch = 1
        for nbatch, i in enumerate(
                range(0,
                      test_dataset.size(0) - 1, args.bptt)):
            inputSeq, targetSeq = get_batch(args, test_dataset, i)
            # inputSeq: [ seq_len * batch_size * feature_size ]
            # targetSeq: [ seq_len * batch_size * feature_size ]
            hidden_ = model.repackage_hidden(hidden)
            '''Loss1: Free running loss'''
            outVal = inputSeq[0].unsqueeze(0)
            outVals = []
            hids1 = []
            for i in range(inputSeq.size(0)):
                outVal, hidden_, hid = model.forward(outVal,
                                                     hidden_,
                                                     return_hiddens=True)
                outVals.append(outVal)
                hids1.append(hid)
            outSeq1 = torch.cat(outVals, dim=0)
            hids1 = torch.cat(hids1, dim=0)
            loss1 = criterion(
                outSeq1.contiguous().view(args.batch_size, -1),
                targetSeq.contiguous().view(args.batch_size, -1))
            '''Loss2: Teacher forcing loss'''
            outSeq2, hidden, hids2 = model.forward(inputSeq,
                                                   hidden,
                                                   return_hiddens=True)
            loss2 = criterion(
                outSeq2.contiguous().view(args.batch_size, -1),
                targetSeq.contiguous().view(args.batch_size, -1))
            '''Loss3: Simplified Professor forcing loss'''
            loss3 = criterion(hids1.view(args.batch_size, -1),
                              hids2.view(args.batch_size, -1).detach())
            '''Total loss = Loss1+Loss2+Loss3'''
            loss = loss1 + loss2 + loss3

            total_loss += loss.item()

    return total_loss / (nbatch + 1)
@torch.no_grad()  # inference only; replaces the deprecated Variable(..., volatile=True) pattern
def anomalyScore(args, model, test_dataset, mean, cov, endPoint=10000):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    pasthidden = model.init_hidden(1)
    predictions = []
    organized = []
    errors = []
    for t in range(endPoint):
        out, hidden = model.forward(test_dataset[t].unsqueeze(0), pasthidden)
        predictions.append([])
        organized.append([])
        errors.append([])
        predictions[t].append(out.data.cpu()[0][0][0])
        pasthidden = model.repackage_hidden(hidden)
        for prediction_step in range(1, args.prediction_window_size):
            out, hidden = model.forward(out, hidden)
            predictions[t].append(out.data.cpu()[0][0][0])

        if t >= args.prediction_window_size:
            for step in range(args.prediction_window_size):
                organized[t].append(
                    predictions[step + t - args.prediction_window_size][args.prediction_window_size - 1 - step])
            organized[t] =torch.FloatTensor(organized[t]).unsqueeze(0)
            errors[t] = organized[t] - test_dataset[t][0][0]
            if args.cuda:
                errors[t] = errors[t].cuda()
        else:
            organized[t] = torch.zeros(1,args.prediction_window_size)
            errors[t] = torch.zeros(1,args.prediction_window_size)
            if args.cuda:
                errors[t] = errors[t].cuda()

    scores = []
    for error in errors:
        mult1 = error-mean.unsqueeze(0) # [ 1 * prediction_window_size ]
        mult2 = torch.inverse(cov) # [ prediction_window_size * prediction_window_size ]
        mult3 = mult1.t() # [ prediction_window_size * 1 ]
        score = torch.mm(mult1,torch.mm(mult2,mult3))
        scores.append(score[0][0])
    return scores, organized, errors
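The scoring loop above re-inverts cov for every timestep; an equivalent batched form (a sketch, assuming the per-step error rows have been stacked into a single [T x prediction_window_size] tensor) is:

def batched_anomaly_scores(errors, mean, cov):
    # errors: [T x w], mean: [w], cov: [w x w]
    centered = errors - mean.unsqueeze(0)               # [T x w]
    solved = torch.linalg.solve(cov, centered.t()).t()  # each row is (cov^{-1} e_t)^T
    return (centered * solved).sum(dim=1)               # [T] squared Mahalanobis distances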
def evaluate(args, model, test_dataset):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    with torch.no_grad():
        total_loss = 0
        hidden = model.init_hidden(args.eval_batch_size)
        nbatch = 1
        for nbatch, i in enumerate(range(0, test_dataset.size(0) - 1, args.bptt)):
            inputSeq, targetSeq = get_batch(args,test_dataset, i)
            # inputSeq: [ seq_len * batch_size * feature_size ]
            # targetSeq: [ seq_len * batch_size * feature_size ]
            hidden_ = model.repackage_hidden(hidden)
            '''Loss1: Free running loss'''
            outVal = inputSeq[0].unsqueeze(0)
            outVals=[]
            hids1 = []
            for i in range(inputSeq.size(0)):
                outVal, hidden_, hid = model.forward(outVal, hidden_,return_hiddens=True)
                outVals.append(outVal)
                hids1.append(hid)
            outSeq1 = torch.cat(outVals,dim=0)
            hids1 = torch.cat(hids1,dim=0)
            loss1 = criterion(outSeq1.view(args.batch_size,-1), targetSeq.view(args.batch_size,-1))

            '''Loss2: Teacher forcing loss'''
            outSeq2, hidden, hids2 = model.forward(inputSeq, hidden, return_hiddens=True)
            loss2 = criterion(outSeq2.view(args.batch_size, -1), targetSeq.view(args.batch_size, -1))

            '''Loss3: Simplified Professor forcing loss'''
            loss3 = criterion(hids1.view(args.batch_size,-1), hids2.view(args.batch_size,-1).detach())

            '''Total loss = Loss1+Loss2+Loss3'''
            loss = loss1+loss2+loss3

            total_loss += loss.item()

    return total_loss / (nbatch+1)
def evaluate_1step_pred(args, model, test_dataset):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    with torch.no_grad():
        hidden = model.init_hidden(args.eval_batch_size)
        for nbatch, i in enumerate(range(0, test_dataset.size(0) - 1, args.bptt)):

            inputSeq, targetSeq = get_batch(args,test_dataset, i)
            outSeq, hidden = model.forward(inputSeq, hidden)

            loss = criterion(outSeq.view(args.batch_size,-1), targetSeq.view(args.batch_size,-1))
            hidden = model.repackage_hidden(hidden)
            total_loss+= loss.item()

    return total_loss / nbatch
Example #11
def evaluate_1step_pred(args, model, test_dataset):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    with torch.no_grad():
        hidden = model.init_hidden(args.eval_batch_size)
        for nbatch, i in enumerate(range(0, test_dataset.size(0) - 1, args.bptt)):

            inputSeq, targetSeq = get_batch(args,test_dataset, i)
            outSeq, hidden = model.forward(inputSeq, hidden)

            loss = criterion(outSeq.view(args.batch_size,-1), targetSeq.view(args.batch_size,-1))
            hidden = model.repackage_hidden(hidden)
            total_loss+= loss.item()

    return total_loss / nbatch
@torch.no_grad()  # inference only
def evaluate(args, model, test_dataset):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    hidden = model.init_hidden(args.eval_batch_size)
    for nbatch, i in enumerate(range(0, test_dataset.size(0) - 1, args.bptt)):

        inputSeq, targetSeq = get_batch(test_dataset, i, evaluation=True)
        # outVal = inputSeq[0].unsqueeze(0)
        # outVals = []
        # for i in range(inputSeq.size(0)):
        #     outVal, hidden = model.forward(outVal, hidden)
        #     outVals.append(outVal)
        # outSeq = torch.cat(outVals, dim=0)

        outSeq, hidden = model.forward(inputSeq, hidden)

        loss = criterion(outSeq.view(args.batch_size,-1), targetSeq.view(args.batch_size,-1))
        hidden = model.repackage_hidden(hidden)
        total_loss += loss.item()

    return total_loss / nbatch
Example #13
def run_inference(img_path, output_dir, args):
    """ A function making inference using the pre-trained darknet weights in the tensorflow 
		framework 
		Input:
			img_path: string, path to the image on which inference is to be run, path to the image directory containing images in the case of multiple images.
			output_dir: string, directory for saving the output
			args: argparse object
	"""

    # Reading the images
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    if not os.path.exists(os.path.join(output_dir, 'images')):
        os.mkdir(os.path.join(output_dir, 'images'))
    if not os.path.exists(os.path.join(output_dir, 'labels')):
        os.mkdir(os.path.join(output_dir, 'labels'))

    output_dir_images = os.path.join(output_dir, 'images')
    output_dir_labels = os.path.join(output_dir, 'labels')

    file_names = sorted(os.listdir(img_path))
    images_batch = read_image(img_path)

    # Getting anchors and labels for the prediction
    class_names = get_classes(config.classes_path)

    num_classes = config.num_classes
    num_anchors = config.num_anchors

    # Retrieving the input shape of the model, i.e. (608x608), (416x416), (320x320)
    input_shape = (config.input_shape, config.input_shape)

    # Defining placeholder for passing the image data onto the model
    image_tensor = tf.placeholder(dtype=tf.float32,
                                  shape=[None, None, None, 3])
    image_shape = tf.placeholder(dtype=tf.int32, shape=[2])

    model = model(image_tensor,
                  is_training=False,
                  num_classes=config.num_classes)
    output_nodes, model_layers = model.forward()

    print('Summary of the model created.......\n')
    for layer in model_layers:
        print(layer)

    # Creating a session for running the model
    gpu_config = tf.ConfigProto(log_device_placement=False)
    gpu_config.gpu_options.allow_growth = True
    sess = tf.Session(config=gpu_config)

    output_values = predict(output_nodes, num_classes, input_shape,
                            image_shape)

    ckpt_path = config.model_dir + 'valid/'
    exponential_moving_average_obj = tf.train.ExponentialMovingAverage(
        config.weight_decay)
    saver = tf.train.Saver(
        exponential_moving_average_obj.variables_to_restore())
    ckpt = tf.train.get_checkpoint_state(ckpt_path)
    # chkp.print_tensors_in_checkpoint_file(checkmate.get_best_checkpoint(ckpt_path), tensor_name='', all_tensors=True)
    # exit()
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        print('Restoring model ', checkmate.get_best_checkpoint(ckpt_path))
        saver.restore(sess, checkmate.get_best_checkpoint(ckpt_path))
        print('Model Loaded!')

    total_time_pred = []
    for x in range(len(images_batch)):

        image = images_batch[x]
        new_image_size = (config.input_shape, config.input_shape)
        image_data = np.array(resize_image(image, new_image_size))
        print('Image height: {}\tImage width: {}'.format(
            image.shape[0], image.shape[1]))

        img = image_data / 255.
        img = np.expand_dims(img, 0)  # Adding the batch dimension

        tick = time()
        # Actually run the graph in a tensorflow session to get the outputs
        out_values = sess.run([output_values],
                              feed_dict={
                                  image_tensor: img,
                                  image_shape:
                                  [image.shape[0], image.shape[1]]
                              })
        tock = time()
        total_time_pred.append(tock - tick)

        # NOTE: `out_boxes` is assumed to be unpacked from `out_values` (boxes, scores, classes)
        # by the predict() post-processing; that unpacking step is not shown in this excerpt.
        print('Found {} boxes for {} in {}sec'.format(len(out_boxes), 'img',
                                                      tock - tick))

        ######################## Visualization ######################
        font = ImageFont.truetype(font='./font/FiraMono-Medium.otf',
                                  size=np.floor(1e-2 * image.shape[1] +
                                                0.5).astype(np.int32))
        thickness = (image.shape[0] + image.shape[1]) // 500  # bounding-box line thickness

        image = Image.fromarray((image).astype('uint8'), mode='RGB')
        output_labels = open(
            os.path.join(output_dir_labels,
                         file_names[x].split('.')[0] + '.txt'), 'w')
        ### DO ALL THE PLOTTING THING IF REQUIRED ###
        ### SAVE THE IMAGE ###

        output_labels.close()  # Saving labels

    sess.close()

    total_time_pred = sum(total_time_pred[1:])
    print('FPS of model with post processing over {} images is {}'.format(
        len(images_batch) - 1, (len(images_batch) - 1) / total_time_pred))
def generate_output(args, epoch, model, gen_dataset, disp_uncertainty=True, startPoint=500, endPoint=3500):
    if args.save_fig:
        # Turn on evaluation mode which disables dropout.
        model.eval()
        hidden = model.init_hidden(1)
        outSeq = []
        upperlim95 = []
        lowerlim95 = []
        with torch.no_grad():
            for i in range(endPoint):
                if i>=startPoint:
                    # if disp_uncertainty and epoch > 40:
                    #     outs = []
                    #     model.train()
                    #     for i in range(20):
                    #         out_, hidden_ = model.forward(out+0.01*Variable(torch.randn(out.size())).cuda(),hidden,noise=True)
                    #         outs.append(out_)
                    #     model.eval()
                    #     outs = torch.cat(outs,dim=0)
                    #     out_mean = torch.mean(outs,dim=0) # [bsz * feature_dim]
                    #     out_std = torch.std(outs,dim=0) # [bsz * feature_dim]
                    #     upperlim95.append(out_mean + 2.58*out_std/np.sqrt(20))
                    #     lowerlim95.append(out_mean - 2.58*out_std/np.sqrt(20))

                    out, hidden = model.forward(out, hidden)

                    #print(out_mean,out)

                else:
                    out, hidden = model.forward(gen_dataset[i].unsqueeze(0), hidden)
                outSeq.append(out.data.cpu()[0][0].unsqueeze(0))


        outSeq = torch.cat(outSeq,dim=0) # [seqLength * feature_dim]

        target = preprocess_data.reconstruct(gen_dataset.cpu(), TimeseriesData.mean, TimeseriesData.std)
        outSeq = preprocess_data.reconstruct(outSeq, TimeseriesData.mean, TimeseriesData.std)
        # if epoch>40:
        #     upperlim95 = torch.cat(upperlim95, dim=0)
        #     lowerlim95 = torch.cat(lowerlim95, dim=0)
        #     upperlim95 = preprocess_data.reconstruct(upperlim95.data.cpu().numpy(),TimeseriesData.mean,TimeseriesData.std)
        #     lowerlim95 = preprocess_data.reconstruct(lowerlim95.data.cpu().numpy(),TimeseriesData.mean,TimeseriesData.std)

        plt.figure(figsize=(15,5))
        for i in range(target.size(-1)):
            plt.plot(target[:,:,i].numpy(), label='Target'+str(i),
                     color='black', marker='.', linestyle='--', markersize=1, linewidth=0.5)
            plt.plot(range(startPoint), outSeq[:startPoint,i].numpy(), label='1-step predictions for target'+str(i),
                     color='green', marker='.', linestyle='--', markersize=1.5, linewidth=1)
            # if epoch>40:
            #     plt.plot(range(startPoint, endPoint), upperlim95[:,i].numpy(), label='upperlim'+str(i),
            #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
            #     plt.plot(range(startPoint, endPoint), lowerlim95[:,i].numpy(), label='lowerlim'+str(i),
            #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
            plt.plot(range(startPoint, endPoint), outSeq[startPoint:,i].numpy(), label='Recursive predictions for target'+str(i),
                     color='blue', marker='.', linestyle='--', markersize=1.5, linewidth=1)

        plt.xlim([startPoint-500, endPoint])
        plt.xlabel('Index',fontsize=15)
        plt.ylabel('Value',fontsize=15)
        plt.title('Time-series Prediction on ' + args.data + ' Dataset', fontsize=18, fontweight='bold')
        plt.legend()
        plt.tight_layout()
        plt.text(startPoint-500+10, target.min(), 'Epoch: '+str(epoch),fontsize=15)
        save_dir = Path('result',args.data,args.filename).with_suffix('').joinpath('fig_prediction')
        save_dir.mkdir(parents=True,exist_ok=True)
        plt.savefig(save_dir.joinpath('fig_epoch'+str(epoch)).with_suffix('.png'))
        #plt.show()
        plt.close()
        return outSeq

    else:
        pass
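preprocess_data.reconstruct undoes the standardization applied when the dataset was built; a plausible minimal implementation, assuming plain z-score scaling with the stored mean and std, is:

def reconstruct(seq, mean, std):
    # Inverse of (x - mean) / std applied during preprocessing (assumed z-score normalization)
    return seq * std + mean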
Example #15
import tensorflow as tf
from model import model
import ujson as json
import utils
from reader import reader

with open('./params.json', 'rb') as f:
    params = json.load(f)

vocab, vocab_map, reverse_map = utils.vocab_maker(params['raw_data_dir'])
params['vocab_size'] = max(vocab_map.values()) + 1

reader = reader()
model = model(params)
model.forward()
model.backward()

fp = open('./data/dataset.pkl', 'rb')
init_op = [tf.local_variables_initializer(), tf.global_variables_initializer()]
sess = tf.Session()
sess.run(init_op)
for epoch in range(params['n_epochs']):
    batch = reader.read_batches(fp)
    inps, trgts, weights = utils.process_batches(batch, vocab_map)
    feed_dict = {model.inp: inps, model.trgt: trgts, model.weights: weights}
    logits, loss, _ = sess.run([model.logits, model.loss, model.train_op],
                               feed_dict)
    print(loss)
Example #16
def train(ckpt_path, log_path, class_path):
	""" Function to train the model.
		ckpt_path: string, path for saving/restoring the model
		log_path: string, path for saving the training/validation logs
		class_path: string, path for the classes of the dataset
		decay_steps: int, steps after which the learning rate is to be decayed
			decay_rate: float, rate used for the exponential decay
	"""


	# Getting the anchors
	anchors = read_anchors(config.anchors_path)
	if not os.path.exists(config.data_dir):
		os.mkdir(config.data_dir)

	classes = get_classes(class_path)

	# Building the training pipeline
	graph = tf.get_default_graph()

	with graph.as_default():

		# Getting the training data
		with tf.name_scope('data_parser/'):
			train_reader = Parser('train', config.data_dir, config.anchors_path, config.output_dir, 
				config.num_classes, input_shape=config.input_shape, max_boxes=config.max_boxes)
			train_data = train_reader.build_dataset(config.train_batch_size//config.subdivisions)
			train_iterator = train_data.make_one_shot_iterator()

			val_reader = Parser('val', config.data_dir, config.anchors_path, config.output_dir, 
				config.num_classes, input_shape=config.input_shape, max_boxes=config.max_boxes)
			val_data = val_reader.build_dataset(config.val_batch_size)
			val_iterator = val_data.make_one_shot_iterator()


			is_training = tf.placeholder(dtype=tf.bool, shape=[], name='train_flag') # Used for different behaviour of batch normalization
			mode = tf.placeholder(dtype=tf.int16, shape=[], name='mode_flag')


			def train():
				return train_iterator.get_next()
			def valid():
				return val_iterator.get_next()


			images, labels = tf.cond(pred=tf.equal(mode, 1), true_fn=train, false_fn=valid, name='train_val_data')
			grid_shapes = [config.input_shape // 32, config.input_shape // 16, config.input_shape // 8]

			images.set_shape([None, config.input_shape, config.input_shape, 3])
			labels.set_shape([None, required_shape, 5])

			# image_summary = draw_box(images, bbox, file_name)

		if not os.path.exists(ckpt_path):
			os.mkdir(ckpt_path)

		model = model(images, is_training, config.num_classes, config.num_anchors_per_scale, config.weight_decay, config.norm_decay)
		output, model_layers = model.forward()

		print('Summary of the created model.......\n')
		for layer in model_layers:
			print(layer)

		# Declaring the parameters for GT
		with tf.name_scope('Targets'):
			### GT PROCESSING ###
			pass  # ground-truth target construction omitted in this excerpt

		# Compute Loss
		with tf.name_scope('Loss_and_Detect'):
			loss_scale,summaries = compute_loss(output, y_true, config.num_classes, ignore_threshold=config.ignore_thresh)
			exponential_moving_average_op = tf.train.ExponentialMovingAverage(config.weight_decay).apply(var_list=tf.trainable_variables())
			loss = model_loss
			model_loss_summary = tf.summary.scalar('model_loss', summaries, family='Losses')


		# Declaring the parameters for training the model
		with tf.name_scope('train_parameters'):
			global_step = tf.Variable(0, trainable=False, name='global_step')

			def learning_rate_scheduler(learning_rate, scheduler_name, global_step, decay_steps=100):
				if scheduler_name == 'exponential':
					lr =  tf.train.exponential_decay(learning_rate, global_step,
						decay_steps, decay_rate, staircase=True, name='exponential_learning_rate')
					return tf.maximum(lr, config.learning_rate_lower_bound)
				elif scheduler_name == 'polynomial':
					lr =  tf.train.polynomial_decay(learning_rate, global_step,
						decay_steps, config.learning_rate_lower_bound, power=0.8, cycle=True, name='polynomial_learning_rate')
					return tf.maximum(lr, config.learning_rate_lower_bound)
				elif scheduler_name == 'cosine':
					lr = tf.train.cosine_decay(learning_rate, global_step,
						decay_steps, alpha=0.5, name='cosine_learning_rate')
					return tf.maximum(lr, config.learning_rate_lower_bound)
				elif scheduler_name == 'linear':
					return tf.convert_to_tensor(learning_rate, name='linear_learning_rate')
				else:
					raise ValueError('Unsupported learning rate scheduler\n[supported types: exponential, polynomial, linear]')


			if config.use_warm_up:
				learning_rate = tf.cond(pred=tf.less(global_step, config.burn_in_epochs * (config.train_num // config.train_batch_size)),
					true_fn=lambda: learning_rate_scheduler(config.init_learning_rate, config.warm_up_lr_scheduler, global_step),
					false_fn=lambda: learning_rate_scheduler(config.learning_rate, config.lr_scheduler, global_step, decay_steps=2000))
			else:
				learning_rate = learning_rate_scheduler(config.learning_rate, config.lr_scheduler, global_step=global_step, decay_steps=2000)

			tf.summary.scalar('learning rate', learning_rate, family='Train_Parameters')


		# Define optimizer for minimizing the computed loss
		with tf.name_scope('Optimizer'):
			optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=config.momentum)
			# optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
			# optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate, momentum=config.momentum)
			update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
			with tf.control_dependencies(update_ops):
				# grads = optimizer.compute_gradients(loss=loss)
				# gradients = [(tf.placeholder(dtype=tf.float32, shape=grad[1].get_shape()), grad[1]) for grad in grads]
				# train_step = optimizer.apply_gradients(grads_and_vars=gradients, global_step=global_step)
				optimizing_op = optimizer.minimize(loss=loss, global_step=global_step)
			
			with tf.control_dependencies([optimizing_op]):
				with tf.control_dependencies([exponential_moving_average_op]):
					train_op_with_mve = tf.no_op()
			train_op = train_op_with_mve



#################################### Training loop ############################################################
		# A saver object for saving the model
		best_ckpt_saver_train = checkmate.BestCheckpointSaver(save_dir=ckpt_path+'train/', num_to_keep=5)
		best_ckpt_saver_valid = checkmate.BestCheckpointSaver(save_dir=ckpt_path+'valid/', num_to_keep=5)
		summary_op = tf.summary.merge_all()
		summary_op_valid = tf.summary.merge([model_loss_summary])
		init_op = tf.global_variables_initializer()


		
		# Defining some train loop dependencies
		gpu_config = tf.ConfigProto(log_device_placement=False)
		gpu_config.gpu_options.allow_growth = True
		sess = tf.Session(config=gpu_config)
		tf.logging.set_verbosity(tf.logging.ERROR)
		train_summary_writer = tf.summary.FileWriter(os.path.join(log_path, 'train'), sess.graph)
		val_summary_writer = tf.summary.FileWriter(os.path.join(log_path, 'val'), sess.graph)

		# print(sess.run(receptive_field))  # debug line; `receptive_field` is not defined in this excerpt
		
		# Restoring the model
		ckpt = tf.train.get_checkpoint_state(ckpt_path+'train/')
		if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
			print('Restoring model ', checkmate.get_best_checkpoint(ckpt_path+'train/'))
			tf.train.Saver().restore(sess, checkmate.get_best_checkpoint(ckpt_path+'train/'))
			print('Model Loaded!')
		else:
			sess.run(init_op)

		print('Uninitialized variables: ', sess.run(tf.report_uninitialized_variables()))


		epochbar = tqdm(range(config.Epoch))
		for epoch in epochbar:
			epochbar.set_description('Epoch %s of %s' % (epoch, config.Epoch))
			mean_loss_train = []
			mean_loss_valid = []

			trainbar = tqdm(range(config.train_num//config.train_batch_size))
			for k in trainbar:

				num_steps, train_summary, loss_train, _ = sess.run([global_step, summary_op, loss,
					train_op], feed_dict={is_training: True, mode: 1})

				train_summary_writer.add_summary(train_summary, epoch)
				train_summary_writer.flush()
				mean_loss_train.append(loss_train)
				trainbar.set_description('Train loss: %s' %str(loss_train))


			print('Validating.....')
			valbar = tqdm(range(config.val_num//config.val_batch_size))
			for k in valbar:
				val_summary, loss_valid = sess.run([summary_op_valid, loss], feed_dict={is_training: False, mode: 0})
				val_summary_writer.add_summary(val_summary, epoch)
				val_summary_writer.flush()
				mean_loss_valid.append(loss_valid)
				valbar.set_description('Validation loss: %s' %str(loss_valid))

			mean_loss_train = np.mean(mean_loss_train)
			mean_loss_valid = np.mean(mean_loss_valid)

			print('\n')
			print('Train loss after %d epochs is: %f' %(epoch+1, mean_loss_train))
			print('Validation loss after %d epochs is: %f' %(epoch+1, mean_loss_valid))
			print('\n\n')

			if (config.use_warm_up):
				if (num_steps > config.burn_in_epochs * (config.train_num // config.train_batch_size)):
					best_ckpt_saver_train.handle(mean_loss_train, sess, global_step)
					best_ckpt_saver_valid.handle(mean_loss_valid, sess, global_step)
				else:
					continue
			else:
				best_ckpt_saver_train.handle(mean_loss_train, sess, global_step)
				best_ckpt_saver_valid.handle(mean_loss_valid, sess, global_step)

		print('Tuning Completed!!')
		train_summary_writer.close()
		val_summary_writer.close()
		sess.close()





def main():
	""" main function which calls all the other required functions for training """
	os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
	os.environ["CUDA_VISIBLE_DEVICES"] = str(config.gpu_num)
	train(config.model_dir, config.logs_dir, config.classes_path)
	os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' 



if __name__ == '__main__':
	main()
Example #17
def generate_output(args, epoch, model, gen_dataset, scale_norm, data_organization, disp_uncertainty=True, figNumber = 30, startPoint = 50, endPoint = 400):
    
    if args.save_fig:
        # Turn on evaluation mode which disables dropout.
        model.eval()
        
        outSeq = []
        # upperlim95 = []
        # lowerlim95 = []
        
        for n in range(figNumber):
            tempOutSeq = []
            hidden = model.init_hidden(1)
            with torch.no_grad():
                for i in range(endPoint):
                    if i>=startPoint:
                        # if disp_uncertainty and epoch > 40:
                        #     outs = []
                        #     model.train()
                        #     for i in range(20):
                        #         out_, hidden_ = model.forward(out+0.01*Variable(torch.randn(out.size())).cuda(),hidden,noise=True)
                        #         outs.append(out_)
                        #     model.eval()
                        #     outs = torch.cat(outs,dim=0)
                        #     out_mean = torch.mean(outs,dim=0) # [bsz * feature_dim]
                        #     out_std = torch.std(outs,dim=0) # [bsz * feature_dim]
                        #     upperlim95.append(out_mean + 2.58*out_std/np.sqrt(20))
                        #     lowerlim95.append(out_mean - 2.58*out_std/np.sqrt(20))
    
                        out, hidden = model.forward(out, hidden)
    
                        #print(out_mean,out)
    
                    else:
                        out, hidden = model.forward(gen_dataset[n][i].unsqueeze(0).unsqueeze(0).float(), hidden)
                    tempOutSeq.append(out)
                    
                tempOutSeq = torch.cat(tempOutSeq, dim=1)
            outSeq.append(tempOutSeq)

        outSeq = torch.cat(outSeq, dim=0) # [seqLength * feature_dim]

        target = denormalized_data(gen_dataset[:figNumber].cpu().numpy(), scale_norm, data_organization, data_type)

        outSeq = denormalized_data(outSeq.cpu().numpy(), scale_norm, data_organization, data_type)
  
        # if epoch>40:
        #     upperlim95 = torch.cat(upperlim95, dim=0)
        #     lowerlim95 = torch.cat(lowerlim95, dim=0)
        #     upperlim95 = preprocess_data.reconstruct(upperlim95.data.cpu().numpy(),TimeseriesData.mean,TimeseriesData.std)
        #     lowerlim95 = preprocess_data.reconstruct(lowerlim95.data.cpu().numpy(),TimeseriesData.mean,TimeseriesData.std)

        if data_organization == 'partial_combination':
            for i in range(target.shape[0]):
                fig = plt.figure(figsize=(15,5))
                plt.axis('off')
                plt.grid(b=None)
                plt.title('Time-series Prediction on ' + args.data + ' Dataset', y = 1.05, fontsize=18, fontweight='bold')
                for j in range(3):     
                    ax = fig.add_subplot(3, 1, j+1)
                    ax.plot(target[i,:,j], label='Target'+str(i),
                             color='black', marker='.', linestyle='--', markersize=1, linewidth=0.5)
                    ax.plot(range(startPoint), outSeq[i,:startPoint,j], label='1-step predictions for target'+str(i),
                             color='green', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    # if epoch>40:
                    #     plt.plot(range(startPoint, endPoint), upperlim95[:,i].numpy(), label='upperlim'+str(i),
                    #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    #     plt.plot(range(startPoint, endPoint), lowerlim95[:,i].numpy(), label='lowerlim'+str(i),
                    #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    ax.plot(range(startPoint, endPoint), outSeq[i,startPoint:,j], label='Recursive predictions for target'+str(i),
                             color='blue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    
                    # plt.xlim([startPoint-500, endPoint])
                    plt.xlabel('Index',fontsize=15)
                    plt.ylabel('Value',fontsize=15)
                    plt.legend()
                plt.subplots_adjust(wspace = 0.2, hspace = 0.3)
                # plt.tight_layout()
                # plt.text(startPoint-500+10, target.min(), 'Epoch: '+str(epoch),fontsize=15)                               
                save_dir = Path('result',args.data,args.filename).with_suffix('').joinpath('fig_prediction')
                save_dir.mkdir(parents=True,exist_ok=True)
                plt.savefig(save_dir.joinpath('fig_epoch'+str(epoch)+'_'+str(i+1)).with_suffix('.png'))
                plt.show()
                plt.close()
        elif data_organization == 'full_combination':
            for i in range(target.shape[0]): 
                fig = plt.figure(figsize=(15,5))
                plt.axis('off')
                plt.grid(b=None)
                plt.title('Time-series Prediction on ' + args.data + ' Dataset', y = 1.05, fontsize=18, fontweight='bold')
                for j in range(4):
                    
                    ax = fig.add_subplot(2, 2, j+1)
                    ax.plot(target[i,:,j], label='Target'+str(i),
                             color='black', marker='.', linestyle='--', markersize=1, linewidth=0.5)
                    ax.plot(range(startPoint), outSeq[i,:startPoint,j], label='1-step predictions for target'+str(i),
                             color='green', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    # if epoch>40:
                    #     plt.plot(range(startPoint, endPoint), upperlim95[:,i].numpy(), label='upperlim'+str(i),
                    #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    #     plt.plot(range(startPoint, endPoint), lowerlim95[:,i].numpy(), label='lowerlim'+str(i),
                    #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    ax.plot(range(startPoint, endPoint), outSeq[i,startPoint:,j], label='Recursive predictions for target'+str(i),
                             color='blue', marker='.', linestyle='--', markersize=1.5, linewidth=1)    
                    
                    # plt.xlim([startPoint-500, endPoint])
                    plt.xlabel('Index',fontsize=15)
                    plt.ylabel('Value',fontsize=15)
                    plt.legend()
                    
                plt.subplots_adjust(wspace = 0.2, hspace = 0.3)
                # plt.tight_layout()
                # plt.text(startPoint-500+10, target.min(), 'Epoch: '+str(epoch),fontsize=15)                
                # plt.show()
                save_dir = Path('result',args.data,args.filename).with_suffix('').joinpath('fig_prediction')
                save_dir.mkdir(parents=True,exist_ok=True)
                plt.savefig(save_dir.joinpath('fig_epoch'+str(epoch)+'_'+str(i+1)).with_suffix('.png'))
                plt.show()
                plt.close()
        return outSeq

    else:
        pass
def generate_output(args,
                    epoch,
                    model,
                    gen_dataset,
                    disp_uncertainty=True,
                    startPoint=500,
                    endPoint=3500):
    if args.save_fig:
        # Turn on evaluation mode which disables dropout.
        model.eval()
        hidden = model.init_hidden(1)
        outSeq = []
        upperlim95 = []
        lowerlim95 = []
        with torch.no_grad():
            for i in range(endPoint):
                if i >= startPoint:
                    # if disp_uncertainty and epoch > 40:
                    #     outs = []
                    #     model.train()
                    #     for i in range(20):
                    #         out_, hidden_ = model.forward(out+0.01*Variable(torch.randn(out.size())).cuda(),hidden,noise=True)
                    #         outs.append(out_)
                    #     model.eval()
                    #     outs = torch.cat(outs,dim=0)
                    #     out_mean = torch.mean(outs,dim=0) # [bsz * feature_dim]
                    #     out_std = torch.std(outs,dim=0) # [bsz * feature_dim]
                    #     upperlim95.append(out_mean + 2.58*out_std/np.sqrt(20))
                    #     lowerlim95.append(out_mean - 2.58*out_std/np.sqrt(20))

                    out, hidden = model.forward(out, hidden)

                    #print(out_mean,out)

                else:
                    out, hidden = model.forward(gen_dataset[i].unsqueeze(0),
                                                hidden)
                outSeq.append(out.data.cpu()[0][0].unsqueeze(0))

        outSeq = torch.cat(outSeq, dim=0)  # [seqLength * feature_dim]

        target = preprocess_data.reconstruct(gen_dataset.cpu(),
                                             TimeseriesData.mean,
                                             TimeseriesData.std)
        outSeq = preprocess_data.reconstruct(outSeq, TimeseriesData.mean,
                                             TimeseriesData.std)
        # if epoch>40:
        #     upperlim95 = torch.cat(upperlim95, dim=0)
        #     lowerlim95 = torch.cat(lowerlim95, dim=0)
        #     upperlim95 = preprocess_data.reconstruct(upperlim95.data.cpu().numpy(),TimeseriesData.mean,TimeseriesData.std)
        #     lowerlim95 = preprocess_data.reconstruct(lowerlim95.data.cpu().numpy(),TimeseriesData.mean,TimeseriesData.std)

        plt.figure(figsize=(15, 5))
        for i in range(target.size(-1)):
            plt.plot(target[:, :, i].numpy(),
                     label='Target' + str(i),
                     color='black',
                     marker='.',
                     linestyle='--',
                     markersize=1,
                     linewidth=0.5)
            plt.plot(range(startPoint),
                     outSeq[:startPoint, i].numpy(),
                     label='1-step predictions for target' + str(i),
                     color='green',
                     marker='.',
                     linestyle='--',
                     markersize=1.5,
                     linewidth=1)
            # if epoch>40:
            #     plt.plot(range(startPoint, endPoint), upperlim95[:,i].numpy(), label='upperlim'+str(i),
            #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
            #     plt.plot(range(startPoint, endPoint), lowerlim95[:,i].numpy(), label='lowerlim'+str(i),
            #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
            plt.plot(range(startPoint, endPoint),
                     outSeq[startPoint:, i].numpy(),
                     label='Recursive predictions for target' + str(i),
                     color='blue',
                     marker='.',
                     linestyle='--',
                     markersize=1.5,
                     linewidth=1)

        plt.xlim([startPoint - 500, endPoint])
        plt.xlabel('Index', fontsize=15)
        plt.ylabel('Value', fontsize=15)
        plt.title('Time-series Prediction on ' + args.data + ' Dataset',
                  fontsize=18,
                  fontweight='bold')
        plt.legend()
        plt.tight_layout()
        plt.text(startPoint - 500 + 10,
                 target.min(),
                 'Epoch: ' + str(epoch),
                 fontsize=15)
        save_dir = Path(
            args.path_save + '/result', args.data,
            args.filename).with_suffix('').joinpath('fig_prediction')
        save_dir.mkdir(parents=True, exist_ok=True)
        plt.savefig(
            save_dir.joinpath('fig_epoch' + str(epoch)).with_suffix('.png'))
        #plt.show()
        plt.close()
        return outSeq

    else:
        pass
@torch.no_grad()  # inference only; replaces the deprecated Variable(..., volatile=True) pattern
def generate_output(args,
                    epoch,
                    model,
                    gen_dataset,
                    startPoint=500,
                    endPoint=3500):
    # Turn on evaluation mode which disables dropout.
    model.eval()
    hidden = model.init_hidden(1)
    outSeq = []
    for i in range(endPoint):

        if i > startPoint:
            out, hidden = model.forward(out, hidden)
        else:
            out, hidden = model.forward(gen_dataset[i].unsqueeze(0), hidden)

        outValue = out.data.cpu()[0][0][0]

        outSeq.append(outValue)

    target = preprocess_data.reconstruct(
        gen_dataset.cpu()[:, 0,
                          0].numpy(), TimeseriesData.trainData['seqData_mean'],
        TimeseriesData.trainData['seqData_std'])

    outSeq = preprocess_data.reconstruct(
        np.array(outSeq), TimeseriesData.trainData['seqData_mean'],
        TimeseriesData.trainData['seqData_std'])

    plt.figure(figsize=(15, 5))
    plot1 = plt.plot(target,
                     label='Target',
                     color='black',
                     marker='.',
                     linestyle='--',
                     markersize=1,
                     linewidth=0.5)
    plot2 = plt.plot(range(startPoint),
                     outSeq[:startPoint],
                     label='1-step predictions',
                     color='green',
                     marker='.',
                     linestyle='--',
                     markersize=1.5,
                     linewidth=1)

    plot3 = plt.plot(range(startPoint, endPoint, 1),
                     outSeq[startPoint:],
                     label='Multi-step predictions',
                     color='blue',
                     marker='.',
                     linestyle='--',
                     markersize=1.5,
                     linewidth=1)
    plt.xlim([1500, endPoint])
    plt.xlabel('Index', fontsize=15)
    plt.ylabel('Value', fontsize=15)

    plt.title('Time-series Prediction on ' + args.data + ' Dataset',
              fontsize=18,
              fontweight='bold')
    plt.legend()
    plt.tight_layout()
    plt.text(1520, 32000, 'Epoch: ' + str(epoch), fontsize=15)
    plt.savefig('result/nyc_taxi/fig_epoch' + str(epoch) + '.png')
    #plt.show()
    plt.close()

    return outSeq