Example #1
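All of the examples below are main() functions lifted from the same AdmiralNet project and omit their imports. A plausible shared import block follows; the project-local modules (models, loaders, imutils, py_vjoy, pyf1_datalogger, and the load_config/train_model helpers) are assumptions inferred from how the code uses them:

# Assumed shared imports for the examples below; the project-local modules
# are inferred from usage and may live under different names in the repo.
import argparse
import glob
import os
import pickle
import time

import cv2
import numpy as np
import numpy_ringbuffer
import torch
import torch.nn as nn
import torch.optim as optim
from matplotlib import pyplot as plt
from torchvision import transforms
from tqdm import tqdm

import models                     # project-local: defines AdmiralNet
import loaders                    # project-local: defines F1SequenceDataset
import imutils.annotation_utils   # project-local: defines overlay_image
import py_vjoy                    # project-local vJoy wrapper (Example #4)
import pyf1_datalogger            # project-local screen capture (Example #4)
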
def main():
    parser = argparse.ArgumentParser(description="Visualize AdmiralNet")
    parser.add_argument("--model_file", type=str, required=True)
    parser.add_argument("--input_file", type=str, required=True)
    parser.add_argument("--layer", type=str, required=True)
    args = parser.parse_args()
    
    model_dir, model_file = os.path.split(args.model_file)
    config_path = os.path.join(model_dir,'config.pkl')
    with open(config_path, 'rb') as config_file:
        config = pickle.load(config_file)
    print(config)

    gpu = int(config['gpu'])
    use_float32 = bool(config['use_float32'])
    label_scale = float(config['label_scale'])
    size = (66,200)
    context_length = int(config['context_length'])
    sequence_length = int(config['sequence_length'])
    hidden_dim = int(config['hidden_dim'])
    optical_flow = bool(config.get('optical_flow', False))
    
    if optical_flow:
        prvs = load_image(args.input_file).astype(np.float32) / 255.0
        prvs_grayscale = cv2.cvtColor(prvs, cv2.COLOR_BGR2GRAY)
        prvs_resize = cv2.resize(prvs_grayscale, (size[1], size[0]),
                                 interpolation=cv2.INTER_CUBIC)
        # Second frame (assumed --next_file) for the flow computation.
        nxt = load_image(args.next_file).astype(np.float32) / 255.0
        next_grayscale = cv2.cvtColor(nxt, cv2.COLOR_BGR2GRAY)
        next_resize = cv2.resize(next_grayscale, (size[1], size[0]),
                                 interpolation=cv2.INTER_CUBIC)
        flow = cv2.calcOpticalFlowFarneback(prvs_resize, next_resize, None,
                                            0.5, 3, 20, 8, 5, 1.2, 0)
        inputfile = flow.transpose(2, 0, 1)
        #inputfile=inputfile.reshape(-1,2,1825,300)
    else:
        inputfile = load_image(args.input_file).astype(np.float32) / 255.0
        #inputfile=inputfile.reshape(-1,3,1825,300)

    network = models.AdmiralNet(cell='lstm',
                                context_length=context_length,
                                sequence_length=sequence_length,
                                hidden_dim=hidden_dim,
                                use_float32=use_float32,
                                gpu=gpu,
                                optical_flow=optical_flow)
    state_dict = torch.load(args.model_file)
    network.load_state_dict(state_dict)
    print(network)

    get_fmaps(network, inputfile, int(args.layer))
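
load_image and get_fmaps are not defined in this snippet. A minimal sketch, assuming load_image is a thin cv2.imread wrapper and get_fmaps captures one convolutional layer's output with a PyTorch forward hook (the exact AdmiralNet.forward() signature and input shape are assumptions based on the other examples):

def load_image(path):
    # Assumed helper: read an image from disk as a BGR numpy array.
    im = cv2.imread(path, cv2.IMREAD_COLOR)
    if im is None:
        raise FileNotFoundError(path)
    return im

def get_fmaps(network, inputfile, layer_index):
    # Assumed helper: register a forward hook on the layer_index-th Conv2d,
    # run one forward pass, and save each captured feature map as an image.
    fmaps = []
    convs = [m for m in network.modules() if isinstance(m, nn.Conv2d)]
    handle = convs[layer_index].register_forward_hook(
        lambda module, inp, out: fmaps.append(out.detach().cpu()))
    batch = torch.from_numpy(inputfile).float().unsqueeze(0).unsqueeze(0)
    if next(network.parameters()).is_cuda:
        batch = batch.cuda()
    with torch.no_grad():
        network(batch, None, None)  # throttle/brake unused, as in Example #4
    handle.remove()
    for i, fmap in enumerate(fmaps[0][0]):
        plt.imsave("fmap_{0}.png".format(i), fmap.numpy())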
Example #2
def main():
    parser = argparse.ArgumentParser(description="Test AdmiralNet")
    parser.add_argument("--model_file", type=str, required=True)
    parser.add_argument("--annotation_file", type=str, required=True)
    parser.add_argument("--write_images", action="store_true")
    parser.add_argument("--plot", action="store_true")
    args = parser.parse_args()

    annotation_dir, annotation_file = os.path.split(args.annotation_file)
    model_dir, model_file = os.path.split(args.model_file)
    config_path = os.path.join(model_dir, 'config.pkl')
    with open(config_path, 'rb') as config_file:
        config = pickle.load(config_file)
    print(config)
    model_prefix, _ = model_file.split(".")
    # return

    gpu = int(config['gpu'])
    use_float32 = bool(config['use_float32'])
    label_scale = float(config['label_scale'])
    size = (66, 200)
    prefix, _ = annotation_file.split(".")
    prefix = prefix + config['file_prefix']
    context_length = int(config['context_length'])
    sequence_length = int(config['sequence_length'])
    hidden_dim = int(config['hidden_dim'])
    optical_flow = bool(config.get('optical_flow', False))
    rnn_cell_type = 'lstm'
    network = models.AdmiralNet(cell=rnn_cell_type,
                                context_length=context_length,
                                sequence_length=sequence_length,
                                hidden_dim=hidden_dim,
                                use_float32=use_float32,
                                gpu=gpu,
                                optical_flow=optical_flow)
    state_dict = torch.load(args.model_file)
    network.load_state_dict(state_dict)
    print(network)
    #result_data=[]
    if (label_scale == 1.0):
        label_transformation = None
    else:
        label_transformation = transforms.Compose(
            [transforms.Lambda(lambda inputs: inputs.mul(label_scale))])
    if (use_float32):
        network.float()
        trainset = loaders.F1SequenceDataset(
            annotation_dir, annotation_file, (66, 200),
            context_length=context_length, sequence_length=sequence_length,
            use_float32=True, label_transformation=label_transformation,
            optical_flow=optical_flow)
    else:
        network.double()
        trainset = loaders.F1SequenceDataset(
            annotation_dir, annotation_file, (66, 200),
            context_length=context_length, sequence_length=sequence_length,
            label_transformation=label_transformation,
            optical_flow=optical_flow)

    if (gpu >= 0):
        network = network.cuda(gpu)
    if optical_flow:
        if ((not os.path.isfile("./" + prefix + "_opticalflows.pkl"))
                or (not os.path.isfile("./" + prefix +
                                       "_opticalflowannotations.pkl"))):
            trainset.read_files_flow()
            trainset.write_pickles(prefix + "_opticalflows.pkl",
                                   prefix + "_opticalflowannotations.pkl")
        else:
            trainset.read_pickles(prefix + "_opticalflows.pkl",
                                  prefix + "_opticalflowannotations.pkl")
    else:
        if ((not os.path.isfile("./" + prefix + "_images.pkl"))
                or (not os.path.isfile("./" + prefix + "_annotations.pkl"))):
            trainset.read_files()
            trainset.write_pickles(prefix + "_images.pkl",
                                   prefix + "_annotations.pkl")
        else:
            trainset.read_pickles(prefix + "_images.pkl",
                                  prefix + "_annotations.pkl")

    trainset.img_transformation = config['image_transformation']
    loader = torch.utils.data.DataLoader(trainset,
                                         batch_size=1,
                                         shuffle=False,
                                         num_workers=0)
    cum_diff = 0.0
    t = tqdm(enumerate(loader))
    network.eval()
    predictions = []
    if args.write_images:
        imdir = "admiralnet_prediction_images_" + model_prefix
        if not os.path.exists(imdir):
            os.mkdir(imdir)
        with open(args.annotation_file, 'r') as annotation_file:
            annotations = annotation_file.readlines()
        im, _, _, _, _ = annotations[0].split(",")
        background = cv2.imread(
            os.path.join(annotation_dir, 'raw_images_full', im),
            cv2.IMREAD_UNCHANGED)
        out_size = background.shape
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        videoout = cv2.VideoWriter(os.path.join(imdir, "video.avi"), fourcc,
                                   30.0, (out_size[1], out_size[0]), True)
        wheel_pred = cv2.imread('predicted_fixed.png', cv2.IMREAD_UNCHANGED)
        wheelrows_pred = 65
        wheelcols_pred = 65
        wheel_pred = cv2.resize(wheel_pred, (wheelcols_pred, wheelrows_pred),
                                interpolation=cv2.INTER_CUBIC)
    for idx, (inputs, throttle, brake, _, labels) in t:
        if (gpu >= 0):
            inputs = inputs.cuda(gpu)
            throttle = throttle.cuda(gpu)
            brake = brake.cuda(gpu)
            labels = labels.cuda(gpu)
        pred = torch.div(network(inputs, throttle, brake), label_scale)
        #result_data.append([labels,pred])
        if pred.shape[1] == 1:
            angle = pred.item()
        else:
            angle = pred.squeeze()[0].item()
        predictions.append(angle)
        t.set_postfix(angle=angle)
        if args.write_images:
            scaled_pred_angle = 180.0 * angle
            M_pred = cv2.getRotationMatrix2D(
                (wheelrows_pred / 2, wheelcols_pred / 2), scaled_pred_angle, 1)
            wheel_pred_rotated = cv2.warpAffine(
                wheel_pred, M_pred, (wheelrows_pred, wheelcols_pred))
            numpy_im = np.transpose(trainset.images[idx],
                                    (1, 2, 0)).astype(np.float32)
            # print(numpy_im.shape)
            im, _, _, _, _ = annotations[idx].split(",")
            background = cv2.imread(
                os.path.join(annotation_dir, 'raw_images_full', im),
                cv2.IMREAD_UNCHANGED)
            out_size = background.shape

            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (int(
                (out_size[1] - wheelcols_pred) / 2) - 15,
                                      int((out_size[0] - wheelcols_pred) / 2) -
                                      55)
            fontScale = 0.45
            fontColor = (0, 0, 0)
            lineType = 1

            overlay = background.copy()
            cv2.rectangle(overlay,
                          (int((out_size[1] - wheelcols_pred) / 2) - 20,
                           int((out_size[0] - wheelcols_pred) / 2) - 53),
                          (int((out_size[1] - wheelcols_pred) / 2) + 100,
                           int((out_size[0] - wheelcols_pred) / 2) - 67),
                          (255, 255, 255, 0.2), -1)

            alpha = 0.5
            cv2.addWeighted(overlay, alpha, background, 1 - alpha, 0,
                            background)

            cv2.putText(background, 'Predicted:' + "{0:.2f}".format(angle),
                        bottomLeftCornerOfText, font, fontScale, fontColor,
                        lineType)

            overlayed_pred = imutils.annotation_utils.overlay_image(
                background, wheel_pred_rotated,
                int((out_size[1] - wheelcols_pred) / 2) + 10,
                int((out_size[0] - wheelcols_pred) / 2) - 150)

            name = "ouput_image_" + str(idx) + ".png"
            output_path = os.path.join(imdir, name)
            cv2.imwrite(output_path, overlayed_pred)
            videoout.write(overlayed_pred)
    predictions_array = np.array(predictions)
    log_name = "ouput_log.txt"
    imdir = "admiralnet_prediction_images_" + model_prefix
    if not os.path.exists(imdir):
        os.mkdir(imdir)
    log_output_path = os.path.join(imdir, log_name)
    with open(log_output_path, "a") as myfile:
        for x in predictions_array:
            myfile.write("{0}\n".format(x))

    if args.plot:
        fig = plt.figure()
        ax = plt.subplot(111)
        t = np.linspace(0, len(loader) - 1, len(loader))
        ax.plot(t, predictions_array, 'r', label='Predicted')
        ax.legend()
        plt.savefig("admiralnet_prediction_images_" + model_prefix +
                    "\plot.jpeg")
        plt.show()
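
Example #2 (and the later examples) call imutils.annotation_utils.overlay_image, which is project-local and not shown. A minimal sketch of such an overlay, assuming the wheel images carry an alpha channel (cv2.IMREAD_UNCHANGED preserves it), the background is 3-channel BGR, and the overlay fits inside the background:

def overlay_image(background, overlay, x, y):
    # Hypothetical re-implementation: alpha-blend a BGRA overlay onto a
    # BGR background with its top-left corner at (x, y).
    h, w = overlay.shape[:2]
    roi = background[y:y + h, x:x + w, :3].astype(np.float32)
    alpha = overlay[:, :, 3:4].astype(np.float32) / 255.0
    blended = alpha * overlay[:, :, :3].astype(np.float32) + (1.0 - alpha) * roi
    out = background.copy()
    out[y:y + h, x:x + w, :3] = blended.astype(background.dtype)
    return out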
Example #3
def main():
    parser = argparse.ArgumentParser(
        description="Steering prediction with PilotNet")
    parser.add_argument("--config_file",
                        type=str,
                        required=True,
                        help="Config file to use")
    args = parser.parse_args()
    config_fp = args.config_file
    config = load_config(config_fp)

    # mandatory parameters
    learning_rate = float(config['learning_rate'])
    root_dir, annotation_file = os.path.split(config['annotation_file'])
    prefix, _ = annotation_file.split(".")

    # optional parameters
    file_prefix = config['file_prefix']
    checkpoint_file = config['checkpoint_file']
    load_files = bool(config['load_files'])
    use_float32 = bool(config['use_float32'])
    optical_flow = bool(config['optical_flow'])
    apply_norm = bool(config['apply_normalization'])
    label_scale = float(config['label_scale'])
    momentum = float(config['momentum'])
    batch_size = int(config['batch_size'])
    gpu = int(config['gpu'])
    epochs = int(config['epochs'])
    workers = int(config['workers'])
    context_length = int(config['context_length'])
    sequence_length = int(config['sequence_length'])
    hidden_dim = int(config['hidden_dim'])

    _, config_file = os.path.split(config_fp)
    config_file_name, _ = config_file.split(".")
    output_dir = config_file_name.replace("\n", "")
    prefix = prefix + file_prefix
    rnn_cell_type = 'lstm'
    output_dir = output_dir + "_" + rnn_cell_type
    network = models.AdmiralNet(cell=rnn_cell_type,
                                context_length=context_length,
                                sequence_length=sequence_length,
                                hidden_dim=hidden_dim,
                                use_float32=use_float32,
                                gpu=gpu)
    starting_epoch = 0
    if checkpoint_file != '':
        dir, file = os.path.split(checkpoint_file)
        _, ep = file.split("epoch")
        num, ext = ep.split(".")
        starting_epoch = int(num)
        print("Starting Epoch number:", starting_epoch)
        state_dict = torch.load(checkpoint_file)
        network.load_state_dict(state_dict)

    if label_scale == 1.0:
        label_transformation = None
    else:
        label_transformation = transforms.Compose(
            [transforms.Lambda(lambda inputs: inputs.mul(label_scale))])
    if use_float32:
        network.float()
        trainset = loaders.F1SequenceDataset(
            root_dir, annotation_file, (66, 200),
            context_length=context_length, sequence_length=sequence_length,
            use_float32=True, label_transformation=label_transformation,
            optical_flow=optical_flow)
    else:
        network.double()
        trainset = loaders.F1SequenceDataset(
            root_dir, annotation_file, (66, 200),
            context_length=context_length, sequence_length=sequence_length,
            label_transformation=label_transformation,
            optical_flow=optical_flow)
    if gpu >= 0:
        network = network.cuda(gpu)

    # trainset.read_files()
    if optical_flow:
        if (load_files
                or (not os.path.isfile("./" + prefix + "_opticalflows.pkl"))
                or (not os.path.isfile("./" + prefix +
                                       "_opticalflowannotations.pkl"))):
            trainset.read_files_flow()
            trainset.write_pickles(prefix + "_opticalflows.pkl",
                                   prefix + "_opticalflowannotations.pkl")
        else:
            trainset.read_pickles(prefix + "_opticalflows.pkl",
                                  prefix + "_opticalflowannotations.pkl")
    else:
        if (load_files or (not os.path.isfile("./" + prefix + "_images.pkl"))
                or (not os.path.isfile("./" + prefix + "_annotations.pkl"))):
            trainset.read_files()
            trainset.write_pickles(prefix + "_images.pkl",
                                   prefix + "_annotations.pkl")
        else:
            trainset.read_pickles(prefix + "_images.pkl",
                                  prefix + "_annotations.pkl")

    im = trainset.images[66]
    im = im.transpose(1, 2, 0)
    #cv2.namedWindow("im",cv2.WINDOW_AUTOSIZE)
    #cv2.imshow("im",im/255.0)
    #cv2.waitKey(0)
    #cv2.destroyWindow("im")
    print("Dataset has type: " + str(im.dtype))
    #cv2.imwrite("img.jpg",im)

    if apply_norm:
        mean, stdev = trainset.statistics()
        mean_ = torch.from_numpy(mean).float()
        stdev_ = torch.from_numpy(stdev).float()
        print("Mean")
        print(mean_)
        print("Stdev")
        print(stdev_)
        trainset.img_transformation = transforms.Normalize(mean_, stdev_)
    else:
        print("Skipping Normalize")
        trainset.img_transformation = None
    # trainset.img_transformation = transforms.Normalize([2.5374081e-06, -3.1837547e-07] , [3.0699273e-05, 5.9349504e-06])

    config['image_transformation'] = trainset.img_transformation
    config['label_transformation'] = trainset.label_transformation
    print("Using configuration: ", config)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    with open(os.path.join(output_dir, "config.pkl"), 'wb') as config_dump:
        pickle.dump(config, config_dump)

    trainLoader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=workers)
    print(trainLoader)

    # Definition of our loss.
    criterion = nn.MSELoss()
    # Definition of optimization strategy.
    optimizer = optim.SGD(network.parameters(),
                          lr=learning_rate,
                          momentum=momentum)
    losses = train_model(network,
                         criterion,
                         optimizer,
                         trainLoader,
                         rnn_cell_type,
                         prefix,
                         output_dir,
                         n_epochs=epochs,
                         gpu=gpu,
                         starting_epoch=starting_epoch)

    if optical_flow:
        loss_path = os.path.join(output_dir,
                                 prefix + "_" + rnn_cell_type + "_OF.txt")
    else:
        loss_path = os.path.join(output_dir,
                                 prefix + "_" + rnn_cell_type + ".txt")
    with open(loss_path, "w") as f:
        f.write("\n".join(map(str, losses)))
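
load_config and train_model are defined elsewhere in the project. A minimal sketch of the training loop this script expects, assuming each batch unpacks as (inputs, throttle, brake, _, labels) like in the test scripts, and that checkpoints are named so the file.split("epoch") parsing above recovers the epoch number:

def train_model(network, criterion, optimizer, loader, cell_type, prefix,
                output_dir, n_epochs=10, gpu=-1, starting_epoch=0):
    # Hypothetical sketch; the real implementation may unpack batches and
    # name checkpoints differently.
    epoch_losses = []
    for epoch in range(starting_epoch, starting_epoch + n_epochs):
        running_loss = 0.0
        for inputs, throttle, brake, _, labels in loader:
            if gpu >= 0:
                inputs, throttle = inputs.cuda(gpu), throttle.cuda(gpu)
                brake, labels = brake.cuda(gpu), labels.cuda(gpu)
            optimizer.zero_grad()
            loss = criterion(network(inputs, throttle, brake), labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        epoch_losses.append(running_loss / max(len(loader), 1))
        # Name checkpoints so "epoch<N>." parsing recovers N on resume.
        checkpoint = os.path.join(
            output_dir,
            prefix + "_" + cell_type + "_epoch" + str(epoch + 1) + ".model")
        torch.save(network.state_dict(), checkpoint)
    return epoch_losses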
Example #4
def main():
    parser = argparse.ArgumentParser(description="Test AdmiralNet")
    parser.add_argument("--model_file", type=str, required=True)
    args = parser.parse_args()

    model_dir, model_file = os.path.split(args.model_file)
    config_path = os.path.join(model_dir, 'config.pkl')
    with open(config_path, 'rb') as config_file:
        config = pickle.load(config_file)
    model_prefix, _ = model_file.split(".")

    gpu = int(config['gpu'])
    use_float32 = bool(config['use_float32'])
    label_scale = float(config['label_scale'])
    context_length = int(config['context_length'])
    sequence_length = int(config['sequence_length'])
    hidden_dim = int(config['hidden_dim'])
    optical_flow = bool(config.get('optical_flow', False))
    rnn_cell_type = 'lstm'
    network = models.AdmiralNet(cell=rnn_cell_type,
                                context_length=context_length,
                                sequence_length=sequence_length,
                                hidden_dim=hidden_dim,
                                use_float32=use_float32,
                                gpu=gpu,
                                optical_flow=optical_flow)
    state_dict = torch.load(args.model_file)
    network.load_state_dict(state_dict)
    network = network.float()
    network = network.cuda(0)
    print(network)
    vjoy_max = 32000

    throttle = torch.Tensor(1, 10)
    brake = torch.Tensor(1, 10)
    if (use_float32):
        network.float()
    else:
        network.double()
    if (gpu >= 0):
        network = network.cuda(gpu)
    network.eval()
    vj = py_vjoy.vJoy()
    vj.capture(1)  #1 is the device ID
    vj.reset()
    js = py_vjoy.Joystick()
    js.setAxisXRot(int(round(vjoy_max / 2)))
    js.setAxisYRot(int(round(vjoy_max / 2)))
    vj.update(js)
    time.sleep(2)
    inputs = []
    wheel_pred = cv2.imread('predicted_fixed.png', cv2.IMREAD_UNCHANGED)
    wheelrows_pred = 66
    wheelcols_pred = 66
    wheel_pred = cv2.resize(wheel_pred, (wheelcols_pred, wheelrows_pred),
                            interpolation=cv2.INTER_CUBIC)
    buffer = numpy_ringbuffer.RingBuffer(capacity=context_length,
                                         dtype=(np.float32, (2, 66, 200)))

    dt = 12  # capture period in milliseconds
    context_length = 10  # hard-coded here; overrides the value from config.pkl
    debug = True
    app = "F1 2017"
    dl = pyf1_datalogger.ScreenVideoCapture()
    dl.open(app, 0, 200, 1700, 300)
    interp = cv2.INTER_AREA
    if debug:
        cv2.namedWindow(app, cv2.WINDOW_AUTOSIZE)
    pscreen = fill_buffer(buffer,
                          dl,
                          dt=dt,
                          context_length=context_length,
                          interp=interp)
    buffer_torch = torch.rand(1, 10, 2, 66, 200).float()
    buffer_torch = buffer_torch.cuda(0)
    while (True):
        cv2.waitKey(dt)
        screen = grab_screen(dl)
        screen_grey = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        screen_grey = cv2.resize(screen_grey, (200, 66), interpolation=interp)
        flow = cv2.calcOpticalFlowFarneback(pscreen, screen_grey, None, 0.5, 3,
                                            20, 8, 5, 1.2, 0)
        im = flow.transpose(2, 0, 1).astype(np.float32)
        buffer.append(im)
        pscreen = screen_grey
        buffer_torch[0] = torch.from_numpy(np.array(buffer))
        #print("Input Size: " + str(buffer_torch.size()))
        outputs = network(buffer_torch, throttle=None, brake=None)
        angle = outputs[0][0].item()
        print("Output: " + str(angle))
        scaled_pred_angle = 180.0 * angle + 7
        M_pred = cv2.getRotationMatrix2D(
            (wheelrows_pred / 2, wheelcols_pred / 2), scaled_pred_angle, 1)
        wheel_pred_rotated = cv2.warpAffine(wheel_pred, M_pred,
                                            (wheelrows_pred, wheelcols_pred))
        background = screen
        out_size = background.shape
        print(out_size)
        print(wheel_pred_rotated.shape)
        overlayed_pred = imutils.annotation_utils.overlay_image(
            background, wheel_pred_rotated,
            int((out_size[1] - wheelcols_pred) / 2),
            int((out_size[0] - wheelcols_pred) / 2))
        if debug:
            cv2.imshow(app, overlayed_pred)
        # map the predicted angle onto the vJoy axis range, centered at vjoy_max/2
        vjoy_angle = -angle * vjoy_max + vjoy_max / 2.0
        js.setAxisXRot(int(round(vjoy_angle)))
        js.setAxisYRot(int(round(vjoy_angle)))
        vj.update(js)
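grab_screen and fill_buffer are not shown in this snippet. A minimal sketch consistent with the capture loop above, under the assumption that pyf1_datalogger.ScreenVideoCapture exposes a read() method returning a BGR frame (the real API may differ):

def grab_screen(dl):
    # Hypothetical: pull one BGR frame from the capture object; the real
    # pyf1_datalogger API may use a differently named method.
    return dl.read()

def fill_buffer(buffer, dl, dt=12, context_length=10, interp=cv2.INTER_AREA):
    # Hypothetical: prime the ring buffer with context_length optical-flow
    # frames and return the last grayscale frame for the next iteration.
    pscreen = cv2.resize(cv2.cvtColor(grab_screen(dl), cv2.COLOR_BGR2GRAY),
                         (200, 66), interpolation=interp)
    for _ in range(context_length):
        cv2.waitKey(dt)
        screen = cv2.resize(cv2.cvtColor(grab_screen(dl), cv2.COLOR_BGR2GRAY),
                            (200, 66), interpolation=interp)
        flow = cv2.calcOpticalFlowFarneback(pscreen, screen, None, 0.5, 3,
                                            20, 8, 5, 1.2, 0)
        buffer.append(flow.transpose(2, 0, 1).astype(np.float32))
        pscreen = screen
    return pscreen
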
Example #5
def main():
    parser = argparse.ArgumentParser(description="Test AdmiralNet")
    parser.add_argument("--model_file", type=str, required=True)
    parser.add_argument("--annotation_file", type=str, required=True)
    parser.add_argument("--write_images", action="store_true")
    parser.add_argument("--plot", action="store_true")
    args = parser.parse_args()

    annotation_dir, annotation_file = os.path.split(args.annotation_file)
    model_dir, model_file = os.path.split(args.model_file)
    config_path = os.path.join(model_dir, 'config.pkl')
    with open(config_path, 'rb') as config_file:
        config = pickle.load(config_file)
    print(config)
    model_prefix, _ = model_file.split(".")
    # return

    gpu = int(config['gpu'])
    use_float32 = bool(config['use_float32'])
    label_scale = float(config['label_scale'])
    #size = (66,200)
    prefix, _ = annotation_file.split(".")
    prefix = prefix + config['file_prefix']
    context_length = int(config['context_length'])
    sequence_length = int(config['sequence_length'])
    hidden_dim = int(config['hidden_dim'])
    optical_flow = bool(config.get('optical_flow', False))
    rnn_cell_type = 'lstm'
    network = models.AdmiralNet(cell=rnn_cell_type,
                                context_length=context_length,
                                sequence_length=sequence_length,
                                hidden_dim=hidden_dim,
                                use_float32=use_float32,
                                gpu=gpu)
    state_dict = torch.load(args.model_file)
    network.load_state_dict(state_dict)
    print(network)
    #result_data=[]
    if (label_scale == 1.0):
        label_transformation = None
    else:
        label_transformation = transforms.Compose(
            [transforms.Lambda(lambda inputs: inputs.mul(label_scale))])
    if (use_float32):
        network.float()
        trainset = loaders.F1SequenceDataset(
            annotation_dir, annotation_file, (66, 200),
            context_length=context_length, sequence_length=sequence_length,
            use_float32=True, label_transformation=label_transformation,
            optical_flow=optical_flow)
    else:
        network.double()
        trainset = loaders.F1SequenceDataset(
            annotation_dir, annotation_file, (66, 200),
            context_length=context_length, sequence_length=sequence_length,
            label_transformation=label_transformation,
            optical_flow=optical_flow)

    if (gpu >= 0):
        network = network.cuda(gpu)

    pickle_dir, _ = annotation_file.split('.')
    pickle_dir += '_data'
    if optical_flow:
        load_files = glob.glob(
            os.path.join(pickle_dir, 'saved_image_opticalflow*.pkl'))
    else:
        load_files = glob.glob(os.path.join(pickle_dir, 'saved_image*.pkl'))
    if (len(load_files) == 0):
        if optical_flow:
            trainset.read_files_flow()
            load_files = glob.glob(
                os.path.join(pickle_dir, 'saved_image_opticalflow*.pkl'))
        else:
            trainset.read_files()
            load_files = glob.glob(
                os.path.join(pickle_dir, 'saved_image*.pkl'))
    load_files.sort()
    predictions = []
    ground_truths = []
    losses = []
    criterion = nn.MSELoss()
    cum_diff = 0.0
    if (gpu >= 0):
        criterion = criterion.cuda(gpu)
    network.eval()
    for file in load_files:
        # Load one partition of the dataset
        if optical_flow:
            dir, file = os.path.split(file)
            prefix, data_type, op, suffix = file.split('_')
            data_type = 'labels'
            label_file = prefix + '_' + data_type + '_' + op + '_' + suffix
        else:
            dir, file = os.path.split(file)
            prefix, data_type, suffix = file.split('_')
            data_type = 'labels'
            label_file = prefix + '_' + data_type + '_' + suffix
        trainset.read_pickles(os.path.join(dir, file),
                              os.path.join(dir, label_file))
        trainset.img_transformation = config['image_transformation']
        loader = torch.utils.data.DataLoader(trainset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=0)

        t = tqdm(enumerate(loader))
        if args.write_images:
            imdir = "admiralnet_prediction_images_" + model_prefix
            if (not os.path.exists(imdir)):
                os.mkdir(imdir)
            with open(args.annotation_file, 'r') as annotation_file:
                annotations = annotation_file.readlines()
            im, _, _, _, _ = annotations[0].split(",")
            background = cv2.imread(
                os.path.join(annotation_dir, 'raw_images', im),
                cv2.IMREAD_UNCHANGED)
            out_size = background.shape
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            fps = 60
            videoout = cv2.VideoWriter(os.path.join(imdir,
                                                    "video.avi"), fourcc, fps,
                                       (out_size[1], out_size[0]), True)
            wheel_pred = cv2.imread('predicted_fixed.png',
                                    cv2.IMREAD_UNCHANGED)
            wheel_ground = cv2.imread('ground_truth_fixed.png',
                                      cv2.IMREAD_UNCHANGED)
            wheelrows_pred = 65
            wheelcols_pred = 65
            wheel_pred = cv2.resize(wheel_pred,
                                    (wheelcols_pred, wheelrows_pred),
                                    interpolation=cv2.INTER_CUBIC)
            wheelrows_ground = 65
            wheelcols_ground = 65
            wheel_ground = cv2.resize(wheel_ground,
                                      (wheelcols_ground, wheelrows_ground),
                                      interpolation=cv2.INTER_CUBIC)
        for idx, (inputs, throttle, brake, _, labels, flag) in t:
            if (all(flag.numpy())):
                if (gpu >= 0):
                    inputs = inputs.cuda(gpu)
                    throttle = throttle.cuda(gpu)
                    brake = brake.cuda(gpu)
                    labels = labels.cuda(gpu)
                pred = torch.div(network(inputs, throttle, brake), label_scale)
                #result_data.append([labels,pred])
                if pred.shape[1] == 1:
                    angle = pred.item()
                    ground_truth = labels.item()
                else:
                    angle = pred.squeeze()[0].item()
                    ground_truth = labels.squeeze()[0].item()
                predictions.append(angle)
                ground_truths.append(ground_truth)
                loss = criterion(pred, labels)
                losses.append(loss.item())
                t.set_postfix(angle=angle, ground_truth=ground_truth)
                #print("Ground Truth: %f. Prediction: %f.\n" %(scaled_ground_truth, scaled_angle))
                if args.write_images:
                    scaled_pred_angle = 180.0 * angle
                    scaled_truth_angle = 180.0 * ground_truth
                    M_pred = cv2.getRotationMatrix2D(
                        (wheelrows_pred / 2, wheelcols_pred / 2),
                        scaled_pred_angle, 1)
                    wheel_pred_rotated = cv2.warpAffine(
                        wheel_pred, M_pred, (wheelrows_pred, wheelcols_pred))
                    M_ground = cv2.getRotationMatrix2D(
                        (wheelrows_ground / 2, wheelcols_ground / 2),
                        scaled_truth_angle, 1)
                    wheel_ground_rotated = cv2.warpAffine(
                        wheel_ground, M_ground,
                        (wheelrows_ground, wheelcols_ground))
                    numpy_im = np.transpose(trainset.images[idx],
                                            (1, 2, 0)).astype(np.float32)
                    #print(numpy_im.shape)
                    im, _, _, _, _ = annotations[idx].split(",")
                    background = cv2.imread(
                        os.path.join(annotation_dir, 'raw_images', im),
                        cv2.IMREAD_UNCHANGED)
                    out_size = background.shape

                    font = cv2.FONT_HERSHEY_SIMPLEX
                    bottomLeftCornerOfText = (
                        int((out_size[1] - wheelcols_pred) / 2) - 90,
                        int((out_size[0] - wheelcols_pred) / 3) - 25)
                    bottomLeftCornerOfText2 = (
                        int((out_size[1] - wheelcols_pred) / 2) + 40,
                        int((out_size[0] - wheelcols_pred) / 3) - 25)
                    fontScale = 0.45
                    fontColor = (0, 0, 0)
                    lineType = 1

                    overlay = background.copy()
                    cv2.rectangle(
                        overlay,
                        (int((out_size[1] - wheelcols_pred) / 2) - 95,
                         int((out_size[0] - wheelcols_pred) / 3) - 23),
                        (int((out_size[1] - wheelcols_pred) / 2) + 25,
                         int((out_size[0] - wheelcols_pred) / 3) - 37),
                        (255, 255, 255, 0.2), -1)
                    cv2.rectangle(
                        overlay,
                        (int((out_size[1] - wheelcols_pred) / 2) + 35,
                         int((out_size[0] - wheelcols_pred) / 3) - 23),
                        (int((out_size[1] - wheelcols_pred) / 2) + 180,
                         int((out_size[0] - wheelcols_pred) / 3) - 37),
                        (255, 255, 255, 0.2), -1)

                    alpha = 0.5
                    cv2.addWeighted(overlay, alpha, background, 1 - alpha, 0,
                                    background)

                    cv2.putText(background,
                                'Predicted:' + "{0:.2f}".format(angle),
                                bottomLeftCornerOfText, font, fontScale,
                                fontColor, lineType)
                    cv2.putText(
                        background,
                        'Ground Truth:' + "{0:.2f}".format(ground_truth),
                        bottomLeftCornerOfText2, font, fontScale, fontColor,
                        lineType)

                    #print(background.shape)
                    overlayed_pred = imutils.annotation_utils.overlay_image(
                        background, wheel_pred_rotated,
                        int((out_size[1] - wheelcols_pred) / 2) - 60,
                        int((out_size[0] - wheelcols_pred) / 3))
                    overlayed_ground = imutils.annotation_utils.overlay_image(
                        overlayed_pred, wheel_ground_rotated,
                        int((out_size[1] - wheelcols_ground) / 2) + 75,
                        int((out_size[0] - wheelcols_ground) / 3))

                    name = "ouput_image_" + str(idx) + ".png"
                    output_path = os.path.join(imdir, name)
                    cv2.imwrite(output_path, overlayed_ground)
                    videoout.write(overlayed_ground)
            else:
                break
    predictions_array = np.array(predictions)
    ground_truths_array = np.array(ground_truths)
    log_name = "ouput_log.txt"
    imdir = "admiralnet_prediction_images_" + model_prefix
    if not os.path.exists(imdir):
        os.mkdir(imdir)
    log_output_path = os.path.join(imdir, log_name)
    with open(log_output_path, "a") as myfile:
        for ground_truth, prediction in zip(ground_truths_array,
                                            predictions_array):
            myfile.write("{0},{1}\n".format(ground_truth, prediction))
    diffs = np.subtract(predictions_array, ground_truths_array)
    rms = np.sqrt(np.mean(np.array(losses)))
    nrms = np.sqrt(
        np.mean(
            np.divide(
                np.square(np.array(losses)),
                np.multiply(np.mean(np.array(predictions)),
                            np.mean(np.array(ground_truths))))))
    print("RMS Error: ", rms)
    print("NRMS Error: ", nrms)

    if args.plot:
        fig = plt.figure()
        ax = plt.subplot(111)
        t = np.linspace(0, len(predictions_array) - 1, len(predictions_array))
        ax.plot(t, predictions_array, 'r', label='Predicted')
        ax.plot(t, ground_truths_array, 'b', label='Ground Truth')
        ax.legend()
        ax.set_xlabel("Frames")
        ax.set_ylabel("Steering")
        plt.savefig("admiralnet_prediction_images_" + model_prefix +
                    "\plot.jpeg")
        plt.show()