def main(args):
    """Run segmentation inference on every image in args.data_dir.

    Builds the requested ESPNet variant, loads its pre-trained weights,
    and hands the image list to evaluateModel().

    Args:
        args: parsed CLI namespace. Fields used: data_dir, img_extn,
            modelType (2 = encoder only, 1 = full ESPNet), gpu, p, q,
            classes, weightsDir, savedir.
    """
    # Collect all input images matching the requested extension.
    image_list = glob.glob(args.data_dir + os.sep + '*.' + args.img_extn)

    # Encoder-only output is 1/16 of the input resolution, the full
    # network's is 1/2, so upsample accordingly to restore input size.
    if args.modelType == 2:
        up = torch.nn.Upsample(scale_factor=16, mode='bilinear')
    else:
        up = torch.nn.Upsample(scale_factor=2, mode='bilinear')
    if args.gpu:
        up = up.cuda()

    p = args.p
    q = args.q
    classes = args.classes
    if args.modelType == 2:
        # Encoder-only variant.
        modelA = Net.ESPNet_Encoder(classes, p, q)
        model_weight_file = os.path.join(
            args.weightsDir, 'encoder', 'espnet_p_' + str(p) + '_q_' + str(q) + '.pth')
        if not os.path.isfile(model_weight_file):
            print(
                'Pre-trained model file does not exist. Please check ../pretrained/encoder folder'
            )
            exit(-1)
        modelA.load_state_dict(torch.load(model_weight_file))
    elif args.modelType == 1:
        # Full encoder-decoder network.
        modelA = Net.ESPNet(classes, p, q)
        model_weight_file = os.path.join(
            args.weightsDir, 'decoder', 'espnet_p_' + str(p) + '_q_' + str(q) + '.pth')
        if not os.path.isfile(model_weight_file):
            print(
                'Pre-trained model file does not exist. Please check ../pretrained/decoder folder'
            )
            exit(-1)
        modelA.load_state_dict(torch.load(model_weight_file))
    else:
        # BUG FIX: previously fell through without exiting, leaving
        # modelA unbound and crashing with UnboundLocalError below.
        print('Model not supported')
        exit(-1)
    # modelA = torch.nn.DataParallel(modelA)
    if args.gpu:
        modelA = modelA.cuda()

    # Inference only: disable dropout/batch-norm training behavior.
    modelA.eval()

    # makedirs (vs mkdir) also creates missing parent directories.
    if not os.path.isdir(args.savedir):
        os.makedirs(args.savedir, exist_ok=True)

    evaluateModel(args, modelA, up, image_list)
def main(args):
    """Benchmark the forward-pass latency of an ESPNet variant on random input.

    Repeatedly pushes a random batch through the model, printing per-image
    forward time (the first iteration is discarded as warm-up). Runs forever;
    stop with Ctrl-C.

    NOTE(review): this redefines ``main`` declared earlier in the file; the
    later definition wins at import time — confirm which entry point is
    intended.

    Args:
        args: parsed CLI namespace. Fields used: classes, p, q,
            modelType (2 = encoder only, 1 = full ESPNet), cpu,
            weightsDir, batch_size, num_channels, height, width.
    """
    classes = args.classes
    p = args.p
    q = args.q
    if args.modelType == 2:
        # Encoder-only variant.
        model = Net.ESPNet_Encoder(classes, p, q)
        model_weight_file = os.path.join(
            args.weightsDir, 'encoder', 'espnet_p_' + str(p) + '_q_' + str(q) + '.pth')
        if not os.path.isfile(model_weight_file):
            print(
                'Pre-trained model file does not exist. Please check ../pretrained/encoder folder'
            )
            exit(-1)
        model.load_state_dict(torch.load(model_weight_file))
    elif args.modelType == 1:
        # Full encoder-decoder network.
        model = Net.ESPNet(classes, p, q)
        model_weight_file = os.path.join(
            args.weightsDir, 'decoder', 'espnet_p_' + str(p) + '_q_' + str(q) + '.pth')
        if not os.path.isfile(model_weight_file):
            print(
                'Pre-trained model file does not exist. Please check ../pretrained/decoder folder'
            )
            exit(-1)
        model.load_state_dict(torch.load(model_weight_file))
    else:
        # BUG FIX: previously fell through without exiting, leaving
        # model unbound and crashing with UnboundLocalError below.
        print('Model not supported')
        exit(-1)

    if not args.cpu:
        model = model.cuda()  # .half() seemed slower in practice
    # model = torch.nn.DataParallel(model).cuda()

    # Inference only: disable dropout/batch-norm training behavior.
    model.eval()

    # Synthetic input batch; contents are irrelevant for timing.
    images = torch.randn(args.batch_size, args.num_channels, args.height,
                         args.width)

    if not args.cpu:
        images = images.cuda()  # .half()

    time_train = []
    i = 0

    while True:
        start_time = time.time()

        # BUG FIX: Variable(images, volatile=True) has been a no-op since
        # PyTorch 0.4 — autograd stayed enabled, skewing the timing and
        # wasting memory. torch.no_grad() is the supported replacement.
        with torch.no_grad():
            outputs = model(images)

        if not args.cpu:
            # CUDA kernels launch asynchronously; wait for completion so
            # the wall-clock measurement covers the actual forward pass.
            torch.cuda.synchronize()

        if i != 0:  # first run always takes some time for setup
            fwt = time.time() - start_time
            time_train.append(fwt)
            print("Forward time per img (b=%d): %.3f (Mean: %.3f)" %
                  (args.batch_size, fwt / args.batch_size,
                   sum(time_train) / len(time_train) / args.batch_size))

        time.sleep(1)  # to avoid overheating the GPU too much
        i += 1