def evaluate_model(eval_model, detector_name):
    """Run a trained detector on one hard-coded test image.

    Loads the configuration for *detector_name*, detects objects in a single
    image, prints every foreground detection and writes a visualization to
    the output folder.
    """
    cfg = get_configuration(detector_name)
    cfg['NUM_CHANNELS'] = 3
    print("Map file = ", cfg["DATA"].CLASS_MAP_FILE)

    class_map = os.path.join("Steer_Bad_Relevant_output", cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].CLASSES = parse_class_map_file(class_map)
    cfg["DATA"].NUM_CLASSES = len(cfg["DATA"].CLASSES)

    # detect objects in single image
    base_dir = os.path.dirname(os.path.abspath(__file__))
    image_path = os.path.join(
        base_dir,
        r"Steer_Bad_Relevant_output/testImages/Steer_Bad_Front_Zoom (269).jpg")
    rois, probs = od.evaluate_single_image(eval_model, image_path, cfg)
    bboxes, labels, scores = od.filter_results(rois, probs, cfg)

    # indices of detections whose label is not background (label 0)
    foreground = np.where(labels > 0)[0]
    print("#bboxes: before nms: {}, after nms: {}, foreground: {}".format(
        len(rois), len(bboxes), len(foreground)))
    for idx in foreground:
        box = [int(v) for v in bboxes[idx]]
        print("{:<12} (label: {:<2}), score: {:.3f}, box: {}".format(
            cfg["DATA"].CLASSES[labels[idx]], labels[idx], scores[idx], box))

    od.visualize_results(image_path,
                         bboxes,
                         labels,
                         scores,
                         cfg,
                         store_to_path="Steer_Bad_Relevant_output/output.jpg")
def prepare_train(cfg, use_arg_parser=True):
    """Prepare *cfg* for training: resolve data-file paths, select the
    pretrained base model, load the class map and apply fast-mode overrides.

    Mutates *cfg* in place.  *use_arg_parser* is currently unused; it is
    kept for interface compatibility with the other prepare_* helpers.

    Raises:
        RuntimeError: if the data directory does not exist.
    """
    cfg.MB_SIZE = 1
    cfg.NUM_CHANNELS = 3
    cfg.OUTPUT_PATH = "../"
    cfg["DATA"].MAP_FILE_PATH = "../"

    data_path = cfg["DATA"].MAP_FILE_PATH
    if not os.path.isdir(data_path):
        raise RuntimeError("Directory %s does not exist" % data_path)

    # make the map/ROI file entries absolute by prefixing the data directory
    # (only the TRAIN files are used in this variant; no TEST files here)
    cfg["DATA"].CLASS_MAP_FILE = os.path.join(data_path, cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].TRAIN_MAP_FILE = os.path.join(data_path, cfg["DATA"].TRAIN_MAP_FILE)
    cfg["DATA"].TRAIN_ROI_FILE = os.path.join(data_path, cfg["DATA"].TRAIN_ROI_FILE)

    # eval model file name encodes the base model and the training scheme
    cfg['MODEL_PATH'] = os.path.join(cfg.OUTPUT_PATH, "faster_rcnn_eval_{}_{}.model"
                                     .format(cfg["MODEL"].BASE_MODEL, "e2e" if cfg["CNTK"].TRAIN_E2E else "4stage"))

    # Base-model name -> pretrained model file.  Replaces the original chain
    # of repeated `if` statements, which also mixed tabs and spaces in their
    # indentation (a TabError under Python 3).
    base_model_paths = {
        "VGG16": "../VGG16_ImageNet_Caffe.model",
        "VGG19": "../VGG19_ImageNet_Caffe.model",
        "AlexNet": "../AlexNet_ImageNet_Caffe.model",
    }
    base_model = cfg["MODEL"].BASE_MODEL
    if base_model in base_model_paths:
        cfg['BASE_MODEL_PATH'] = base_model_paths[base_model]

    # class list is derived from the (now absolute) class map file
    cfg["DATA"].CLASSES = parse_class_map_file(cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].NUM_CLASSES = len(cfg["DATA"].CLASSES)

    if cfg["CNTK"].FAST_MODE:
        cfg["CNTK"].E2E_MAX_EPOCHS = 1
        cfg["CNTK"].RPN_EPOCHS = 1
        cfg["CNTK"].FRCN_EPOCHS = 1

    np.random.seed(seed=cfg.RND_SEED)
def prepare(cfg, use_arg_parser=True):
    """Prepare *cfg* for a run: set minibatch constants, resolve all
    data-file paths relative to the map-file directory, load the class map
    and apply fast-mode / determinism overrides.

    The statements below mutate *cfg* in place and depend on their order
    (e.g. MAP_FILE_PATH must be final before the file paths are joined).
    """
    cfg.MB_SIZE = 1
    cfg.NUM_CHANNELS = 3
    # results go into a timestamped sub-directory under the dataset location
    cfg.OUTPUT_PATH = os.path.join(cfg.DATA.MAP_FILE_PATH, "results", datetime.now().strftime("%d-%m-%Y-%H-%M"))
    # cfg["DATA"].MAP_FILE_PATH = os.path.join(abs_path, cfg["DATA"].MAP_FILE_PATH)
    running_locally = os.path.exists(cfg["DATA"].MAP_FILE_PATH)
    if not running_locally:
        # disable debug and plot outputs when running on GPU cluster
        cfg["CNTK"].DEBUG_OUTPUT = False
        cfg.VISUALIZE_RESULTS = False

    if use_arg_parser:
        parse_arguments(cfg)

    data_path = cfg["DATA"].MAP_FILE_PATH
    if not os.path.isdir(data_path):
        raise RuntimeError("Directory %s does not exist" % data_path)

    # make all map/ROI file entries absolute by prefixing the data directory
    cfg["DATA"].CLASS_MAP_FILE = os.path.join(data_path, cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].TRAIN_MAP_FILE = os.path.join(data_path, cfg["DATA"].TRAIN_MAP_FILE)
    cfg["DATA"].TEST_MAP_FILE = os.path.join(data_path, cfg["DATA"].TEST_MAP_FILE)
    cfg["DATA"].TRAIN_ROI_FILE = os.path.join(data_path, cfg["DATA"].TRAIN_ROI_FILE)
    cfg["DATA"].TEST_ROI_FILE = os.path.join(data_path, cfg["DATA"].TEST_ROI_FILE)

    # eval model file name encodes base model, training scheme and dataset
    cfg['MODEL_PATH'] = os.path.join(cfg.OUTPUT_PATH, "faster_rcnn_eval_{}_{}_{}.model"
                                     .format(cfg["MODEL"].BASE_MODEL, "e2e" if cfg["CNTK"].TRAIN_E2E else "4stage",
                                             cfg.DATA.DATASET))
    cfg['BASE_MODEL_PATH'] = os.path.join(cfg['DATA'].MAP_FILE_PATH,
                                          "pretrained-models",
                                          cfg["MODEL"].BASE_MODEL_FILE)

    # class list is derived from the (now absolute) class map file
    cfg["DATA"].CLASSES = parse_class_map_file(cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].NUM_CLASSES = len(cfg["DATA"].CLASSES)

    if cfg["CNTK"].FAST_MODE:
        cfg["CNTK"].E2E_MAX_EPOCHS = 1
        cfg["CNTK"].RPN_EPOCHS = 1
        cfg["CNTK"].FRCN_EPOCHS = 1

    if cfg["CNTK"].FORCE_DETERMINISTIC:
        force_deterministic_algorithms()
    np.random.seed(seed=cfg.RND_SEED)

    # NOTE(review): the `False and` guard permanently disables this debug
    # report; drop the `False and` to re-enable it.
    if False and cfg["CNTK"].DEBUG_OUTPUT:
        # report args
        print("Using the following parameters:")
        print("Flip image       : {}".format(cfg["TRAIN"].USE_FLIPPED))
        print("Train conv layers: {}".format(cfg.TRAIN_CONV_LAYERS))
        print("Random seed      : {}".format(cfg.RND_SEED))
        print("Momentum per MB  : {}".format(cfg["CNTK"].MOMENTUM_PER_MB))
        if cfg["CNTK"].TRAIN_E2E:
            print("E2E epochs       : {}".format(cfg["CNTK"].E2E_MAX_EPOCHS))
        else:
            print("RPN lr factor    : {}".format(cfg["CNTK"].RPN_LR_FACTOR))
            print("RPN epochs       : {}".format(cfg["CNTK"].RPN_EPOCHS))
            print("FRCN lr factor   : {}".format(cfg["CNTK"].FRCN_LR_FACTOR))
            print("FRCN epochs      : {}".format(cfg["CNTK"].FRCN_EPOCHS))
# Esempio n. 4 ("Example no. 4" — snippet separator left by the code scraper,
# followed by a stray "0"); commented out so the file parses as Python.
def prepare(cfg, use_arg_parser=True):
    """Prepare *cfg* for a Fast R-CNN run: create the Output directories
    when running locally, resolve all data-file paths, load the class map
    and apply fast-mode / determinism overrides.

    Mutates *cfg* in place and may change the process working directory.

    Raises:
        RuntimeError: if the resolved data directory does not exist.
    """
    cfg.MB_SIZE = 1
    cfg.NUM_CHANNELS = 3
    cfg.OUTPUT_PATH = os.path.join(abs_path, "Output")
    cfg["DATA"].MAP_FILE_PATH = os.path.join(abs_path, cfg["DATA"].MAP_FILE_PATH)
    running_locally = os.path.exists(cfg["DATA"].MAP_FILE_PATH)
    if running_locally:
        os.chdir(cfg["DATA"].MAP_FILE_PATH)
        if not os.path.exists(os.path.join(abs_path, "Output")):
            os.makedirs(os.path.join(abs_path, "Output"))
        if not os.path.exists(os.path.join(abs_path, "Output", cfg["DATA"].DATASET)):
            os.makedirs(os.path.join(abs_path, "Output", cfg["DATA"].DATASET))
    else:
        # disable debug and plot outputs when running on GPU cluster
        cfg["CNTK"].DEBUG_OUTPUT = False
        cfg.VISUALIZE_RESULTS = False

    if use_arg_parser:
        parse_arguments(cfg)

    data_path = cfg["DATA"].MAP_FILE_PATH
    if not os.path.isdir(data_path):
        raise RuntimeError("Directory %s does not exist" % data_path)

    # make all map/ROI file entries absolute by prefixing the data directory
    cfg["DATA"].CLASS_MAP_FILE = os.path.join(data_path, cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].TRAIN_MAP_FILE = os.path.join(data_path, cfg["DATA"].TRAIN_MAP_FILE)
    cfg["DATA"].TEST_MAP_FILE = os.path.join(data_path, cfg["DATA"].TEST_MAP_FILE)
    cfg["DATA"].TRAIN_ROI_FILE = os.path.join(data_path, cfg["DATA"].TRAIN_ROI_FILE)
    cfg["DATA"].TEST_ROI_FILE = os.path.join(data_path, cfg["DATA"].TEST_ROI_FILE)
    if cfg.USE_PRECOMPUTED_PROPOSALS:
        try:
            cfg["DATA"].TRAIN_PRECOMPUTED_PROPOSALS_FILE = os.path.join(data_path,
                                                                        cfg["DATA"].TRAIN_PRECOMPUTED_PROPOSALS_FILE)
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; only a missing config attribute/key should
        # trigger the hint below.
        except (AttributeError, KeyError):
            print("To use precomputed proposals please specify the following parameters in your configuration:\n"
                  "__C.DATA.TRAIN_PRECOMPUTED_PROPOSALS_FILE\n"
                  "__C.DATA.TEST_PRECOMPUTED_PROPOSALS_FILE")
            exit(-1)

    cfg['MODEL_PATH'] = os.path.join(cfg.OUTPUT_PATH, "fast_rcnn_eval_{}.model".format(cfg["MODEL"].BASE_MODEL))
    cfg['BASE_MODEL_PATH'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..",
                                          "PretrainedModels",
                                          cfg["MODEL"].BASE_MODEL_FILE)

    # class list is derived from the (now absolute) class map file
    cfg["DATA"].CLASSES = parse_class_map_file(cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].NUM_CLASSES = len(cfg["DATA"].CLASSES)

    if cfg["CNTK"].FAST_MODE:
        cfg["CNTK"].MAX_EPOCHS = 1

    if cfg["CNTK"].FORCE_DETERMINISTIC:
        force_deterministic_algorithms()
    np.random.seed(seed=cfg.RND_SEED)
# Esempio n. 5 ("Example no. 5" — snippet separator left by the code scraper,
# followed by a stray "0"); commented out so the file parses as Python.
def prepare(cfg, use_arg_parser=True):
    """Prepare *cfg* for a Fast R-CNN run: create the Output directories
    when running locally, resolve all data-file paths, load the class map
    and apply fast-mode / determinism overrides.

    Mutates *cfg* in place and may change the process working directory.

    Raises:
        RuntimeError: if the resolved data directory does not exist.
    """
    cfg.MB_SIZE = 1
    cfg.NUM_CHANNELS = 3
    cfg.OUTPUT_PATH = os.path.join(abs_path, "Output")
    cfg["DATA"].MAP_FILE_PATH = os.path.join(abs_path, cfg["DATA"].MAP_FILE_PATH)
    running_locally = os.path.exists(cfg["DATA"].MAP_FILE_PATH)
    if running_locally:
        os.chdir(cfg["DATA"].MAP_FILE_PATH)
        if not os.path.exists(os.path.join(abs_path, "Output")):
            os.makedirs(os.path.join(abs_path, "Output"))
        if not os.path.exists(os.path.join(abs_path, "Output", cfg["DATA"].DATASET)):
            os.makedirs(os.path.join(abs_path, "Output", cfg["DATA"].DATASET))
    else:
        # disable debug and plot outputs when running on GPU cluster
        cfg["CNTK"].DEBUG_OUTPUT = False
        cfg.VISUALIZE_RESULTS = False

    if use_arg_parser:
        parse_arguments(cfg)

    data_path = cfg["DATA"].MAP_FILE_PATH
    if not os.path.isdir(data_path):
        raise RuntimeError("Directory %s does not exist" % data_path)

    # make all map/ROI file entries absolute by prefixing the data directory
    cfg["DATA"].CLASS_MAP_FILE = os.path.join(data_path, cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].TRAIN_MAP_FILE = os.path.join(data_path, cfg["DATA"].TRAIN_MAP_FILE)
    cfg["DATA"].TEST_MAP_FILE = os.path.join(data_path, cfg["DATA"].TEST_MAP_FILE)
    cfg["DATA"].TRAIN_ROI_FILE = os.path.join(data_path, cfg["DATA"].TRAIN_ROI_FILE)
    cfg["DATA"].TEST_ROI_FILE = os.path.join(data_path, cfg["DATA"].TEST_ROI_FILE)
    if cfg.USE_PRECOMPUTED_PROPOSALS:
        try:
            cfg["DATA"].TRAIN_PRECOMPUTED_PROPOSALS_FILE = os.path.join(data_path, cfg["DATA"].TRAIN_PRECOMPUTED_PROPOSALS_FILE)
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; only a missing config attribute/key should
        # trigger the hint below.
        except (AttributeError, KeyError):
            print("To use precomputed proposals please specify the following parameters in your configuration:\n"
                  "__C.DATA.TRAIN_PRECOMPUTED_PROPOSALS_FILE\n"
                  "__C.DATA.TEST_PRECOMPUTED_PROPOSALS_FILE")
            exit(-1)

    cfg['MODEL_PATH'] = os.path.join(cfg.OUTPUT_PATH, "fast_rcnn_eval_{}.model".format(cfg["MODEL"].BASE_MODEL))
    cfg['BASE_MODEL_PATH'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", "PretrainedModels",
                                          cfg["MODEL"].BASE_MODEL_FILE)

    # class list is derived from the (now absolute) class map file
    cfg["DATA"].CLASSES = parse_class_map_file(cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].NUM_CLASSES = len(cfg["DATA"].CLASSES)

    if cfg["CNTK"].FAST_MODE:
        cfg["CNTK"].MAX_EPOCHS = 1

    if cfg["CNTK"].FORCE_DETERMINISTIC:
        force_deterministic_algorithms()
    np.random.seed(seed=cfg.RND_SEED)
def set_global_vars(use_arg_parser=True):
    """Populate the module-level ``globalvars`` dict with learning
    parameters taken from ``cfg``, optionally overridden from the command
    line, and resolve the map/ROI file paths under the data directory.

    Mutates module-level state (``globalvars``, the numpy RNG seed and,
    when ``-device`` is given, the CNTK default device); returns nothing.

    Raises:
        RuntimeError: if the (possibly overridden) data directory does not
        exist.
    """
    data_path = map_file_path

    # set and overwrite learning parameters
    globalvars['rpn_lr_factor'] = cfg["CNTK"].RPN_LR_FACTOR
    globalvars['frcn_lr_factor'] = cfg["CNTK"].FRCN_LR_FACTOR
    globalvars['e2e_lr_factor'] = cfg["CNTK"].E2E_LR_FACTOR
    globalvars['momentum_per_mb'] = cfg["CNTK"].MOMENTUM_PER_MB
    # fast mode forces a single epoch for every training stage
    globalvars['e2e_epochs'] = 1 if cfg["CNTK"].FAST_MODE else cfg[
        "CNTK"].E2E_MAX_EPOCHS
    globalvars[
        'rpn_epochs'] = 1 if cfg["CNTK"].FAST_MODE else cfg["CNTK"].RPN_EPOCHS
    globalvars['frcn_epochs'] = 1 if cfg["CNTK"].FAST_MODE else cfg[
        "CNTK"].FRCN_EPOCHS
    # NOTE(review): reads cfg.RNG_SEED here while other prepare() variants
    # in this file use cfg.RND_SEED — confirm which key this config defines.
    globalvars['rnd_seed'] = cfg.RNG_SEED
    globalvars['train_conv'] = cfg["CNTK"].TRAIN_CONV_LAYERS
    globalvars['train_e2e'] = cfg["CNTK"].TRAIN_E2E

    globalvars['fea_map_dim'] = cfg["CNTK"].FEA_MAP_DIM

    if use_arg_parser:
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '-datadir',
            '--datadir',
            help='Data directory where the ImageNet dataset is located',
            required=False,
            default=data_path)
        parser.add_argument('-outputdir',
                            '--outputdir',
                            help='Output directory for checkpoints and models',
                            required=False,
                            default=None)
        parser.add_argument('-logdir',
                            '--logdir',
                            help='Log file',
                            required=False,
                            default=None)
        parser.add_argument('-n',
                            '--num_epochs',
                            help='Total number of epochs to train',
                            type=int,
                            required=False,
                            default=cfg["CNTK"].E2E_MAX_EPOCHS)
        parser.add_argument('-m',
                            '--minibatch_size',
                            help='Minibatch size',
                            type=int,
                            required=False,
                            default=mb_size)
        parser.add_argument('-e',
                            '--epoch_size',
                            help='Epoch size',
                            type=int,
                            required=False,
                            default=epoch_size)
        parser.add_argument(
            '-q',
            '--quantized_bits',
            help='Number of quantized bits used for gradient aggregation',
            type=int,
            required=False,
            default='32')
        parser.add_argument(
            '-r',
            '--restart',
            help=
            'Indicating whether to restart from scratch (instead of restart from checkpoint file by default)',
            action='store_true')
        parser.add_argument(
            '-device',
            '--device',
            type=int,
            help="Force to run the script on a specified device",
            required=False,
            default=None)
        parser.add_argument('-rpnLrFactor',
                            '--rpnLrFactor',
                            type=float,
                            help="Scale factor for rpn lr schedule",
                            required=False)
        parser.add_argument('-frcnLrFactor',
                            '--frcnLrFactor',
                            type=float,
                            help="Scale factor for frcn lr schedule",
                            required=False)
        parser.add_argument('-e2eLrFactor',
                            '--e2eLrFactor',
                            type=float,
                            help="Scale factor for e2e lr schedule",
                            required=False)
        parser.add_argument('-momentumPerMb',
                            '--momentumPerMb',
                            type=float,
                            help="momentum per minibatch",
                            required=False)
        parser.add_argument('-e2eEpochs',
                            '--e2eEpochs',
                            type=int,
                            help="number of epochs for e2e training",
                            required=False)
        parser.add_argument('-rpnEpochs',
                            '--rpnEpochs',
                            type=int,
                            help="number of epochs for rpn training",
                            required=False)
        parser.add_argument('-frcnEpochs',
                            '--frcnEpochs',
                            type=int,
                            help="number of epochs for frcn training",
                            required=False)
        parser.add_argument('-rndSeed',
                            '--rndSeed',
                            type=int,
                            help="the random seed",
                            required=False)
        parser.add_argument('-trainConv',
                            '--trainConv',
                            type=int,
                            help="whether to train conv layers",
                            required=False)
        parser.add_argument('-trainE2E',
                            '--trainE2E',
                            type=int,
                            help="whether to train e2e (otherwise 4 stage)",
                            required=False)

        args = vars(parser.parse_args())

        # command-line values (when given) take precedence over cfg defaults
        if args['rpnLrFactor'] is not None:
            globalvars['rpn_lr_factor'] = args['rpnLrFactor']
        if args['frcnLrFactor'] is not None:
            globalvars['frcn_lr_factor'] = args['frcnLrFactor']
        if args['e2eLrFactor'] is not None:
            globalvars['e2e_lr_factor'] = args['e2eLrFactor']
        if args['momentumPerMb'] is not None:
            globalvars['momentum_per_mb'] = args['momentumPerMb']
        if args['e2eEpochs'] is not None:
            globalvars['e2e_epochs'] = args['e2eEpochs']
        if args['rpnEpochs'] is not None:
            globalvars['rpn_epochs'] = args['rpnEpochs']
        if args['frcnEpochs'] is not None:
            globalvars['frcn_epochs'] = args['frcnEpochs']
        if args['rndSeed'] is not None:
            globalvars['rnd_seed'] = args['rndSeed']
        if args['trainConv'] is not None:
            globalvars[
                'train_conv'] = True if args['trainConv'] == 1 else False
        if args['trainE2E'] is not None:
            globalvars['train_e2e'] = True if args['trainE2E'] == 1 else False

        if args['outputdir'] is not None:
            globalvars['output_path'] = args['outputdir']
        if args['logdir'] is not None:
            log_dir = args['logdir']
        if args['device'] is not None:
            # Setting one worker on GPU and one worker on CPU. Otherwise memory consumption is too high for a single GPU.
            if Communicator.rank() == 0:
                cntk.device.try_set_default_device(
                    cntk.device.gpu(args['device']))
            else:
                cntk.device.try_set_default_device(cntk.device.cpu())

        if args['datadir'] is not None:
            data_path = args['datadir']

    if not os.path.isdir(data_path):
        raise RuntimeError("Directory %s does not exist" % data_path)

    # make all map/ROI file entries absolute by prefixing the data directory
    globalvars['class_map_file'] = os.path.join(data_path,
                                                globalvars['class_map_file'])
    globalvars['train_map_file'] = os.path.join(data_path,
                                                globalvars['train_map_file'])
    globalvars['test_map_file'] = os.path.join(data_path,
                                               globalvars['test_map_file'])
    globalvars['train_roi_file'] = os.path.join(data_path,
                                                globalvars['train_roi_file'])
    globalvars['test_roi_file'] = os.path.join(data_path,
                                               globalvars['test_roi_file'])

    if cfg["CNTK"].FORCE_DETERMINISTIC:
        force_deterministic_algorithms()
    np.random.seed(seed=globalvars['rnd_seed'])
    globalvars['classes'] = parse_class_map_file(globalvars['class_map_file'])
    globalvars['num_classes'] = len(globalvars['classes'])

    if cfg["CNTK"].DEBUG_OUTPUT:
        # report args
        print("Using the following parameters:")
        print("Flip image       : {}".format(cfg["TRAIN"].USE_FLIPPED))
        print("Train conv layers: {}".format(globalvars['train_conv']))
        print("Random seed      : {}".format(globalvars['rnd_seed']))
        print("Momentum per MB  : {}".format(globalvars['momentum_per_mb']))
        if globalvars['train_e2e']:
            print("E2E epochs       : {}".format(globalvars['e2e_epochs']))
        else:
            print("RPN lr factor    : {}".format(globalvars['rpn_lr_factor']))
            print("RPN epochs       : {}".format(globalvars['rpn_epochs']))
            print("FRCN lr factor   : {}".format(globalvars['frcn_lr_factor']))
            print("FRCN epochs      : {}".format(globalvars['frcn_epochs']))
# image channel count used when building the input tensors
num_channels = cfg["CNTK"].NUM_CHANNELS

# dims_input -- (pad_width, pad_height, scaled_image_width, scaled_image_height, orig_img_width, orig_img_height)
dims_input_const = MinibatchData(Value(batch=np.asarray(
    [image_width, image_height, image_width, image_height, image_width, image_height], dtype=np.float32)), 1, 1, False)

# Color used for padding and normalization (Caffe model uses [102.98010, 115.94650, 122.77170])
img_pad_value = [103, 116, 123] if cfg["CNTK"].BASE_MODEL == "VGG16" else [114, 114, 114]
normalization_const = Constant([[[103]], [[116]], [[123]]]) if cfg["CNTK"].BASE_MODEL == "VGG16" else Constant([[[114]], [[114]], [[114]]])


# shared mutable state written by set_global_vars() and read by the helpers
globalvars = {}

# resolve the class map relative to the model directory and cache the class list
map_file_path = cfg["CNTK"].MODEL_DIRECTORY
globalvars['class_map_file'] = os.path.join(map_file_path, cfg["CNTK"].CLASS_MAP_FILE)
globalvars['classes'] = parse_class_map_file(globalvars['class_map_file'])
globalvars['num_classes'] = len(globalvars['classes'])
globalvars['temppath'] = cfg["CNTK"].TEMP_PATH
feature_node_name = cfg["CNTK"].FEATURE_NODE_NAME
model_path = os.path.join(cfg["CNTK"].MODEL_DIRECTORY, cfg["CNTK"].MODEL_NAME)

# helper function
def load_resize_and_pad(image_path, width, height, pad_value=114):
    """Load *image_path* with OpenCV in preparation for network input.

    NOTE(review): this definition is truncated here — the resize/pad logic
    that presumably follows is not visible in this file.
    """
    # paths containing "@" denote entries inside a zipped image archive,
    # which this visualization helper does not support
    if "@" in image_path:
        print("WARNING: zipped image archives are not supported for visualizing results.")
        exit(0)

    img = cv2.imread(image_path)
    img_width = len(img[0])
    img_height = len(img)
    # True when width is the longer side (landscape orientation)
    scale_w = img_width > img_height
def prepare(cfg, use_arg_parser=True):
    """Prepare *cfg* for an Azure-hosted Faster R-CNN run.

    Downloads the dataset zip and the pretrained base model from Azure blob
    storage, resolves all data-file paths, and applies fast-mode /
    determinism overrides.  Mutates *cfg* in place and may change the
    process working directory.

    Raises:
        RuntimeError: if the data directory does not exist after the
        (attempted) download.
    """
    cfg.MB_SIZE = 1
    cfg.NUM_CHANNELS = 3
    cfg.OUTPUT_PATH = os.path.join(abs_path, "Output")

    data_path = cfg["DATA"].MAP_FILE_PATH
    # fetch and unpack the dataset archive when it exists in blob storage
    if load_file_from_blob(cfg["AZURE"].ACCOUNT_NAME, \
                        cfg["AZURE"].DATA, cfg["DATA"].DATASET +".zip", data_path+".zip" ) is True:
        unzip_file(data_path + ".zip", data_path)

    if not os.path.isdir(data_path):
        raise RuntimeError("Directory %s does not exist" % data_path)

    running_locally = os.path.exists(cfg["DATA"].MAP_FILE_PATH)
    if running_locally:
        os.chdir(cfg["DATA"].MAP_FILE_PATH)
        if not os.path.exists(os.path.join(abs_path, "Output")):
            os.makedirs(os.path.join(abs_path, "Output"))
        if not os.path.exists(
                os.path.join(abs_path, "Output", cfg["DATA"].DATASET)):
            os.makedirs(os.path.join(abs_path, "Output", cfg["DATA"].DATASET))
    else:
        # disable debug and plot outputs when running on GPU cluster
        cfg["CNTK"].DEBUG_OUTPUT = False
        cfg.VISUALIZE_RESULTS = False

    if use_arg_parser:
        parse_arguments(cfg)

    # make all map/ROI file entries absolute by prefixing the data directory
    cfg["DATA"].CLASS_MAP_FILE = os.path.join(data_path,
                                              cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].TRAIN_MAP_FILE = os.path.join(data_path,
                                              cfg["DATA"].TRAIN_MAP_FILE)
    cfg["DATA"].TEST_MAP_FILE = os.path.join(data_path,
                                             cfg["DATA"].TEST_MAP_FILE)
    cfg["DATA"].TRAIN_ROI_FILE = os.path.join(data_path,
                                              cfg["DATA"].TRAIN_ROI_FILE)
    cfg["DATA"].TEST_ROI_FILE = os.path.join(data_path,
                                             cfg["DATA"].TEST_ROI_FILE)

    # model artifacts live in the AzureML shared directory (env var set by
    # the AzureML runtime)
    cfg['OUTPUT_MODEL_NAME'] = "faster_rcnn_eval_{}_{}.model".format(
        cfg["MODEL"].BASE_MODEL, "e2e" if cfg["CNTK"].TRAIN_E2E else "4stage")
    cfg['MODEL_PATH'] = os.path.join(
        os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'], "output",
        cfg['OUTPUT_MODEL_NAME'])
    cfg['BASE_MODEL_PATH'] = os.path.join(
        os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'], "base_model")

    # class list is derived from the (now absolute) class map file
    cfg["DATA"].CLASSES = parse_class_map_file(cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].NUM_CLASSES = len(cfg["DATA"].CLASSES)

    if cfg["CNTK"].FAST_MODE:
        cfg["CNTK"].E2E_MAX_EPOCHS = 1
        cfg["CNTK"].RPN_EPOCHS = 1
        cfg["CNTK"].FRCN_EPOCHS = 1

    if cfg["CNTK"].FORCE_DETERMINISTIC:
        force_deterministic_algorithms()
    np.random.seed(seed=cfg.RND_SEED)

    print("Downloading base model {}".format(cfg["MODEL"].BASE_MODEL_FILE))
    print("PRETRAINED_MODELS {} ".format(cfg["AZURE"].PRETRAINED_MODELS))
    print("BASE_MODEL_FILE {}".format(cfg["MODEL"].BASE_MODEL_FILE))
    # final base-model path is suffixed with the model file name so different
    # base models cached in the share directory do not collide
    cfg['BASE_MODEL_PATH'] = cfg['BASE_MODEL_PATH'] + '_' + cfg[
        "MODEL"].BASE_MODEL_FILE
    print("BASE_MODEL_PATH {}".format(cfg['BASE_MODEL_PATH']))
    # For testing to make sure this works when we don't have any stored model in shared directory
    # os.remove(cfg['BASE_MODEL_PATH'])
    load_file_from_blob(cfg["AZURE"].ACCOUNT_NAME, \
                        cfg["AZURE"].PRETRAINED_MODELS, cfg["MODEL"].BASE_MODEL_FILE, cfg['BASE_MODEL_PATH'] )
    # NOTE(review): the `False and` guard permanently disables this debug
    # report; drop the `False and` to re-enable it.
    if False and cfg["CNTK"].DEBUG_OUTPUT:
        # report args
        print("Using the following parameters:")
        print("Flip image       : {}".format(cfg["TRAIN"].USE_FLIPPED))
        print("Train conv layers: {}".format(cfg.TRAIN_CONV_LAYERS))
        print("Random seed      : {}".format(cfg.RND_SEED))
        print("Momentum per MB  : {}".format(cfg["CNTK"].MOMENTUM_PER_MB))
        if cfg["CNTK"].TRAIN_E2E:
            print("E2E epochs       : {}".format(cfg["CNTK"].E2E_MAX_EPOCHS))
        else:
            print("RPN lr factor    : {}".format(cfg["CNTK"].RPN_LR_FACTOR))
            print("RPN epochs       : {}".format(cfg["CNTK"].RPN_EPOCHS))
            print("FRCN lr factor   : {}".format(cfg["CNTK"].FRCN_LR_FACTOR))
            print("FRCN epochs      : {}".format(cfg["CNTK"].FRCN_EPOCHS))
# Esempio n. 9 ("Example no. 9" — snippet separator left by the code scraper,
# followed by a stray "0"); commented out so the file parses as Python.
def prepare(cfg, use_arg_parser=True):
    """Prepare *cfg* for a Faster R-CNN run: create the Output directories
    when running locally, resolve all data-file paths, load the class map
    and apply fast-mode / determinism overrides.

    Mutates *cfg* in place and may change the process working directory.

    Raises:
        RuntimeError: if the resolved data directory does not exist.
    """
    cfg.MB_SIZE = 1
    cfg.NUM_CHANNELS = 3
    cfg.OUTPUT_PATH = os.path.join(abs_path, "Output")
    cfg["DATA"].MAP_FILE_PATH = os.path.join(abs_path, cfg["DATA"].MAP_FILE_PATH)
    running_locally = os.path.exists(cfg["DATA"].MAP_FILE_PATH)
    if running_locally:
        os.chdir(cfg["DATA"].MAP_FILE_PATH)
        if not os.path.exists(os.path.join(abs_path, "Output")):
            os.makedirs(os.path.join(abs_path, "Output"))
        if not os.path.exists(os.path.join(abs_path, "Output", cfg["DATA"].DATASET)):
            os.makedirs(os.path.join(abs_path, "Output", cfg["DATA"].DATASET))
    else:
        # disable debug and plot outputs when running on GPU cluster
        cfg["CNTK"].DEBUG_OUTPUT = False
        cfg.VISUALIZE_RESULTS = False

    if use_arg_parser:
        parse_arguments(cfg)

    data_path = cfg["DATA"].MAP_FILE_PATH
    if not os.path.isdir(data_path):
        raise RuntimeError("Directory %s does not exist" % data_path)

    # make all map/ROI file entries absolute by prefixing the data directory
    cfg["DATA"].CLASS_MAP_FILE = os.path.join(data_path, cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].TRAIN_MAP_FILE = os.path.join(data_path, cfg["DATA"].TRAIN_MAP_FILE)
    cfg["DATA"].TEST_MAP_FILE = os.path.join(data_path, cfg["DATA"].TEST_MAP_FILE)
    cfg["DATA"].TRAIN_ROI_FILE = os.path.join(data_path, cfg["DATA"].TRAIN_ROI_FILE)
    cfg["DATA"].TEST_ROI_FILE = os.path.join(data_path, cfg["DATA"].TEST_ROI_FILE)

    # eval model file name encodes the base model and the training scheme
    cfg['MODEL_PATH'] = os.path.join(cfg.OUTPUT_PATH, "faster_rcnn_eval_{}_{}.model"
                                     .format(cfg["MODEL"].BASE_MODEL, "e2e" if cfg["CNTK"].TRAIN_E2E else "4stage"))
    cfg['BASE_MODEL_PATH'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", "PretrainedModels",
                                          cfg["MODEL"].BASE_MODEL_FILE)

    # class list is derived from the (now absolute) class map file
    cfg["DATA"].CLASSES = parse_class_map_file(cfg["DATA"].CLASS_MAP_FILE)
    cfg["DATA"].NUM_CLASSES = len(cfg["DATA"].CLASSES)

    if cfg["CNTK"].FAST_MODE:
        cfg["CNTK"].E2E_MAX_EPOCHS = 1
        cfg["CNTK"].RPN_EPOCHS = 1
        cfg["CNTK"].FRCN_EPOCHS = 1

    if cfg["CNTK"].FORCE_DETERMINISTIC:
        force_deterministic_algorithms()
    np.random.seed(seed=cfg.RND_SEED)

    # NOTE(review): the `False and` guard permanently disables this debug
    # report; drop the `False and` to re-enable it.
    if False and cfg["CNTK"].DEBUG_OUTPUT:
        # report args
        print("Using the following parameters:")
        print("Flip image       : {}".format(cfg["TRAIN"].USE_FLIPPED))
        print("Train conv layers: {}".format(cfg.TRAIN_CONV_LAYERS))
        print("Random seed      : {}".format(cfg.RND_SEED))
        print("Momentum per MB  : {}".format(cfg["CNTK"].MOMENTUM_PER_MB))
        if cfg["CNTK"].TRAIN_E2E:
            print("E2E epochs       : {}".format(cfg["CNTK"].E2E_MAX_EPOCHS))
        else:
            print("RPN lr factor    : {}".format(cfg["CNTK"].RPN_LR_FACTOR))
            print("RPN epochs       : {}".format(cfg["CNTK"].RPN_EPOCHS))
            print("FRCN lr factor   : {}".format(cfg["CNTK"].FRCN_LR_FACTOR))
            print("FRCN epochs      : {}".format(cfg["CNTK"].FRCN_EPOCHS))
# Esempio n. 10 ("Example no. 10" — snippet separator left by the code scraper,
# followed by a stray "0"); commented out so the file parses as Python.
                           dtype=np.float32)), 1, 1, False)

# Color used for padding and normalization (Caffe model uses [102.98010, 115.94650, 122.77170])
img_pad_value = [103, 116, 123
                 ] if cfg["CNTK"].BASE_MODEL == "VGG16" else [114, 114, 114]
normalization_const = Constant([[[103]], [[116]], [[
    123
]]]) if cfg["CNTK"].BASE_MODEL == "VGG16" else Constant([[[114]], [[114]],
                                                         [[114]]])

# shared mutable state written by set_global_vars() and read by the helpers
globalvars = {}

# resolve the class map relative to the model directory and cache the class list
map_file_path = cfg["CNTK"].MODEL_DIRECTORY
globalvars['class_map_file'] = os.path.join(map_file_path,
                                            cfg["CNTK"].CLASS_MAP_FILE)
globalvars['classes'] = parse_class_map_file(globalvars['class_map_file'])
globalvars['num_classes'] = len(globalvars['classes'])
globalvars['temppath'] = cfg["CNTK"].TEMP_PATH
feature_node_name = cfg["CNTK"].FEATURE_NODE_NAME
model_path = os.path.join(cfg["CNTK"].MODEL_DIRECTORY, cfg["CNTK"].MODEL_NAME)


# helper function
def load_resize_and_pad(image_path, width, height, pad_value=114):
    """Load *image_path* with OpenCV in preparation for network input.

    NOTE(review): this definition is truncated here — the resize/pad logic
    that presumably follows is not visible in this file.
    """
    # paths containing "@" denote entries inside a zipped image archive,
    # which this visualization helper does not support
    if "@" in image_path:
        print(
            "WARNING: zipped image archives are not supported for visualizing results."
        )
        exit(0)

    img = cv2.imread(image_path)
# Esempio n. 11 ("Example no. 11" — snippet separator left by the code scraper,
# followed by a stray "0"); commented out so the file parses as Python.
def set_global_vars(use_arg_parser = True):
    """Populate ``globalvars`` with learning parameters and data file paths.

    Defaults come from ``cfg``; when ``use_arg_parser`` is True, command-line
    options may override the learning-rate factors, epoch counts, momentum,
    random seed, conv/e2e training flags, output/log directories, device and
    data directory.

    Raises:
        RuntimeError: if the resolved data directory does not exist.

    NOTE(review): reads the module globals ``map_file_path``, ``mb_size`` and
    ``epoch_size``, and expects ``globalvars`` to already hold the keys
    'class_map_file', 'train_map_file', 'test_map_file', 'train_roi_file' and
    'test_roi_file' (relative file names) — confirm they are seeded before
    this is called.
    """
    data_path = map_file_path

    # set and overwrite learning parameters
    # FAST_MODE collapses every training phase to a single epoch (smoke test).
    globalvars['rpn_lr_factor'] = cfg["CNTK"].RPN_LR_FACTOR
    globalvars['frcn_lr_factor'] = cfg["CNTK"].FRCN_LR_FACTOR
    globalvars['e2e_lr_factor'] = cfg["CNTK"].E2E_LR_FACTOR
    globalvars['momentum_per_mb'] = cfg["CNTK"].MOMENTUM_PER_MB
    globalvars['e2e_epochs'] = 1 if cfg["CNTK"].FAST_MODE else cfg["CNTK"].E2E_MAX_EPOCHS
    globalvars['rpn_epochs'] = 1 if cfg["CNTK"].FAST_MODE else cfg["CNTK"].RPN_EPOCHS
    globalvars['frcn_epochs'] = 1 if cfg["CNTK"].FAST_MODE else cfg["CNTK"].FRCN_EPOCHS
    globalvars['rnd_seed'] = cfg.RNG_SEED
    globalvars['train_conv'] = cfg["CNTK"].TRAIN_CONV_LAYERS
    globalvars['train_e2e'] = cfg["CNTK"].TRAIN_E2E


    if use_arg_parser:
        parser = argparse.ArgumentParser()
        parser.add_argument('-datadir', '--datadir', help='Data directory where the ImageNet dataset is located',
                            required=False, default=data_path)
        parser.add_argument('-outputdir', '--outputdir', help='Output directory for checkpoints and models',
                            required=False, default=None)
        parser.add_argument('-logdir', '--logdir', help='Log file',
                            required=False, default=None)
        parser.add_argument('-n', '--num_epochs', help='Total number of epochs to train', type=int,
                            required=False, default=cfg["CNTK"].E2E_MAX_EPOCHS)
        parser.add_argument('-m', '--minibatch_size', help='Minibatch size', type=int,
                            required=False, default=mb_size)
        parser.add_argument('-e', '--epoch_size', help='Epoch size', type=int,
                            required=False, default=epoch_size)
        parser.add_argument('-q', '--quantized_bits', help='Number of quantized bits used for gradient aggregation', type=int,
                            required=False, default='32')
        parser.add_argument('-r', '--restart',
                            help='Indicating whether to restart from scratch (instead of restart from checkpoint file by default)',
                            action='store_true')
        parser.add_argument('-device', '--device', type=int, help="Force to run the script on a specified device",
                            required=False, default=None)
        parser.add_argument('-rpnLrFactor', '--rpnLrFactor', type=float, help="Scale factor for rpn lr schedule", required=False)
        parser.add_argument('-frcnLrFactor', '--frcnLrFactor', type=float, help="Scale factor for frcn lr schedule", required=False)
        parser.add_argument('-e2eLrFactor', '--e2eLrFactor', type=float, help="Scale factor for e2e lr schedule", required=False)
        parser.add_argument('-momentumPerMb', '--momentumPerMb', type=float, help="momentum per minibatch", required=False)
        parser.add_argument('-e2eEpochs', '--e2eEpochs', type=int, help="number of epochs for e2e training", required=False)
        parser.add_argument('-rpnEpochs', '--rpnEpochs', type=int, help="number of epochs for rpn training", required=False)
        parser.add_argument('-frcnEpochs', '--frcnEpochs', type=int, help="number of epochs for frcn training", required=False)
        parser.add_argument('-rndSeed', '--rndSeed', type=int, help="the random seed", required=False)
        parser.add_argument('-trainConv', '--trainConv', type=int, help="whether to train conv layers", required=False)
        parser.add_argument('-trainE2E', '--trainE2E', type=int, help="whether to train e2e (otherwise 4 stage)", required=False)

        args = vars(parser.parse_args())

        # Only arguments the user actually supplied override the cfg values.
        if args['rpnLrFactor'] is not None:
            globalvars['rpn_lr_factor'] = args['rpnLrFactor']
        if args['frcnLrFactor'] is not None:
            globalvars['frcn_lr_factor'] = args['frcnLrFactor']
        if args['e2eLrFactor'] is not None:
            globalvars['e2e_lr_factor'] = args['e2eLrFactor']
        if args['momentumPerMb'] is not None:
            globalvars['momentum_per_mb'] = args['momentumPerMb']
        if args['e2eEpochs'] is not None:
            globalvars['e2e_epochs'] = args['e2eEpochs']
        if args['rpnEpochs'] is not None:
            globalvars['rpn_epochs'] = args['rpnEpochs']
        if args['frcnEpochs'] is not None:
            globalvars['frcn_epochs'] = args['frcnEpochs']
        if args['rndSeed'] is not None:
            globalvars['rnd_seed'] = args['rndSeed']
        if args['trainConv'] is not None:
            globalvars['train_conv'] = True if args['trainConv']==1 else False
        if args['trainE2E'] is not None:
            globalvars['train_e2e'] = True if args['trainE2E']==1 else False

        if args['outputdir'] is not None:
            globalvars['output_path'] = args['outputdir']
        if args['logdir'] is not None:
            # NOTE(review): 'log_dir' is a local here, so this assignment is
            # lost on return unless a module global of that name is intended.
            log_dir = args['logdir']
        if args['device'] is not None:
            # Setting one worker on GPU and one worker on CPU. Otherwise memory consumption is too high for a single GPU.
            if Communicator.rank() == 0:
                cntk.device.try_set_default_device(cntk.device.gpu(args['device']))
            else:
                cntk.device.try_set_default_device(cntk.device.cpu())

        if args['datadir'] is not None:
            data_path = args['datadir']

    if not os.path.isdir(data_path):
        raise RuntimeError("Directory %s does not exist" % data_path)

    # Resolve the (relative) data file names against the data directory.
    globalvars['class_map_file'] = os.path.join(data_path, globalvars['class_map_file'])
    globalvars['train_map_file'] = os.path.join(data_path, globalvars['train_map_file'])
    globalvars['test_map_file'] = os.path.join(data_path, globalvars['test_map_file'])
    globalvars['train_roi_file'] = os.path.join(data_path, globalvars['train_roi_file'])
    globalvars['test_roi_file'] = os.path.join(data_path, globalvars['test_roi_file'])

    if cfg["CNTK"].FORCE_DETERMINISTIC:
        force_deterministic_algorithms()
    # Seed numpy so ROI sampling / image shuffling is reproducible.
    np.random.seed(seed=globalvars['rnd_seed'])
    globalvars['classes'] = parse_class_map_file(globalvars['class_map_file'])
    globalvars['num_classes'] = len(globalvars['classes'])

    if cfg["CNTK"].DEBUG_OUTPUT:
        # report args
        print("Using the following parameters:")
        print("Flip image       : {}".format(cfg["TRAIN"].USE_FLIPPED))
        print("Train conv layers: {}".format(globalvars['train_conv']))
        print("Random seed      : {}".format(globalvars['rnd_seed']))
        print("Momentum per MB  : {}".format(globalvars['momentum_per_mb']))
        if globalvars['train_e2e']:
            print("E2E epochs       : {}".format(globalvars['e2e_epochs']))
        else:
            print("RPN lr factor    : {}".format(globalvars['rpn_lr_factor']))
            print("RPN epochs       : {}".format(globalvars['rpn_epochs']))
            print("FRCN lr factor   : {}".format(globalvars['frcn_lr_factor']))
            print("FRCN epochs      : {}".format(globalvars['frcn_epochs']))
Esempio n. 12
0
def set_global_vars(use_arg_parser=True):
    """Initialize all module-level configuration for training/prediction.

    Sets the module globals declared below from ``cfg`` (image geometry,
    padding/normalization constants, dataset and model paths, model node
    names) and fills ``globalvars`` with learning parameters and resolved
    data file paths.

    When ``use_arg_parser`` is True, command-line options can additionally
    switch the script into prediction mode (``--prediction`` with paired
    ``--prediction_in``/``--prediction_out`` directories), select a device,
    load a YAML configuration, or adjust the ROI CSV output format.

    Raises:
        RuntimeError: for missing or mismatched prediction directories, or
            when the resolved data directory does not exist.

    Exits the process when ``--list_devices`` is given.
    """
    global globalvars
    global image_width
    global image_height
    global dims_input_const
    global img_pad_value
    global normalization_const
    global map_file_path
    global epoch_size
    global num_test_images
    global model_folder
    global base_model_file
    global feature_node_name
    global last_conv_node_name
    global start_train_conv_node_name
    global pool_node_name
    global last_hidden_node_name
    global roi_dim
    global prediction
    global prediction_in
    global prediction_out

    # Defaults for the parser-backed options that are read unconditionally
    # further below; without this the function raised NameError on 'args'
    # whenever use_arg_parser was False.  Values mirror the argparse defaults.
    args = {
        'no_headers': False,
        'output_width_height': False,
        'suppressed_labels': ""
    }

    if use_arg_parser:
        parser = argparse.ArgumentParser()
        parser.add_argument('-c',
                            '--config',
                            help='Configuration file in YAML format',
                            required=False,
                            default=None)
        parser.add_argument('-t',
                            '--device_type',
                            type=str,
                            help="The type of the device (cpu|gpu)",
                            required=False,
                            default="cpu")
        parser.add_argument(
            '-d',
            '--device',
            type=int,
            help="Force to run the script on a specified device",
            required=False,
            default=None)
        parser.add_argument('-l',
                            '--list_devices',
                            action='store_true',
                            help="Lists the available devices and exits",
                            required=False,
                            default=False)
        parser.add_argument('--prediction',
                            action='store_true',
                            help="Switches to prediction mode",
                            required=False,
                            default=False)
        parser.add_argument(
            '--prediction_in',
            action='append',
            type=str,
            help=
            "The input directory for images in prediction mode. Can be supplied mulitple times.",
            required=False,
            default=list())
        parser.add_argument(
            '--prediction_out',
            action='append',
            type=str,
            help=
            "The output directory for processed images and predicitons in prediction mode. Can be supplied mulitple times.",
            required=False,
            default=list())
        parser.add_argument(
            '--no_headers',
            action='store_true',
            help="Whether to suppress the header row in the ROI CSV files",
            required=False,
            default=False)
        parser.add_argument(
            '--output_width_height',
            action='store_true',
            help=
            "Whether to output width/height instead of second x/y in the ROI CSV files",
            required=False,
            default=False)
        parser.add_argument(
            '--suppressed_labels',
            type=str,
            help=
            "Comma-separated list of labels to suppress from being output in ROI CSV files.",
            required=False,
            default="")

        args = vars(parser.parse_args())

        # prediction mode?
        prediction = args['prediction']
        if prediction:
            # --prediction_in/--prediction_out may each be given multiple
            # times; they are matched pairwise, so validate both lists and
            # their pairing up front.
            prediction_in = args['prediction_in']
            if len(prediction_in) == 0:
                raise RuntimeError("No prediction input directory provided!")
            for p in prediction_in:
                if not os.path.exists(p):
                    raise RuntimeError(
                        "Prediction input directory '%s' does not exist" % p)
            prediction_out = args['prediction_out']
            if len(prediction_out) == 0:
                raise RuntimeError("No prediction output directory provided!")
            for p in prediction_out:
                if not os.path.exists(p):
                    raise RuntimeError(
                        "Prediction output directory '%s' does not exist" % p)
            if len(prediction_in) != len(prediction_out):
                raise RuntimeError(
                    "Number of input and output directories don't match: %i != %i"
                    % (len(prediction_in), len(prediction_out)))
            for i in range(len(prediction_in)):
                if prediction_in[i] == prediction_out[i]:
                    raise RuntimeError(
                        "Input and output directories #%i for prediction are the same: %s"
                        % ((i + 1), prediction_in[i]))

        if args['list_devices']:
            print("Available devices (Type - ID - description)")
            for d in cntk.device.all_devices():
                # renamed from 'type' to avoid shadowing the builtin
                if d.type() == 0:
                    dev_type = "cpu"
                elif d.type() == 1:
                    dev_type = "gpu"
                else:
                    dev_type = "<unknown:" + str(d.type()) + ">"
                print(dev_type + " - " + str(d.id()) + " - " + str(d))
            sys.exit(0)
        if args['config'] is not None:
            cfg_from_file(args['config'])
        if args['device'] is not None:
            if args['device_type'] == 'gpu':
                cntk.device.try_set_default_device(
                    cntk.device.gpu(args['device']))
            else:
                cntk.device.try_set_default_device(cntk.device.cpu())

    image_width = cfg["CNTK"].IMAGE_WIDTH
    image_height = cfg["CNTK"].IMAGE_HEIGHT

    # dims_input -- (pad_width, pad_height, scaled_image_width, scaled_image_height, orig_img_width, orig_img_height)
    dims_input_const = MinibatchData(
        Value(batch=np.asarray([
            image_width, image_height, image_width, image_height, image_width,
            image_height
        ],
                               dtype=np.float32)), 1, 1, False)

    # Color used for padding and normalization (Caffe model uses [102.98010, 115.94650, 122.77170])
    img_pad_value = [103, 116, 123] if cfg["CNTK"].BASE_MODEL == "VGG16" else [
        114, 114, 114
    ]
    normalization_const = Constant([[[103]], [[116]], [[
        123
    ]]]) if cfg["CNTK"].BASE_MODEL == "VGG16" else Constant([[[114]], [[114]],
                                                             [[114]]])

    # dataset specific parameters (file names only; resolved against the
    # data directory further below)
    map_file_path = os.path.join(abs_path, cfg["CNTK"].MAP_FILE_PATH)
    globalvars['class_map_file'] = cfg["CNTK"].CLASS_MAP_FILE
    globalvars['train_map_file'] = cfg["CNTK"].TRAIN_MAP_FILE
    globalvars['test_map_file'] = cfg["CNTK"].TEST_MAP_FILE
    globalvars['train_roi_file'] = cfg["CNTK"].TRAIN_ROI_FILE
    globalvars['test_roi_file'] = cfg["CNTK"].TEST_ROI_FILE
    globalvars['output_path'] = cfg["CNTK"].OUTPUT_PATH
    epoch_size = cfg["CNTK"].NUM_TRAIN_IMAGES
    num_test_images = cfg["CNTK"].NUM_TEST_IMAGES

    # model specific parameters
    if cfg["CNTK"].PRETRAINED_MODELS.startswith(".."):
        model_folder = os.path.join(abs_path, cfg["CNTK"].PRETRAINED_MODELS)
    else:
        model_folder = cfg["CNTK"].PRETRAINED_MODELS
    base_model_file = os.path.join(model_folder, cfg["CNTK"].BASE_MODEL_FILE)
    feature_node_name = cfg["CNTK"].FEATURE_NODE_NAME
    last_conv_node_name = cfg["CNTK"].LAST_CONV_NODE_NAME
    start_train_conv_node_name = cfg["CNTK"].START_TRAIN_CONV_NODE_NAME
    pool_node_name = cfg["CNTK"].POOL_NODE_NAME
    last_hidden_node_name = cfg["CNTK"].LAST_HIDDEN_NODE_NAME
    roi_dim = cfg["CNTK"].ROI_DIM

    data_path = map_file_path

    # set and overwrite learning parameters
    # FAST_MODE collapses every training phase to a single epoch (smoke test).
    globalvars['rpn_lr_factor'] = cfg["CNTK"].RPN_LR_FACTOR
    globalvars['frcn_lr_factor'] = cfg["CNTK"].FRCN_LR_FACTOR
    globalvars['e2e_lr_factor'] = cfg["CNTK"].E2E_LR_FACTOR
    globalvars['momentum_per_mb'] = cfg["CNTK"].MOMENTUM_PER_MB
    globalvars['e2e_epochs'] = 1 if cfg["CNTK"].FAST_MODE else cfg["CNTK"].E2E_MAX_EPOCHS
    globalvars['rpn_epochs'] = 1 if cfg["CNTK"].FAST_MODE else cfg["CNTK"].RPN_EPOCHS
    globalvars['frcn_epochs'] = 1 if cfg["CNTK"].FAST_MODE else cfg["CNTK"].FRCN_EPOCHS
    globalvars['rnd_seed'] = cfg.RNG_SEED
    globalvars['train_conv'] = cfg["CNTK"].TRAIN_CONV_LAYERS
    globalvars['train_e2e'] = cfg["CNTK"].TRAIN_E2E

    if not os.path.isdir(data_path):
        raise RuntimeError("Directory %s does not exist" % data_path)

    # Resolve the relative data file names against the data directory.
    globalvars['class_map_file'] = os.path.join(data_path,
                                                globalvars['class_map_file'])
    globalvars['train_map_file'] = os.path.join(data_path,
                                                globalvars['train_map_file'])
    globalvars['test_map_file'] = os.path.join(data_path,
                                               globalvars['test_map_file'])
    globalvars['train_roi_file'] = os.path.join(data_path,
                                                globalvars['train_roi_file'])
    globalvars['test_roi_file'] = os.path.join(data_path,
                                               globalvars['test_roi_file'])
    # ROI CSV output options (from the parser, or the defaults seeded above).
    globalvars['headers'] = not args['no_headers']
    globalvars['output_width_height'] = args['output_width_height']
    suppressed_labels = []
    if len(args['suppressed_labels']) > 0:
        suppressed_labels = args['suppressed_labels'].split(",")
    globalvars['suppressed_labels'] = suppressed_labels

    if cfg["CNTK"].FORCE_DETERMINISTIC:
        force_deterministic_algorithms()
    # Seed numpy so ROI sampling / image shuffling is reproducible.
    np.random.seed(seed=globalvars['rnd_seed'])
    globalvars['classes'] = parse_class_map_file(globalvars['class_map_file'])
    globalvars['num_classes'] = len(globalvars['classes'])

    if cfg["CNTK"].DEBUG_OUTPUT:
        # report args
        print("Using the following parameters:")
        print("Flip image       : {}".format(cfg["TRAIN"].USE_FLIPPED))
        print("Train conv layers: {}".format(globalvars['train_conv']))
        print("Random seed      : {}".format(globalvars['rnd_seed']))
        print("Momentum per MB  : {}".format(globalvars['momentum_per_mb']))
        if globalvars['train_e2e']:
            print("E2E epochs       : {}".format(globalvars['e2e_epochs']))
        else:
            print("RPN lr factor    : {}".format(globalvars['rpn_lr_factor']))
            print("RPN epochs       : {}".format(globalvars['rpn_epochs']))
            print("FRCN lr factor   : {}".format(globalvars['frcn_lr_factor']))
            print("FRCN epochs      : {}".format(globalvars['frcn_epochs']))