Code example #1
    def parse_arguments(self):
        '''
        Merge the scar.conf parameters, the cmd parameters and the yaml
        file parameters into a single dictionary.

        The precedence of the parameters is CMD >> YAML >> SCAR.CONF.
        That is, the CMD parameters override any other configuration,
        and the YAML parameters override the SCAR.CONF settings.
        '''
        merged_args = ConfigFileParser().get_properties()
        cmd_args = CommandParser(self).parse_arguments()
        if 'conf_file' in cmd_args['scar'] and cmd_args['scar']['conf_file']:
            yaml_args = YamlParser(cmd_args['scar']).parse_arguments()
            merged_args = utils.merge_dicts(yaml_args, merged_args)
        merged_args = utils.merge_dicts(cmd_args, merged_args)
        self.cloud_provider.parse_arguments(**merged_args)
        merged_args['scar']['func']()
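The precedence described in the docstring implies that `utils.merge_dicts(a, b)` keeps the values already present in its first argument. The sketch below is a minimal, hypothetical implementation consistent with that behavior; scar's actual `utils.merge_dicts` may differ (for example, in how it treats lists):

def merge_dicts(first, second):
    # Recursively merge `second` into `first`; on a key clash the value
    # already in `first` wins, which yields the CMD >> YAML >> SCAR.CONF
    # precedence when chained as in the example above.
    for key, value in second.items():
        if key not in first:
            first[key] = value
        elif isinstance(first[key], dict) and isinstance(value, dict):
            first[key] = merge_dicts(first[key], value)
    return first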
Code example #2
    def __init__(self, run_args, lite_mode=True):

        if lite_mode:
            # in lite mode, swap in the lightweight decoder implementation
            EnsembleModel.forward_decoder = forward_decoder

        # merge the defaults with the parsed command-line arguments
        run_args = merge_dicts(default_args, vars(run_args))
        self._fill_hardware_args(run_args)
        # expose the merged dict with attribute-style access
        self.args = Bunch(run_args)
        self._load_tokenizer()
        self._load_model(lite_mode)
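`Bunch` wraps the merged argument dict for attribute-style access. A minimal sketch following the common recipe (an assumption; the project's `Bunch` may add more behavior):

class Bunch:
    # Wrap a dict so its keys become attributes: Bunch({'a': 1}).a == 1
    def __init__(self, mapping):
        self.__dict__.update(mapping)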
Code example #3
File: cli.py  Project: opt9/scar
    def parse_arguments(self):
        '''Command parsing and selection'''
        try:
            cmd_args = vars(self.parser.parse_args())
            if 'func' not in cmd_args:
                raise excp.MissingCommandError()
            scar_args = self.parse_scar_args(cmd_args)
            aws_args = self.parse_aws_args(cmd_args)
            return utils.merge_dicts(scar_args, aws_args)
        except AttributeError as ae:
            # log a user-facing message plus the detailed parsing error,
            # then re-raise so the caller can handle the failure
            logger.error(
                "Incorrect arguments: use scar -h to see the options available",
                "Error parsing arguments: %s" % ae)
            raise
Code example #4
    def set_properties(self, args):
        # Merge the properties parsed from the command line into the existing ones
        self.properties = utils.merge_dicts(self.properties, vars(args))
        call_type = self.set_call_type(args.func.__name__)
        self.set_output_type()
        if ((call_type != CallType.LS) and (not self.delete_all())
                and (call_type != CallType.PUT)
                and (call_type != CallType.GET)):
            if (call_type == CallType.INIT):
                if (not self.get_property("name")) or (
                        self.get_property("name") == ""):
                    func_name = "function"
                    if self.get_property("image_id") != "":
                        func_name = self.get_property("image_id")
                    elif self.get_property("image_file") != "":
                        func_name = self.get_property("image_file").split(
                            '.')[0]
                    self.properties["name"] = self.create_function_name(
                        func_name)
                self.set_tags()

            self.check_function_name()
            function_name = self.get_property("name")
            validators.validate_function_name(function_name,
                                              self.get_property("name_regex"))

            self.set_environment_variables()
            self.properties["handler"] = function_name + ".lambda_handler"
            self.properties["log_group_name"] = '/aws/lambda/' + function_name

            if (call_type == CallType.INIT):
                self.set_function_code()

            if (call_type == CallType.RUN):
                if self.get_argument_value(args, 'run_script'):
                    file_content = utils.read_file(
                        self.get_property("run_script"), 'rb')
                    # encode to base64 (bytes), then decode those bytes to a
                    # string so that json can serialize the payload
                    # https://stackoverflow.com/questions/37225035/serialize-in-json-a-base64-encoded-data#37239382
                    parsed_script = utils.utf8_to_base64_string(file_content)
                    self.set_property('payload', {"script": parsed_script})

                if self.get_argument_value(args, 'c_args'):
                    parsed_cont_args = json.dumps(self.get_property("c_args"))
                    self.set_property('payload',
                                      {"cmd_args": parsed_cont_args})
Code example #5
File: lambdafunction.py  Project: rofrnnds/scar
    def set_properties(self, args):
        # Merge the properties parsed from the command line into the existing ones
        self.properties = utils.merge_dicts(self.properties, vars(args))
        call_type = self.set_call_type(args.func.__name__)
        self.set_output_type()
        if ((call_type != CallType.LS) and (not self.delete_all())
                and (call_type != CallType.PUT)
                and (call_type != CallType.GET)):
            if (call_type == CallType.INIT):
                if (not self.get_property("name")) or (
                        self.get_property("name") == ""):
                    func_name = "function"
                    if self.get_property("image_id") != "":
                        func_name = self.get_property("image_id")
                    elif self.get_property("image_file") != "":
                        func_name = self.get_property("image_file").split(
                            '.')[0]
                    self.properties["name"] = self.create_function_name(
                        func_name)
                self.set_tags()

            self.check_function_name()
            function_name = self.get_property("name")
            validators.validate_function_name(function_name,
                                              self.get_property("name_regex"))

            self.set_environment_variables()
            self.properties["handler"] = function_name + ".lambda_handler"
            self.properties["log_group_name"] = '/aws/lambda/' + function_name

            if (call_type == CallType.INIT):
                self.set_function_code()

            if (call_type == CallType.RUN):
                self.update_function_attributes(args)
                if self.get_argument_value(args, 'script'):
                    parsed_script = utils.escape_string(
                        self.get_property("script").read())
                    self.set_property('payload', {"script": parsed_script})
                if self.get_argument_value(args, 'cont_args'):
                    parsed_cont_args = utils.escape_list(
                        self.get_property("cont_args"))
                    self.set_property('payload',
                                      {"cmd_args": parsed_cont_args})
Code example #6
    def __init__(self, config):
        # check that every custom config key exists in the default config
        custom_keys = config.keys()
        if not all(key in ENV_DEFAULT_CONFIG for key in custom_keys):
            raise KeyError(
                "Custom environment configuration not found in default configuration."
            )
        self.config = merge_dicts(ENV_DEFAULT_CONFIG, config)
        self._process_and_validate_config()

        self.grid = None
        self.robot_positions = None
        self.can_cnt = 0
        self._timestep = 0

        # required attributes for Gym environments
        self.action_space = spaces.Discrete(NUM_ACTS)
        self.observation_space = self.get_observation_space()
        self.reward_range = (-float('inf'), float('inf'))
        self.metadata = {'render.modes': ['human', 'rgb_array']}
        self.spec = None
        self.viewer = None
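Rejecting unknown keys before merging means a typo in a custom config fails fast instead of being silently ignored. A self-contained sketch of the same pattern (names are illustrative, not from the project):

DEFAULTS = {'grid_size': 8, 'n_robots': 2}

def make_config(custom):
    unknown = set(custom) - set(DEFAULTS)
    if unknown:
        raise KeyError("Unknown config keys: {}".format(sorted(unknown)))
    # later dict wins, so the custom values override the defaults
    return {**DEFAULTS, **custom}

print(make_config({'grid_size': 10}))  # {'grid_size': 10, 'n_robots': 2}
# make_config({'gird_size': 10})       # KeyError: typo caught before merging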
Code example #7
def eval_language_metrics(checkpoint,
                          eval_data_loader,
                          opt,
                          model=None,
                          eval_mode="val"):
    """eval_mode can only be set to `val` here, as setting to `test` is cheating
    0, run inference
    1, Get METEOR, BLEU1-4, CIDEr scores
    2, Get vocab size, sentence length
    """
    translator = Translator(opt, checkpoint, model=model)
    json_res = run_translate(eval_data_loader, translator, opt=opt)
    res_filepath = os.path.abspath(
        opt.save_model + "_tmp_greedy_pred_{}.json".format(eval_mode))
    save_json(json_res, res_filepath, save_pretty=True)

    if opt.dset_name == "anet":
        reference_files_map = {
            "val": [
                os.path.join(opt.data_dir, e) for e in [
                    "anet_entities_val_1_para.json",
                    "anet_entities_val_2_para.json"
                ]
            ],
            "test": [
                os.path.join(opt.data_dir, e) for e in [
                    "anet_entities_test_1_para.json",
                    "anet_entities_test_2_para.json"
                ]
            ]
        }
    else:  # yc2
        reference_files_map = {
            "val":
            [os.path.join(opt.data_dir, "yc2_val_anet_format_para.json")]
        }

    # COCO language evaluation
    eval_references = reference_files_map[eval_mode]
    lang_filepath = res_filepath.replace(".json", "_lang.json")
    eval_cmd = [
        "python", "para-evaluate.py", "-s", res_filepath, "-o", lang_filepath,
        "-v", "-r"
    ] + eval_references
    subprocess.call(eval_cmd, cwd=opt.eval_tool_dir)

    # basic stats
    stat_filepath = res_filepath.replace(".json", "_stat.json")
    eval_stat_cmd = [
        "python", "get_caption_stat.py", "-s", res_filepath, "-r",
        eval_references[0], "-o", stat_filepath, "-v"
    ]
    subprocess.call(eval_stat_cmd, cwd=opt.eval_tool_dir)

    # repetition evaluation
    rep_filepath = res_filepath.replace(".json", "_rep.json")
    eval_rep_cmd = [
        "python", "evaluateRepetition.py", "-s", res_filepath, "-r",
        eval_references[0], "-o", rep_filepath
    ]
    subprocess.call(eval_rep_cmd, cwd=opt.eval_tool_dir)

    # save results
    logger.info("Finished eval {}.".format(eval_mode))
    metric_filepaths = [lang_filepath, stat_filepath, rep_filepath]
    all_metrics = merge_dicts([load_json(e) for e in metric_filepaths])

    all_metrics_filepath = res_filepath.replace(".json", "_all_metrics.json")
    save_json(all_metrics, all_metrics_filepath, save_pretty=True)
    return all_metrics, [res_filepath, all_metrics_filepath]
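Unlike scar's two-argument `utils.merge_dicts`, the `merge_dicts` used here takes a single list of dicts (one per metrics file). A minimal sketch of such a helper (an assumption; the project's version may differ):

def merge_dicts(list_dicts):
    # merge left to right; later dicts win on key clashes
    merged = {}
    for d in list_dicts:
        merged.update(d)
    return merged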
Code example #8
File: lambdafunction.py  Project: ixolt/scar
    def set_config_file_properties(self):
        config_file_props = ConfigFile().get_aws_props()
        self.properties = utils.merge_dicts(self.properties,
                                            config_file_props['lambda'])
        self.properties['iam'] = config_file_props['iam']
        self.properties['cloudwatch'] = config_file_props['cloudwatch']
Code example #9
def main():
    parser = argparse.ArgumentParser(description="translate.py")

    parser.add_argument("--eval_splits", type=str, nargs="+", default=["val", ],
                        choices=["val", "test"], help="evaluate on val/test set, yc2 only has val")
    parser.add_argument("--res_dir", required=True, help="path to dir containing model .pt file")
    parser.add_argument("--batch_size", type=int, default=100, help="batch size")

    # beam search configs
    parser.add_argument("--use_beam", action="store_true", help="use beam search, otherwise greedy search")
    parser.add_argument("--beam_size", type=int, default=2, help="beam size")
    parser.add_argument("--n_best", type=int, default=1, help="stop searching when get n_best from beam search")
    parser.add_argument("--min_sen_len", type=int, default=5, help="minimum length of the decoded sentences")
    parser.add_argument("--max_sen_len", type=int, default=30, help="maximum length of the decoded sentences")
    parser.add_argument("--block_ngram_repeat", type=int, default=0, help="block repetition of ngrams during decoding.")
    parser.add_argument("--length_penalty_name", default="none",
                        choices=["none", "wu", "avg"], help="length penalty to use.")
    parser.add_argument("--length_penalty_alpha", type=float, default=0.,
                        help="Google NMT length penalty parameter (higher = longer generation)")
    parser.add_argument("--eval_tool_dir", type=str, default="./densevid_eval")

    parser.add_argument("--no_cuda", action="store_true")
    parser.add_argument("--seed", default=2019, type=int)
    parser.add_argument("--debug", action="store_true")

    opt = parser.parse_args()
    opt.cuda = not opt.no_cuda

    # random seed
    random.seed(opt.seed)
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    checkpoint = torch.load(os.path.join(opt.res_dir, "model.chkpt"))

    # add some of the train configs
    train_opt = checkpoint["opt"]  # EDict(load_json(os.path.join(opt.res_dir, "model.cfg.json")))
    for k in train_opt.__dict__:
        if k not in opt.__dict__:
            setattr(opt, k, getattr(train_opt, k))
    print("train_opt", train_opt)

    decoding_strategy = "beam{}_lp_{}_la_{}".format(
        opt.beam_size, opt.length_penalty_name, opt.length_penalty_alpha) if opt.use_beam else "greedy"
    save_json(vars(opt),
              os.path.join(opt.res_dir, "{}_eval_cfg.json".format(decoding_strategy)),
              save_pretty=True)

    if opt.dset_name == "anet":
        reference_files_map = {
            "val": [os.path.join(opt.data_dir, e) for e in
                    ["anet_entities_val_1_para.json", "anet_entities_val_2_para.json"]],
            "test": [os.path.join(opt.data_dir, e) for e in
                     ["anet_entities_test_1_para.json", "anet_entities_test_2_para.json"]]}
    else:  # yc2
        reference_files_map = {"val": [os.path.join(opt.data_dir, "yc2_val_anet_format_para.json")]}
    for eval_mode in opt.eval_splits:
        print("Start evaluating {}".format(eval_mode))
        # add 10 to max_n_sen so that the inference stage uses all the segments
        eval_data_loader = get_data_loader(opt, eval_mode=eval_mode)
        eval_references = reference_files_map[eval_mode]

        # setup model
        translator = Translator(opt, checkpoint)

        pred_file = os.path.join(opt.res_dir, "{}_pred_{}.json".format(decoding_strategy, eval_mode))
        pred_file = os.path.abspath(pred_file)
        if not os.path.exists(pred_file):
            json_res = run_translate(eval_data_loader, translator, opt=opt)
            save_json(json_res, pred_file, save_pretty=True)
        else:
            print("Using existing prediction file at {}".format(pred_file))

        # COCO language evaluation
        lang_file = pred_file.replace(".json", "_lang.json")
        eval_command = ["python", "para-evaluate.py", "-s", pred_file, "-o", lang_file,
                        "-v", "-r"] + eval_references
        subprocess.call(eval_command, cwd=opt.eval_tool_dir)

        # basic stats
        stat_filepath = pred_file.replace(".json", "_stat.json")
        eval_stat_cmd = ["python", "get_caption_stat.py", "-s", pred_file, "-r", eval_references[0],
                         "-o", stat_filepath, "-v"]
        subprocess.call(eval_stat_cmd, cwd=opt.eval_tool_dir)

        # repetition evaluation
        rep_filepath = pred_file.replace(".json", "_rep.json")
        eval_rep_cmd = ["python", "evaluateRepetition.py", "-s", pred_file,
                        "-r", eval_references[0], "-o", rep_filepath]
        subprocess.call(eval_rep_cmd, cwd=opt.eval_tool_dir)

        metric_filepaths = [lang_file, stat_filepath, rep_filepath]
        all_metrics = merge_dicts([load_json(e) for e in metric_filepaths])
        all_metrics_filepath = pred_file.replace(".json", "_all_metrics.json")
        save_json(all_metrics, all_metrics_filepath, save_pretty=True)

        print("pred_file {} lang_file {}".format(pred_file, lang_file))
        print("[Info] Finished {}.".format(eval_mode))