Example No. 1
    def create_network(self) -> nx.Graph:
        graph = nx.Graph()
        with open(TIMES_JSON, "r") as f:
            travel_times = commentjson.load(f)["times"]
            # [
            #   ["bel", "dub", 2.10],
            #   ["dub", "c*k", 2.45],
            #           ...
            # ]
        with open(TIMES_JSON2, "r") as f:
            travel_times = travel_times + commentjson.load(f)["times"]

        # this is necessary because the json files store the travel durations
        # encoded as hours.minutes, e.g. 2.45 -> 2 hrs 45 mins
        def to_minutes(uvw):
            u, v, w = uvw
            # round() before summing: w % 1 * 100 carries floating-point error
            # (e.g. 1.13 % 1 * 100 == 12.999...), which int() would floor to 12
            return u, v, int(w // 1) * 60 + round(w % 1 * 100)

        times_in_mins = [to_minutes(time) for time in travel_times]
        graph.add_weighted_edges_from(times_in_mins)

        stations_in_the_graph = list(nx.nodes(graph))
        number_of_stations_in_graph = 0
        for station_code, station in self.stations.items():
            station.in_graph = station_code in stations_in_the_graph
            number_of_stations_in_graph += int(station.in_graph)
        print(
            f"{number_of_stations_in_graph} of {len(self.stations)} train stations "
            f"are added to the graph.")
        return graph
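A quick standalone check of the conversion, using the sample rows from the comment above (a sketch; the station codes are just the sample data):

# standalone check of the hours.minutes -> minutes conversion used above
def to_minutes(uvw):
    u, v, w = uvw
    return u, v, int(w // 1) * 60 + round(w % 1 * 100)

assert to_minutes(("bel", "dub", 2.10)) == ("bel", "dub", 130)
assert to_minutes(("dub", "c*k", 2.45)) == ("dub", "c*k", 165)
assert to_minutes(("a", "b", 1.13)) == ("a", "b", 73)  # round() guards float error here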
Example No. 2
def compare_generated_json_files():
    Log().info("Comparing json files")
    cpu_json_file = utils.try_open("zout_cpu.json", "r")
    gpu_json_file = utils.try_open("zout_gpu.json", "r")
    if (cpu_json_file is None) and (gpu_json_file is None):
        return Status.OK
    if cpu_json_file is None:
        Log().error("Cpu json out file is missing")
        return Status.FAILED
    if gpu_json_file is None:
        Log().error("Gpu json out file is missing")
        return Status.FAILED

    try:
        cpu_json = commentjson.load(cpu_json_file)
    except Exception as e:
        Log().error("Failed to load cpu_json : {}".format(e))
        return Status.FAILED
    try:
        gpu_json = commentjson.load(gpu_json_file)
    except Exception as e:
        Log().error("Failed to load gpu_json : {}".format(e))
        return Status.FAILED

    return compare_json_files(cpu_json, gpu_json)
Example No. 3
def load_experiment_config(s):
    if s.lower() == 'stdin':
        json = commentjson.load(sys.stdin)
    else:
        with open(u.validated_file(s)) as f:
            json = commentjson.load(f)

    return validate_experiment_config(json)
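Hypothetical call sketches for the loader above; `validate_experiment_config` and `u.validated_file` come from the surrounding project and are assumed importable:

# read a (possibly commented) JSON config from a file
config = load_experiment_config('experiment.json')

# or from piped input:  cat experiment.json | python run.py stdin
config = load_experiment_config('stdin')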
Example No. 4
def process_json(hardware_vendor, opersys, device_type, policy_file):
    data = None  # stays None when no vendor/os/device combination matches
    if hardware_vendor == 'cisco' and opersys == 'ios' and device_type == 'firewall':
        with open(
                "{}/superloop_code/policy/cisco/ios/firewall/{}".format(
                    get_home_directory(), policy_file), 'r') as json_file:
            data = commentjson.load(json_file)

    elif hardware_vendor == 'juniper' and opersys == 'junos' and device_type == 'vfirewall':
        with open(
                "{}/superloop_code/policy/juniper/junos/firewall/{}".format(
                    get_home_directory(), policy_file), 'r') as json_file:
            data = commentjson.load(json_file)

    return data
Example No. 5
def process_json(platform, os, device_type, policy_file):

    data = None  # avoid a NameError when no branch matches
    if (platform == 'cisco' and os == 'ios' and device_type == 'firewall'):
        with open("/policy/cisco/ios/firewall/{}".format(
                policy_file)) as json_file:
            data = commentjson.load(json_file)

    elif (platform == 'juniper' and os == 'junos'
          and device_type == 'vfirewall'):
        with open("/policy/juniper/junos/firewall/{}".format(
                policy_file)) as json_file:
            data = commentjson.load(json_file)

    return data
Example No. 6
def expireShares(baseDir: str, nickname: str, domain: str) -> None:
    """Removes expired items from shares
    """
    handleDomain = domain
    if ':' in handleDomain:
        handleDomain = domain.split(':')[0]
    handle = nickname + '@' + handleDomain
    sharesFilename = baseDir + '/accounts/' + handle + '/shares.json'
    if os.path.isfile(sharesFilename):
        with open(sharesFilename, 'r') as fp:
            sharesJson = commentjson.load(fp)
            currTime = int(time.time())
            deleteItemID = []
            for itemID, item in sharesJson.items():
                if currTime > item['expire']:
                    deleteItemID.append(itemID)
            if deleteItemID:
                for itemID in deleteItemID:
                    del sharesJson[itemID]
                    # remove any associated images
                    itemIDfile = baseDir + '/sharefiles/' + nickname + '/' + itemID
                    if os.path.isfile(itemIDfile + '.png'):
                        os.remove(itemIDfile + '.png')
                    if os.path.isfile(itemIDfile + '.jpg'):
                        os.remove(itemIDfile + '.jpg')
                    if os.path.isfile(itemIDfile + '.gif'):
                        os.remove(itemIDfile + '.gif')
                with open(sharesFilename, 'w') as fp:
                    commentjson.dump(sharesJson, fp, indent=4, sort_keys=True)
Example No. 7
    def load(cls, path):
        import commentjson

        with open(path) as f:
            data = commentjson.load(f)

        return cls(**freeze(data))
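The snippet relies on a `freeze` helper defined elsewhere in that project; a minimal guess at what such a helper might provide (an assumption, not the project's actual code):

# hypothetical freeze(): recursively replace lists with tuples so the
# loaded config can be treated as (mostly) immutable data before **-expansion
def freeze(obj):
    if isinstance(obj, dict):
        return {k: freeze(v) for k, v in obj.items()}
    if isinstance(obj, list):
        return tuple(freeze(v) for v in obj)
    return obj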
Example No. 8
def capabilitiesReceiveUpdate(baseDir: str,
                              nickname: str, domain: str, port: int,
                              actor: str,
                              newCapabilitiesId: str,
                              capabilityList: list, debug: bool) -> bool:
    """An update for a capability or the given actor has arrived
    """
    ocapFilename = getOcapFilename(baseDir, nickname, domain, actor, 'granted')
    if not ocapFilename:
        return False

    if not os.path.isfile(ocapFilename):
        if debug:
            print('DEBUG: capabilities file not found during update')
            print(ocapFilename)
        return False

    with open(ocapFilename, 'r') as fp:
        ocapJson = commentjson.load(fp)
    ocapJson['id'] = newCapabilitiesId
    ocapJson['capability'] = capabilityList

    with open(ocapFilename, 'w') as fp:
        commentjson.dump(ocapJson, fp, indent=4, sort_keys=False)
    return True
Example No. 9
def load_theme_settings():
    """Reload all settings from the default settings path.
    
    The exceptions are not immediately thrown since the first time the settings are loaded, the 
    app has not been initialized. So we wait until it is, and then display an error dialog if there
    is a previously recorded error.
    """
    global _theme, _settings, _settings_err
    cur_settings = dict()
    if os.path.isfile(GetThemeSettingsPath()):
        with open(GetThemeSettingsPath(), 'r') as fp:
            try:
                cur_settings = commentjson.load(fp)
            except JSONLibraryException as e:
                _settings_err = e
                if _theme is not None:
                    # theme has already been loaded, so don't modify it. Otherwise load all
                    # defaults.
                    return
    try:
        temp = root_schema.load(cur_settings)
        config = temp
    except ValidationError as e:
        _settings_err = e
        if _theme is not None:
            return
        config = root_schema.load({})
    _theme = copy.copy(config['theme'])
    _settings = copy.copy(BUILTIN_SETTINGS)
Example No. 10
def compute_occupation_stats(results_dir, patch=False):
    # Get labels
    data_file = os.path.join(results_dir, 'data.json')
    with open(data_file) as f:
        labels = json.load(f)['label_key_name']

    volume_differences = {label: 0 for label in labels}

    # Get file
    csv_pattern = 'Whole_image_*.csv' if patch else 'Val_*.csv'
    file = glob.glob(os.path.join(results_dir, csv_pattern))[-1]

    # Create dataframe out of CSV file
    df = pd.read_csv(file, index_col=0)

    # Compute stats of the (true - predicted) occupied-volume differences
    for label in labels:
        occupied = df[f'occupied_volume_{label}']
        predicted_occupied = df[f'predicted_occupied_volume_{label}']
        differences = occupied - predicted_occupied
        volume_differences[label] = {
            'mean': np.mean(differences),
            'std': np.std(differences),
            'min': np.min(differences),
            'max': np.max(differences),
        }

    # print(volume_differences)
    return volume_differences
Example No. 11
def compute_occupation_percentage(results_dir, use_all_files=False):
    # Get labels
    data_file = os.path.join(results_dir, 'data.json')
    with open(data_file) as f:
        labels = json.load(f)['label_key_name']

    occupations = {label: 0 for label in labels}

    # Get file
    files = glob.glob(os.path.join(results_dir, 'Train_*.csv'))

    if not use_all_files:
        files = files[:1]

    for file in files:
        # Create dataframe out of CSV file
        df = pd.read_csv(file, index_col=0)

        # Accumulate the fraction of samples with zero occupied volume
        for label in labels:
            occupied = df[f'occupied_volume_{label}']
            occupations[label] += (occupied == 0).sum() / len(occupied)

    for label in labels:
        occupations[label] *= 100 / len(files)

    print(occupations)
Example No. 12
    def __init__(self, reward, arguments, event):
        threading.Thread.__init__(self)  # call init of Parent-Class "Thread"
        self.event = event
        self.reward = reward
        self.arguments = arguments

        self.conf = commentjson.load(open(BALLTRACKERCONFIG))
        # initialise the colour values of the big sphere and the small ball
        # note: upper/lower bounds are in HSV format!
        self.color1 = eval(self.conf["color1"])
        self.lower1 = eval(self.conf["lower1"])
        self.upper1 = eval(self.conf["upper1"])
        self.color2 = eval(self.conf["color2"])
        self.lower2 = eval(self.conf["lower2"])
        self.upper2 = eval(self.conf["upper2"])
        # points for the small ball's "glow tail"
        self.pts = deque(maxlen=self.arguments["buffer"])

        # if a video path was not supplied, grab the reference
        # to the webcam
        if not self.arguments.get("video", False):
            self.vs = VideoStream(src=0).start()
        else:
            # otherwise, grab a reference to the video file
            self.vs = cv2.VideoCapture(self.arguments["video"])

        # allow the camera or video file to warm up
        time.sleep(self.conf["camera_warmup_time"])
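The `eval()` calls above will execute arbitrary code found in the config file. If the values are plain literals such as "(29, 86, 6)", `ast.literal_eval` is a safer drop-in; a sketch under that assumption (the HSV values are placeholders):

import ast

conf = {"lower1": "(29, 86, 6)", "upper1": "(64, 255, 255)"}  # placeholder values

# literal_eval only accepts Python literals (tuples, numbers, strings),
# never arbitrary expressions, so a malicious config cannot run code
lower1 = ast.literal_eval(conf["lower1"])
assert lower1 == (29, 86, 6)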
Example No. 13
def generate_master_boot_image(image_conf: click.File) -> None:
    """Generate MasterBootImage from json configuration file."""
    config_data = json.load(image_conf)
    config = elftosb_helper.MasterBootImageConfig(config_data)
    app = load_binary(config.input_image_file)
    load_addr = config.output_image_exec_address
    trustzone = _get_trustzone(config)
    image_type = _get_master_boot_image_type(config)
    dual_boot_version = config.dual_boot_version
    firmware_version = config.firmware_version

    cert_block = None
    signature_provider = None
    if MasterBootImageType.is_signed(image_type):
        cert_config = elftosb_helper.CertificateBlockConfig(config_data)
        cert_block = _get_cert_block_v31(cert_config)
        if cert_config.use_isk:
            signing_private_key_path = cert_config.isk_private_key_file
        else:
            signing_private_key_path = cert_config.main_root_private_key_file
        signature_provider = SignatureProvider.create(
            f'type=file;file_path={signing_private_key_path}')

    mbi = MasterBootImageN4Analog(app=app,
                                  load_addr=load_addr,
                                  image_type=image_type,
                                  trust_zone=trustzone,
                                  dual_boot_version=dual_boot_version,
                                  firmware_version=firmware_version,
                                  cert_block=cert_block,
                                  signature_provider=signature_provider)
    mbi_data = mbi.export()

    write_file(mbi_data, config.master_boot_output_file, mode='wb')
Example No. 14
def load_json():
    global settings
    try:
        json_file = open("AmongUsDB.json")
    except FileNotFoundError:
        json_file = open("AmongUsDB.json.json")

    settings_data = commentjson.load(json_file)
    json_file.close()
    print("Settings from file:", settings_data)

    class Settings:
        def __init__(self, json_data):
            self.meeting_end_mute_delay = int(
                json_data["preferences"]["meeting_end_mute_delay"])
            self.game_start_mute_delay = int(
                json_data["preferences"]["game_start_mute_delay"])
            self.tshark_location = json_data["settings"]["tshark_location"]
            self.interface = json_data["settings"]["interface"]
            self.server_port = int(json_data["settings"]["server_port"])
            self.client_port = json_data["settings"]["client_port"]
            if self.client_port != "":
                self.client_port = int(self.client_port)
            self.delay_between_mutes = float(
                json_data["settings"]["delay_between_mutes"])
            self.channel_voice_id = int(
                json_data["discord"]["channel_voice_id"])
            self.bot_token = json_data["discord"]["token"]
            self.unmute_users_on_other_channels = json_data["preferences"][
                "unmute_users_on_other_channels"]

    settings = Settings(settings_data)
Example No. 15
def setRole(baseDir: str,nickname: str,domain: str, \
            project: str,role: str) -> bool:
    """Set a person's role within a project
    Setting the role to an empty string or None will remove it
    """
    # avoid giant strings
    if len(role) > 128 or len(project) > 128:
        return False
    actorFilename = baseDir + '/accounts/' + nickname + '@' + domain + '.json'
    if not os.path.isfile(actorFilename):
        return False
    with open(actorFilename, 'r') as fp:
        actorJson = commentjson.load(fp)
        if role:
            # add the role
            if project == 'instance' and role == 'moderator':
                addModerator(baseDir, nickname, domain)
            if actorJson['roles'].get(project):
                if role not in actorJson['roles'][project]:
                    actorJson['roles'][project].append(role)
            else:
                actorJson['roles'][project] = [role]
        else:
            # remove the role
            if project == 'instance':
                removeModerator(baseDir, nickname)
            if actorJson['roles'].get(project):
                actorJson['roles'][project].remove(role)
                # if the project contains no roles then remove it
                if len(actorJson['roles'][project]) == 0:
                    del actorJson['roles'][project]
        with open(actorFilename, 'w') as fp:
            commentjson.dump(actorJson, fp, indent=4, sort_keys=False)
    return True
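Hypothetical calls against the function above; the base directory and account names are placeholders:

# grant a role, then remove it again (None removes, per the docstring)
setRole('/var/epicyon', 'alice', 'example.com', 'instance', 'moderator')
setRole('/var/epicyon', 'alice', 'example.com', 'instance', None)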
Example No. 16
def load_json():
    global settings
    try:
        json_file = open("settings.json", encoding="utf8")
    except FileNotFoundError:
        json_file = open("settings.json.json", encoding="utf8")

    settings_data = commentjson.load(json_file)
    json_file.close()
    print("Settings from file:", settings_data)

    class Settings:
        def __init__(self, json_data):
            self.bot_server_port = json_data["settings"]["bot_server_port"]
            if self.bot_server_port == "":
                self.bot_server_port = int(os.environ['PORT'])
            else:
                self.bot_server_port = int(self.bot_server_port)
            self.bot_server_url = json_data["settings"]["bot_server_url"]

            self.bot_token = json_data["discord"]["token"]
            self.room_creation_voice_id = json_data["discord"]["room_creation_voice_id"]
            self.general_voice_id = json_data["discord"]["general_voice_id"]
            self.room_category_id = int(json_data["discord"]["room_category_id"])
            self.max_users_in_room = int(json_data["discord"]["max_users_in_room"])
            self.room_prefix = json_data["discord"]["room_prefix"]
            self.room_creation_role = json_data["discord"]["room_creation_role"]

            if self.general_voice_id != "":
                self.general_voice_id = int(self.general_voice_id)
            if self.room_creation_voice_id != "":
                self.room_creation_voice_id = int(self.room_creation_voice_id)


    settings = Settings(settings_data)
Example No. 17
async def main():
    print(f"Starting {PROJECT_NAME}...")
    bot.launch_time = datetime.utcnow()
    bot.debug_mode = '--debug' in argv
    set_base_directories(bot)

    bot_data = {}
    data_filenames = ["auth.jsonc", "quotes.jsonc"]

    if bot.debug_mode:
        print("Running in debug mode.")
        config_name = "beta.jsonc"
        db_name = "dbBeta.sqlite3"
    else:
        config_name = "config.jsonc"
        db_name = "db.sqlite3"

    db_migration_setup(db_name)
    db_file = Path(DATA_DIR, db_name)
    data_filenames.insert(0, config_name)

    for filename in data_filenames:
        with open(Path(CONFIG_DIR, filename), encoding="UTF-8") as json_file:
            bot_data.update(commentjson.load(json_file))
    bot.__dict__.update(bot_data)

    async with aiosqlite.connect(db_file) as conn, bot:
        bot.database_conn = conn

        await create_database_schema(conn)
        await bot.load_all_extensions()
        await bot.start(bot.koa['token'])
Example No. 18
def load_experiment_config(s):
    if isinstance(s, str) and s.lower() == 'stdin':
        json = commentjson.load(sys.stdin)
    else:
        json = ua.argparse_schema(us.Json)(s)

    return validate_experiment_config(json)
Example No. 19
def load_fixtures():
    """
        Destroy the current database contents and load fixtures
        supplied in ./tests/fixtures/ instead.
    """
    import glob
    from flask_fixtures.loaders import JSONLoader
    from flask_fixtures import load_fixtures as flask_load_fixtures

    db.drop_all()
    db.create_all()

    _fx_path = os.path.join(os.path.dirname(__file__), 'tmp', 'fixtures')

    if not os.path.isdir(_fx_path):
        os.mkdir(_fx_path)

    for fixture_dir in app.config.get('FIXTURES_DIRS', ['./tests/fixtures/']):
        for fixture_file in glob.glob(fixture_dir + '/*.json'):
            with open(fixture_file, 'r') as x:
                target_file = os.path.join(_fx_path,
                                           os.path.basename(fixture_file))

                contents = commentjson.load(x)

                with open(target_file, 'w') as y:
                    json.dump(contents, y)

            fixtures = JSONLoader().load(target_file)
            flask_load_fixtures(db, fixtures)
            db.session.commit()
Example No. 20
def compute_bin_dice_score_stats(results_dir, patch=False):
    # Get labels
    data_file = os.path.join(results_dir, 'data.json')
    with open(data_file) as f:
        labels = json.load(f)['label_key_name']

    dice_scores = {label: 0 for label in labels}

    # Get file
    csv_pattern = 'Whole_image_*.csv' if patch else 'Val_*.csv'
    file = glob.glob(os.path.join(results_dir, csv_pattern))[-1]

    # Create dataframe out of CSV file
    df = pd.read_csv(file, index_col=0)

    # Compute dice scores (1 - dice loss)
    for label in labels:
        dice_loss = df[f'metric_bin_dice_loss_{label}']
        dice_scores[label] = {
            'mean': 1 - np.mean(dice_loss),
            'std': np.std(dice_loss),
            'min': 1 - np.max(dice_loss),
            'max': 1 - np.min(dice_loss)
        }

    # print(dice_scores)
    return dice_scores
Example No. 21
 def loadf(self, path: str) -> T:
     if not os.path.isfile(path):
         raise Exception(f"Path {path} does not exist.")
     data = None
     with open(path, 'r') as file:
         data = json.load(file)
     return self.load(data)
Example No. 22
def get_sample_names(sample_names_path, configurations):
    if sample_names_path:
        sample_names_file = open(sample_names_path, "r")
        sample_names_json = commentjson.load(sample_names_file)
        return set(sample_names_json["names"])
    else:
        return {config["name"] for config in configurations}
Example No. 23
def main(_):
    utils.set_gpus_to_use()

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)

    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start finetuning")
    do_finetuning(hypes)
Example No. 24
def main(args):
    macros = []
    json_config = None
    with open(args.json_config_file, 'r') as f:
        json_config = commentjson.load(f)

    if not isinstance(json_config, list):
        json_config = [json_config]

    schema = DirectionalMacroConfigSchema()
    for directional_config in json_config:
        macro = DirectionalMacro(schema.load(directional_config))
        print("Mapping pairs:")
        for from_key, to_key in directional_config["direction_pairs"]:
            print(f"  {from_key} => {to_key}")
        print("Spell rotation:")
        for rotation in directional_config["spell_key_rotation"]:
            print(f"  {rotation}")
        macro.hook_hotkey()
        macros.append(macro)

    try:
        print('Press [Enter] to exit.')
        keyboard.wait('enter')
    finally:
        for macro in macros:
            macro.unhook_hotkey()
Example No. 25
    def rest(self):
        """ REST endpoints from '.ini' files """

        logger.debug("Trying configurations from '%s' dir" % REST_CONFIG)

        files = []
        if os.path.exists(REST_INIT):
            import commentjson as json
            with open(REST_INIT) as f:
                mydict = json.load(f)
                for name, jfile in iteritems(mydict):
                    files.append(os.path.join(REST_CONFIG, jfile))
        # What if the user does not specify anything?
        else:
            # # ALL ?
            # logger.debug("Reading all resources config files")
            # import glob
            # files = glob.glob(os.path.join(REST_CONFIG, "*") + ".ini")

            # # ONLY THE EXAMPLE
            files.append(os.path.join(REST_CONFIG, DEFAULT_REST_CONFIG))
        logger.debug("Resources files: '%s'" % files)

        resources = []
        for ini_file in files:
            logger.info("REST configuration file '%s'" % ini_file)
            # Add all resources from this single ini file
            resources.extend(self.single_rest(ini_file))

        return resources
Example No. 26
def read_config(file_path):

    """
    Parses config file.
    """
    with open(file_path) as config_json:
        return commentjson.load(config_json)
Example No. 27
    def load_ami_cache(template, create_missing_files=True):
        """
        Method gets the ami cache from the file locally and adds a mapping for ami ids per region into the template
        This depends on populating ami_cache.json with the AMI ids that are output by the packer scripts per region
        @param template The template to attach the AMI mapping to
        @param create_missing_files File loading policy: if True, write the factory-default cache when no file is found
        """
        file_path = None

        # Users can provide override ami_cache in their project root
        local_amicache = os.path.join(os.getcwd(), res.DEFAULT_AMI_CACHE_FILENAME)
        if os.path.isfile(local_amicache):
            file_path = local_amicache

        # Or sibling to the executing class
        elif os.path.isfile(res.DEFAULT_AMI_CACHE_FILENAME):
            file_path = res.DEFAULT_AMI_CACHE_FILENAME

        if file_path:
            with open(file_path, 'r') as json_file:
                json_data = json.load(json_file)
        elif create_missing_files:
            json_data = res.FACTORY_DEFAULT_AMI_CACHE
            with open(res.DEFAULT_AMI_CACHE_FILENAME, 'w') as f:
                f.write(json.dumps(res.FACTORY_DEFAULT_AMI_CACHE, indent=4, separators=(',', ': ')))
        else:
            raise IOError(res.DEFAULT_AMI_CACHE_FILENAME + ' could not be found')

        template.add_ami_mapping(json_data)
Example No. 28
def savePlayer(player, masterDB, path = str(Config.get('Players', 'Location')) + "/"):
	#print(path)
	DB = loadPlayersDB(forceLowercase = False)
	for p in DB:
		if (player['name'] + ".player").lower() == p.lower():
			#print("found the file")
			#print(p)
			with open(path + p, "r") as read_file:
				temp = commentjson.load(read_file)
			#print(temp)
			silentRemove(path + player['name'] + ".player")
			#print("removed file")
			newPlayer = deepcopy(temp)
			#print(newPlayer)
			#newPlayer['pwd'] = hash_password(temp['pwd'])
			newPlayer['pwd'] = temp['pwd']
			for key in newPlayer:
				if key != "pwd":
					# print(key)
					newPlayer[key] = player[key]
			#print(newPlayer)
			#print("Saving player state")
			with open(path + player['name'] + ".player", 'w') as fp:
				commentjson.dump(newPlayer, fp)
			#print("Updating playerd DB")
			masterDB = loadPlayersDB()
Example No. 29
 def test_load(self):
     for file_ in self.files:
         rfp = open(os.path.join(self.path, '%s-commented.json' % file_),
                    'r')
         uncommented = self.test_json[file_]['uncommented']
         assert commentjson.load(rfp) == json.loads(uncommented)
         rfp.close()
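For reference, a minimal standalone version of what this test asserts, assuming commentjson is installed:

import json
import commentjson

commented = """{
    "a": 1,  // JavaScript-style comment
    "b": 2   # Python-style comment
}"""

# commentjson strips the comments, so the result matches the plain-JSON parse
assert commentjson.loads(commented) == json.loads('{"a": 1, "b": 2}')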
Example No. 30
def _decode_json_file(path=None, ignore_comments=True):
    """
    Read all settings from VSCode.

    Args

        path (str):
            path of the settings file

        ignore_comments (bool):
            JSON doesn't support comments; strip them from the file to avoid errors when parsing

    Returns

        dict:
            dict of settings
    """
    path = _settings_path(path)

    try:
        if os.path.isfile(path):
            if ignore_comments:
                return commentjson.load(salt.utils.files.fopen(path))
            else:
                return salt.utils.json.load(salt.utils.files.fopen(path))
        else:
            return {}
    except Exception:
        raise salt.exceptions.CommandExecutionError(
            "{} is not a valid json file".format(path))
Example No. 32
def old_get_params(fname, defaults_fname=None, proto=False):
    """
    Returns an object with fields defined for every value in the json file.  Hardcodes its own file location
    to the 'params' entry.
    proto: whether this is a prototype params file or not.  If it is, then we do not unroll stages (because it is flat)
    """
    with open(fname, 'r') as f:
        params = json.load(f)
        if 'wildcards' in params:
            params = resolve_wildcards(params['wildcards'], params)
        if defaults_fname is not None:
            with open(defaults_fname, 'r') as def_f:
                default_params = json.load(def_f)
                params = merge_defaults(default_params, params)
        params = _json_object_hook(params, fname, proto=proto)
        return params
Example No. 33
def update_config_files(config_files, new_values, save_dir=None):
    """

    Parameters
    ----------
    config_files : :obj:`dict`
        absolute paths to base config files
    new_values : :obj:`dict` of :obj:`dict`
        keys correspond to those in :obj:`config_files`; values are dicts with key-value pairs
        defining which keys in the config file are updated with which values
    save_dir : :obj:`str` or :obj:`NoneType`, optional
        if not None, directory in which to save updated config files; filename will be same as
        corresponding base json

    Returns
    -------
    :obj:`tuple`
        (updated config dicts, updated config files)

    """
    new_config_dicts = {}
    new_config_files = {}
    for config_name, config_file in config_files.items():
        # load base config file into dict
        config_dict = commentjson.load(open(config_file, 'r'))
        # change key/value pairs
        for key, val in new_values[config_name].items():
            config_dict[key] = val
        new_config_dicts[config_name] = config_dict
        # save as new config file in save_dir
        if save_dir is not None:
            filename = os.path.join(save_dir, os.path.basename(config_file))
            new_config_files[config_name] = filename
            json.dump(config_dict, open(filename, 'w'))
    return new_config_dicts, new_config_files
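A hypothetical usage sketch for the function above; paths, config names, and keys are placeholders:

new_dicts, new_files = update_config_files(
    config_files={'model': '/base/configs/model.json'},
    new_values={'model': {'learning_rate': 1e-4, 'n_epochs': 50}},
    save_dir='/tmp/updated_configs',
)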
Example No. 34
    def test_load_with_kwargs(self):
        def test_hook(loaded_dict):
            return {}
        test_kwargs = dict(object_hook=test_hook)
        rfp = open(os.path.join(self.path, 'sample-commented.json'), 'r')
        uncommented = self.test_json['sample']['uncommented']

        assert commentjson.load(rfp, **test_kwargs) == {}
Example No. 35
	def overrides(self):
		arkMode = os.environ.get('ARK_MODE', None)
		if arkMode:
			try:
				with open(self.rootDir+'/'+ arkMode + '.json') as f:
					extraSettings = json.load(f)
					self.settings.update(extraSettings)
			except:
				pass
Example No. 36
def read_files(path):
    """ All user specifications """
    sections = ['content', 'models', 'options', 'frameworks']
    myjson = {}
    for section in sections:
        filename = os.path.join(CONFIG_PATH, path, section + "." + JSON_EXT)
        with open(filename) as f:
            myjson[section] = json.load(f)
        # if section == 'frameworks':
        #     print(myjson[section])
    return myjson
Example No. 37
 def overrides(self):
     if self.user:
         # the pathname is reset to the user specific files
         self.pathname = self.rootDir + "/" + self.appName + "." + self.user + ".json"
         try:
             with open(self.pathname) as f:
                 extraSettings = json.load(f)
                 self.settings.update(extraSettings)
                 for setting in extraSettings:
                     setattr(self, setting, self.get(setting))
         except:
             raise IOError("This user does not exist yet!")
Example No. 38
def schema_and_tables(fileschema):
    """
    This function can recover basic data for my JSON resources
    """
    template = None
    with open(os.path.join(JSONS_PATH, fileschema + JSONS_EXT)) as f:
        template = json.load(f)
    reference_schema = convert_to_marshal(template)
    label = os.path.splitext(
        os.path.basename(fileschema))[0].lower()

    return label, template, reference_schema
Example No. 39
 def set(self, key, value=None):
     # "r+" rather than "w+": "w+" truncates the file before json.load can read it
     with open(self.pathname, "r+") as f:
         extraSettings = json.load(f)
         if value:
             self.settings[key] = value
             extraSettings[key] = value
             setattr(self, key, self.get(key))
         else:
             self.settings.update(key)
             extraSettings.update(key)
             for i in key:
                 setattr(self, i, self.get(i))
         # rewind and rewrite, dropping any leftover bytes of the old content
         f.seek(0)
         json.dump(extraSettings, f, indent=4, sort_keys=True)
         f.truncate()
Example No. 40
    def run(self, edit):

        for fileName in os.listdir(users_path):
            if fileName.endswith(".sublime-settings"):
                print("running alllight on settings file: " + fileName)

                with open (os.path.join(users_path,fileName), "r+") as setup:
                    try:
                        setupObject = commentjson.load(setup)
                        self.setToLight(setupObject, fileName)
                    except:
                        # todo - maybe pass out message? could come from elsewhere
                        print("json parsing error in file: "+fileName)
Example No. 41
 def handle(self, *args, **options):
     shutil.os.chdir(PROJECT_PATH)
     package = {}
     configs = django_apps.get_app_configs()
     for config in configs:
         package_path = os.path.join(config.path, 'package.json')
         try:
             with open(package_path) as data_file:
                 data = commentjson.load(data_file)
         except IOError:
             continue
         deep_merge_dicts(package, data)
     with open('package.json', 'w') as outfile:
         json.dump(package, outfile)
Example No. 42
def getConfig():	
	import sys

	import commentjson as cj

	from constants import CONFIGFILE
	from utils import loginfo, logerr

	try:
		config = cj.load(open(CONFIGFILE))	
	except Exception as e:
		logerr("Error while reading config file.")
		logerr(e)
		sys.exit()

	return config	
Example No. 43
def read_files(path):
    """ All user specifications """
    sections = [
        # Basic options
        'content', 'models', 'options',
        # Framework specific and user custom files
        'frameworks',
        # Choose the blueprint to work with
        'blueprints/js_init'
    ]
    myjson = {}
    for section in sections:
        filename = os.path.join(CONFIG_PATH, path, section + "." + JSON_EXT)
        with open(filename) as f:
            name = section.split('/')[0]
            myjson[name] = json.load(f)

        # if section == 'frameworks':
        #     print(myjson[section])
    return myjson
Example No. 44
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start training")
    train.do_training(hypes)
Example No. 45
def getJobs(state, config):
	for instance in os.listdir(JOBINSTANCES):
		job = newjob()
		# jc.load() needs a file object, not the bare filename from listdir()
		with open(os.path.join(JOBINSTANCES, instance)) as f:
			job.init(jc.load(f))
		state.runnable.append(job)

	if (state.next_stage == 'IDEAL') or (state.next_stage == 'EXEC'):
		for job in state.runnable:
			job_dir = CHKPTS + '/%d' % job.uid
			job.chkpt_loc = job_dir + '/%s' % os.listdir(job_dir)[0]
			job.init = True
			job.pick_count += 1
			if (state.next_stage == 'EXEC'):
				job.stats = getstats(job.uid)
				job.throughput_ideal = getthroughput(config, job.stats)
				job.throughput_cur = getthroughput(config, job.stats)
				job.ideal = True
				job.pick_count += 1

	sort_job_by_arrival(state)

	return state
Example No. 46
def runTracking(options, trackingGraph, weights=None):
    """
    Track the given graph with the given weights, if None the weights will be loaded from a json file.
    **Returns** the tracking result dictionary
    """

    getLogger().info("Run tracking...")
    if weights is None:
        getLogger().info("Loading weights from " + options.weight_json_filename)
        with open(options.weight_json_filename, 'r') as f:
            weights = json.load(f)

        # if withDivisions:
        #     weights = {"weights" : [10, 10, 10, 500, 500]}
        # else:
        #     weights = {"weights" : [10, 10, 500, 500]}
    else:
        getLogger().info("Using learned weights!")

    if options.use_flow_solver:
        import dpct
        result = dpct.trackFlowBased(trackingGraph.model, weights)
    else:
        try:
            import multiHypoTracking_with_cplex as mht
        except ImportError:
            try:
                import multiHypoTracking_with_gurobi as mht
            except ImportError:
                raise ImportError("No version of ILP solver found")
        result = mht.track(trackingGraph.model, weights)
    
    if options.result_json_filename is not None:
        writeToFormattedJSON(options.result_json_filename, result)

    return result
Example No. 47
                        help='Filename of the json file containing results')
    parser.add_argument('--label-image-file', required=True, type=str, dest='ilp_filename',
                        help='Filename of the ilastik-style segmentation HDF5 file')
    parser.add_argument('--label-image-path', dest='label_img_path', type=str,
                        default='/ObjectExtraction/LabelImage/0/[[%d, 0, 0, 0, 0], [%d, %d, %d, %d, 1]]',
                        help='internal hdf5 path to label image')
    parser.add_argument('--plugin-paths', dest='pluginPaths', type=str, nargs='+',
                        default=[os.path.abspath('../hytra/plugins')],
                        help='A list of paths to search for plugins for the tracking pipeline.')
    parser.add_argument('--h5-event-out-dir', type=str, dest='out_dir', default='.', help='Output directory for HDF5 files')
    parser.add_argument("--verbose", dest='verbose', action='store_true', default=False)
    
    args, unknown = parser.parse_known_args()

    with open(args.model_filename, 'r') as f:
        model = json.load(f)

    with open(args.result_filename, 'r') as f:
        result = json.load(f)
        assert(result['detectionResults'] is not None)
        assert(result['linkingResults'] is not None)

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    logging.getLogger('json_result_to_events.py').debug("Ignoring unknown parameters: {}".format(unknown))

    traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap = hytra.core.jsongraph.getMappingsBetweenUUIDsAndTraxels(model)
    # timesteps = [t for t in traxelIdPerTimestepToUniqueIdMap.keys()]
    # there might be empty frames. We want them as output too. A little messy, but:
Example No. 48
def read_codio_json():
    try:
        with open(run_file, 'r') as file:
            return json.load(file, object_pairs_hook=OrderedDict)
    except IOError:
        return OrderedDict()
Example No. 49
def readFromJSON(filename):
    ''' Read a dictionary from JSON '''
    with open(filename, 'r') as f:
        return json.load(f)
Example No. 50
try:
    import commentjson as json
except ImportError:
    import json
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Compare two JSON graphs')
    parser.add_argument('--modelA', required=True, type=str, dest='modelFilenameA',
                        help='Filename of the json model description')
    parser.add_argument('--modelB', required=True, type=str, dest='modelFilenameB',
                        help='Filename of the second json model file')
    
    args = parser.parse_args()

    print("Loading model A: " + args.modelFilenameA)
    with open(args.modelFilenameA, 'r') as f:
        modelA = json.load(f)

    traxelIdPerTimestepToUniqueIdMap = modelA['traxelToUniqueId']
    timesteps = [t for t in traxelIdPerTimestepToUniqueIdMap.keys()]
    uuidToTraxelMapA = {}
    for t in timesteps:
        for i in traxelIdPerTimestepToUniqueIdMap[t].keys():
            uuid = traxelIdPerTimestepToUniqueIdMap[t][i]
            if uuid not in uuidToTraxelMapA:
                uuidToTraxelMapA[uuid] = []
            uuidToTraxelMapA[uuid].append((int(t), int(i)))

    print("Loading model B: " + args.modelFilenameB)
    with open(args.modelFilenameB, 'r') as f:
        modelB = json.load(f)
Example No. 51
def run_pipeline(options, unknown):
    """
    Run the complete tracking pipeline by invoking the different steps.
    Using the `do-SOMETHING` switches one can configure which parts of the pipeline are run.

    **Params:**

    * `options`: the options of the tracking script as returned from argparse
    * `unknown`: unknown parameters read from the config file, needed in case merger resolving is supposed to be run.

    """

    params = convertToDict(unknown)
    
    if options.do_extract_weights:
        logging.info("Extracting weights from ilastik project...")
        weights = hytra.core.ilastik_project_options.extractWeightDictFromIlastikProject(options.ilastik_tracking_project)
    else:
        with open(options.weight_filename, 'r') as f:
            weights = json.load(f)

    if options.do_create_graph:
        logging.info("Create hypotheses graph...")

        import hytra.core.probabilitygenerator as probabilitygenerator
        from hytra.core.ilastik_project_options import IlastikProjectOptions
        ilpOptions = IlastikProjectOptions()
        ilpOptions.labelImagePath = params[str('label-image-path')]
        ilpOptions.labelImageFilename = params[str('label-image-file')]
        ilpOptions.rawImagePath = params[str('raw-data-path')]
        ilpOptions.rawImageFilename = params[str('raw-data-file')]
        try:
            ilpOptions.rawImageAxes = params[str('raw-data-axes')]
        except:
            ilpOptions.rawImageAxes = 'txyzc'

        ilpOptions.sizeFilter = [int(params[str('min-size')]), 100000]

        if 'object-count-classifier-file' in params:
            ilpOptions.objectCountClassifierFilename = params[str('object-count-classifier-file')]
        else:
            ilpOptions.objectCountClassifierFilename = options.ilastik_tracking_project

        withDivisions = 'without-divisions' not in params
        if withDivisions:
            if 'division-classifier-file' in params:
                ilpOptions.divisionClassifierFilename = params[str('division-classifier-file')]
            else:
                ilpOptions.divisionClassifierFilename = options.ilastik_tracking_project
        else:
            ilpOptions.divisionClassifierFilename = None

        probGenerator = probabilitygenerator.IlpProbabilityGenerator(ilpOptions, 
                                              pluginPaths=[str('../hytra/plugins')],
                                              useMultiprocessing=False)

        # if time_range is not None:
        #     traxelstore.timeRange = time_range

        probGenerator.fillTraxels(usePgmlink=False)
        fieldOfView = constructFov(probGenerator.shape,
                                   probGenerator.timeRange[0],
                                   probGenerator.timeRange[1],
                                   [probGenerator.x_scale,
                                   probGenerator.y_scale,
                                   probGenerator.z_scale])

        hypotheses_graph = IlastikHypothesesGraph(
            probabilityGenerator=probGenerator,
            timeRange=probGenerator.timeRange,
            maxNumObjects=int(params[str('max-number-objects')]),
            numNearestNeighbors=int(params[str('max-nearest-neighbors')]),
            fieldOfView=fieldOfView,
            withDivisions=withDivisions,
            divisionThreshold=0.1
        )

        withTracklets = True
        if withTracklets:
            hypotheses_graph = hypotheses_graph.generateTrackletGraph()

        hypotheses_graph.insertEnergies()
        trackingGraph = hypotheses_graph.toTrackingGraph()
    else:
        trackingGraph = JsonTrackingGraph(model_filename=options.model_filename)

    if options.do_convexify:
        logging.info("Convexifying graph energies...")
        trackingGraph.convexifyCosts()

    # get model out of trackingGraph
    model = trackingGraph.model

    if options.do_tracking:
        logging.info("Run tracking...")
        if options.solver == "flow-based":
            result = dpct.trackFlowBased(model, weights)
        elif options.solver == "ilp":
            try:
                import multiHypoTracking_with_cplex as mht
            except ImportError:
                try:
                    import multiHypoTracking_with_gurobi as mht
                except ImportError:
                    raise ImportError("Could not find multi hypotheses tracking ilp solver")
            result = mht.track(model, weights)
            
        hytra.core.jsongraph.writeToFormattedJSON(options.result_filename, result)
        
        if hypotheses_graph:
            # insert the solution into the hypotheses graph and from that deduce the lineages
            hypotheses_graph.insertSolution(result)
            hypotheses_graph.computeLineage()

    if options.do_merger_resolving:
        logging.info("Run merger resolving")
        trackingGraph = JsonTrackingGraph(model=model, result=result)
        merger_resolver = JsonMergerResolver(
            trackingGraph,
            ilpOptions.labelImageFilename,
            ilpOptions.labelImagePath,
            params[str('out-label-image-file')],
            ilpOptions.rawImageFilename,
            ilpOptions.rawImagePath,
            ilpOptions.rawImageAxes,
            [str('../hytra/plugins')],
            True)
        ilpOptions.labelImagePath = params[str('label-image-path')]
        ilpOptions.labelImageFilename = params[str('label-image-file')]
        ilpOptions.rawImagePath = params[str('raw-data-path')]
        ilpOptions.rawImageFilename = params[str('raw-data-file')]
        try:
            ilpOptions.rawImageAxes = params[str('raw-data-axes')]
        except:
            ilpOptions.rawImageAxes = 'txyzc'
        merger_resolver.run(None,  None)
Example No. 52
 def load(self):
     try:
         with open(self.pathname) as f:
             self.settings = json.load(f)
     except:
         raise
Example No. 53
def load_dataset(config, variables=None):
    """
    Loads a dataset according to a 
    configuration file
    
    Args:
        config (str): json style config file
    """

    # FIXME: os.path exits tests
    cfg = commentjson.load(open(config))
    categories = dict()
    weightfunctions = dict()
    models = dict()
    files_basepath = cfg["files_basepath"]
    for cat in cfg["categories"].keys():
        thiscat = cfg["categories"][cat]
        if thiscat["datatype"] == "simulation":
            categories[cat] = c.Simulation(cat)
            # remember that json keys are strings, so
            # convert to int
            datasets = {int(x): int(thiscat["datasets"][x]) for x in thiscat["datasets"]}
            categories[cat].get_files(
                os.path.join(files_basepath, thiscat["subpath"]),
                prefix=thiscat["file_prefix"],
                datasets=datasets,
                ending=thiscat["file_type"],
            )
            try:
                fluxclass, flux = thiscat["model"].split(".")
                models[cat] = getattr(dict(inspect.getmembers(fluxes))[fluxclass], flux)
            except ValueError:
                Logger.warning(
                    "{} does not seem to be a valid model for {}. This might cause troubles. If not, it is probably fine!".format(
                        thiscat["model"], cat
                    )
                )
                models[cat] = None
            weightfunctions[cat] = dict(inspect.getmembers(wgt))[thiscat["model_method"]]
        elif thiscat["datatype"] == "data":
            categories[cat] = c.Data(cat)
            categories[cat].get_files(
                os.path.join(files_basepath, thiscat["subpath"]),
                prefix=thiscat["file_prefix"],
                ending=thiscat["file_type"],
            )
            models[cat] = float(thiscat["livetime"])
            weightfunctions[cat] = dict(inspect.getmembers(wgt))[thiscat["model_method"]]

        elif thiscat["datatype"] == "reweighted":
            pass
        else:
            raise TypeError("Data type not understood. Has to be either 'simulation', 'reweighted' or 'data'!!")
    # at last we can take care of reweighted categories
    for cat in cfg["categories"].keys():
        thiscat = cfg["categories"][cat]
        if thiscat["datatype"] == "reweighted":
            categories[cat] = c.ReweightedSimulation(cat, categories[thiscat["parent"]])
            if thiscat["model"]:
                fluxclass, flux = thiscat["model"].split(".")
                models[cat] = getattr(dict(inspect.getmembers(fluxes))[fluxclass], flux)
                weightfunctions[cat] = dict(inspect.getmembers(wgt))[thiscat["model_method"]]
        elif thiscat["datatype"] in ["data", "simulation"]:
            pass
        else:
            raise TypeError("Data type not understood. Has to be either 'simulation', 'reweighted' or 'data'!!")

    # combined categories
    combined_categories = dict()
    for k in combined_categories.keys():
        combined_categories[k] = [categories[l] for l in cfg["combined_categories"]]

    # import variable defs
    vardefs = __import__(cfg["variable_definitions"])

    dataset = ds.Dataset(*categories.values(), combined_categories=combined_categories)
    dataset.read_variables(vardefs, names=variables)
    dataset.set_weightfunction(weightfunctions)
    dataset.get_weights(models=models)
    return dataset
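A minimal sketch of the config shape load_dataset expects, inferred from the keys it reads above; every name and value here is a placeholder:

example_cfg = {
    "files_basepath": "/data",
    "variable_definitions": "my_variable_defs",  # module name handed to __import__
    "categories": {
        "mc": {
            "datatype": "simulation",       # or "data" / "reweighted"
            "subpath": "sim",
            "file_prefix": "run_",
            "file_type": ".h5",
            "datasets": {"11069": 1000},    # string keys, converted to int
            "model": "SomeFluxClass.some_flux",
            "model_method": "some_weight_fn",
        }
    },
}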
Example No. 54
 def test_load(self):
     for file_ in self.files:
         rfp = open(os.path.join(self.path, '%s-commented.json' % file_),
                    'r')
         uncommented = self.test_json[file_]['uncommented']
         assert commentjson.load(rfp) == json.loads(uncommented)
Example No. 55
 def read_complex_config(self, configfile):
     """ A more complex configuration is available in JSON format """
     content = {}
     with open(configfile) as fp:
         content = json.load(fp)
     return content
Example No. 56
# Start timer.
start_time = time.time( )

print ""
print "******************************************************************"
print "                          Input parameters                        "
print "******************************************************************"
print ""

# Parse JSON configuration file
# Raise exception if wrong number of inputs are provided to script
if len(sys.argv) != 2:
		raise Exception("Only provide a JSON config file as input!")

json_data = open(sys.argv[1])
config = commentjson.load(json_data)
json_data.close()
pprint(config)

# Get plotting packages
import matplotlib

# If user's computer does not have a GUI/display then the TKAgg will not be used
if config['display'] == 'True':
		matplotlib.use('TkAgg')
else:
		matplotlib.use('Agg')

import matplotlib.colors
import matplotlib.axes
import matplotlib.lines as mlines
Example No. 57
     Compare two result json files, usually one of those is the ground truth...
     """, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
 parser.add_argument('--gt', dest='gtFilename', type=str, required=True, 
                     help='Filename of the ground truth result file')
 parser.add_argument('--result', dest='resultFilename', type=str, required=True, 
                     help='Filename of the JSON file of the results')
 args = parser.parse_args()
 
 # gtFilename = '/Users/chaubold/hci/data/virginie/test/generatedGroundTruth_withLoG.json'
 # resultFilename = '/Users/chaubold/hci/data/virginie/test/result.json'
 
 gtFilename = args.gtFilename
 resultFilename = args.resultFilename
 
 with open(gtFilename, 'r') as f:
     gt = json.load(f)
 
 with open(resultFilename, 'r') as f:
     result = json.load(f)
 
 gtEvents = extractEventLists(gt)
 resultEvents = extractEventLists(result)
 
 #%%
 totalTp = 0    
 totalFp = 0
 totalFn = 0
 totalGtEvents = 0
 totalResultEvents= 0
 
 for i,n in enumerate(['detections', 'moves', 'divisions']):
Example No. 58
# Check if the configuration file exists.
if not isfile(configFilename):
    print 'The file "' + \
            configFilename + \
            '" does not exist. Please create this file and fill in the required data.'
    sys.exit(1)

try:
    configFile = open(configFilename,'r')

except IOError:
    print 'File "' + configFilename + '" not found!'
    sys.exit(1)
else:
    with configFile:
        configData = commentjson.load(configFile)
        if not configData.has_key(authDelegateKey):
            print 'The config file "' + \
                    configFilename + \
                    '" is missing the field "' + \
                    authDelegateKey + \
                    '".'
            sys.exit(1)
        else:
            authDelegateDict = configData[authDelegateKey]


# Check if all required keys are parsed.
requiredKeys = [CLIENT_ID, CLIENT_SECRET, PRODUCT_ID, DEVICE_SERIAL_NUMBER]
try:
    missingKey = requiredKeys[map(authDelegateDict.has_key,requiredKeys).index(False)];
Example No. 59
def run_pipeline(options, unknown):
    """
    Run the complete tracking pipeline by invoking the scripts as subprocesses.
    Using the `do-SOMETHING` switches one can configure which parts of the pipeline are run.

    **Params:**

    * `options`: the options of the tracking script as returned from argparse
    * `unknown`: unknown parameters read from the config file, needed in case merger resolving is supposed to be run.

    """

    if options.do_ctc_groundtruth_conversion:
        logging.info("Convert CTC groundtruth to our format...")
        check_call(["python", os.path.abspath("ctc/ctc_gt_to_hdf5.py"), "--config", options.config_file])

    if options.do_ctc_raw_data_conversion:
        logging.info("Convert CTC raw data to HDF5...")
        check_call(["python", os.path.abspath("ctc/stack_to_h5.py"), "--config", options.config_file])
    if options.do_ctc_segmentation_conversion:

        logging.info("Convert CTC segmentation to HDF5...")
        check_call(["python", os.path.abspath("ctc/segmentation_to_hdf5.py"), "--config", options.config_file])

    if options.do_train_transition_classifier:
        logging.info("Train transition classifier...")
        check_call(["python", os.path.abspath("train_transition_classifier.py"), "--config", options.config_file])

    if options.do_extract_weights:
        logging.info("Extracting weights from ilastik project...")
        check_call(["python", os.path.abspath("tracking_ilp_to_weights.py"), "--config", options.config_file])

    if options.do_create_graph:
        logging.info("Create hypotheses graph...")
        check_call(["python", os.path.abspath("hypotheses_graph_to_json.py"), "--config", options.config_file])

    if options.do_convexify:
        logging.info("Convexifying graph energies...")
        check_call(["python", os.path.abspath("convexify_costs.py"), "--config", options.config_file])

    if options.do_tracking:
        logging.info("Run tracking...")

        if options.tracking_executable is not None:
            check_call([options.tracking_executable,
                        "-m", options.model_filename,
                        "-w", options.weight_filename,
                        "-o", options.result_filename])
        else:
            try:
                import commentjson as json
            except ImportError:
                import json
            
            import hytra.core.jsongraph
            with open(options.model_filename, 'r') as f:
                model = json.load(f)

            with open(options.weight_filename, 'r') as f:
                weights = json.load(f)
            
            if options.solver == "flow-based":
                import dpct
                result = dpct.trackFlowBased(model, weights)
            elif options.solver == "ilp":
                try:
                    import multiHypoTracking_with_cplex as mht
                except ImportError:
                    try:
                        import multiHypoTracking_with_gurobi as mht
                    except ImportError:
                        raise ImportError("Could not find multi hypotheses tracking ilp solver")
                result = mht.track(model, weights)

            hytra.core.jsongraph.writeToFormattedJSON(options.result_filename, result)

    extra_params = []
    if options.do_merger_resolving:
        logging.info("Run merger resolving")
        check_call(["python", os.path.abspath("run_merger_resolving.py"), "--config", options.config_file])

        for p in ["--out-graph-json-file", "--out-label-image-file", "--out-result-json-file"]:
            index = unknown.index(p)
            extra_params.append(p.replace('--out-', '--'))
            extra_params.append(unknown[index + 1])

    if options.export_format is not None:
        logging.info("Convert result to {}...".format(options.export_format))
        if options.export_format in ['ilastikH5', 'ctc']:
            check_call(["python", os.path.abspath("json_result_to_events.py"), "--config", options.config_file] + extra_params)
            if options.export_format == 'ctc':
                check_call(["python", os.path.abspath("ctc/hdf5_to_ctc.py"), "--config", options.config_file] + extra_params)
        elif options.export_format == 'labelimage':
            check_call(["python", os.path.abspath("json_result_to_labelimage.py"), "--config", options.config_file] + extra_params)
        elif options.export_format is not None:
            logging.error("Unknown export format chosen!")
            raise ValueError("Unknown export format chosen!")
Example No. 60
from url_requests import test_infrastructure
import subprocess


### MAIN
if __name__ == "__main__":
    if len(sys.argv) > 4:
        configFile = sys.argv[4]
    else:
        print "No config file specified. Using 'config_example.json'"
        configFile = 'config_example.json'

    try:
        with open(sys.argv[3] + "/config_files/" + configFile) as json_data_file:
            try:
                configData = commentjson.load(json_data_file)
            except ValueError:
                print "Wrong data format. Should be json."
                exit(1)
            except commentjson.JSONLibraryException:
                print "Wrong data format. Should be json."
                exit(1)
    except IOError:
        print "File not found/permission was denied."
        exit(1)

    configData['creds']['os_password'] = sys.argv[1]
    configData['framework_dir'] = sys.argv[3]

    print "Checking JSON structure..."
    if check_config_structure(configData) == -1: