Example #1
def main():     
    manipulatedVariables = OrderedDict()
    manipulatedVariables['alphaForStateWidening'] = [0.25]
    manipulatedVariables['attentionType'] = ['idealObserver']#, 'hybrid4']
    #manipulatedVariables['attentionType'] = ['hybrid4', 'preAttention']
    #manipulatedVariables['attentionType'] = ['preAttention']
    #manipulatedVariables['attentionType'] = ['idealObserver', 'preAttention', 'attention4', 'hybrid4']
    #manipulatedVariables['attentionType'] = ['preAttentionMem0.65', 'preAttentionMem0.25', 'preAttentionPre0.5', 'preAttentionPre4.5']
    manipulatedVariables['CForStateWidening'] = [2]
    manipulatedVariables['minAttentionDistance'] = [10.0, 40.0]#[10.0, 20.0, 40.0]
    manipulatedVariables['rangeAttention'] = [10.0]
    manipulatedVariables['cBase'] = [50]
    manipulatedVariables['numTrees'] = [1, 2]
    manipulatedVariables['numSimulationTimes'] = [103]
    manipulatedVariables['actionRatio'] = [0.02, 0.05]
    manipulatedVariables['burnTime'] = [0]
    manipulatedVariables['softId'] = [1, 9]
    manipulatedVariables['softSubtlety'] = [1, 9]
 
    productedValues = it.product(*[[(key, value) for value in values] for key, values in manipulatedVariables.items()])
    parametersAllCondtion = [dict(list(specificValueParameter)) for specificValueParameter in productedValues]

    DIRNAME = os.path.dirname(__file__)
    trajectoryDirectory = os.path.join(DIRNAME, '..', 'data', 'mcts',
                                'trajectories')
    if not os.path.exists(trajectoryDirectory):
        os.makedirs(trajectoryDirectory)

    trajectoryExtension = '.pickle'
    getTrajectorySavePathByCondition = lambda condition: tsl.GetSavePath(trajectoryDirectory, trajectoryExtension, condition)
    measurementEscapeExtension = '.csv'
    getCSVSavePathByCondition = lambda condition: tsl.GetSavePath(trajectoryDirectory, measurementEscapeExtension, condition)
    runOneCondition = RunOneCondition(getTrajectorySavePathByCondition, getCSVSavePathByCondition)

    #runOneCondition(parametersAllCondtion[0])
    numCpuCores = os.cpu_count()
    numCpuToUse = int(numCpuCores)
    runPool = mp.Pool(numCpuToUse)
    runPool.map(runOneCondition, parametersAllCondtion)
   
    precisionToSubtletyDict = {500: 0, 50: 5, 11: 30, 3.3: 60, 1.83: 90, 0.92: 120, 0.31: 150, 0.001: 180}
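A minimal, self-contained sketch of the parameter-grid pattern above (the variable names and the toy worker here are illustrative, not from the original module): itertools.product over an OrderedDict of value lists yields one dict per experimental condition, which can then be mapped over a Pool.

import itertools as it
import multiprocessing as mp
from collections import OrderedDict

def run_one_condition(condition):
    # stand-in for RunOneCondition; just echoes the condition
    return sorted(condition.items())

if __name__ == '__main__':
    variables = OrderedDict()
    variables['numTrees'] = [1, 2]
    variables['actionRatio'] = [0.02, 0.05]

    produced = it.product(*[[(key, value) for value in values]
                            for key, values in variables.items()])
    conditions = [dict(pairs) for pairs in produced]

    with mp.Pool(2) as pool:
        print(pool.map(run_one_condition, conditions))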
Example #2
def HelixMeasurements(Ref_Coords, Tgt_Coords, Data, parm, output):
    # Input_Coords = [pdb_name, H_Crds, N_Crds, C_Crds, G_Crds, R_Crds, T_Crds]
    #     x_Coords = [resname, resid, bb_crds, ca_crd, cg_crd, avg_crd, cb_crd]

    print(
        '##################################################################\n')

    # Create helix object for MPI
    Ref = HelixAxis(Ref_Coords)

    if parm['MPICPU'][0] == 1:
        Tmp = [HelixAxis(Tgt) for Tgt in tqdm(Tgt_Coords)]
    else:
        if parm['MPICPU'][0] == 0:
            mpi_cpu = multiprocessing.cpu_count()
        else:
            mpi_cpu = parm['MPICPU'][0]
        mpi = multiprocessing.Pool(mpi_cpu)
        Tmp = [
            x for x in tqdm(mpi.imap(HelixAxis, Tgt_Coords),
                            total=len(Tgt_Coords))
        ]
        mpi.close()
        mpi.join()

    # Tgt = [ pdb_id, res_id, axis, cg_nom, cg_vec, sc_vec,
    #         sc_pres, cg_pres, curve, phi, psi, r_median, r_std,
    #         Reg2 ]

    ## Have to leave None entries in Reg2 so that Reg2 has the same number of
    ## elements as PDB_Coords, which DomainDistances() needs
#  Tgt_List = [x for x in Tmp if x is not None]
    Tgt_List = Tmp
    print('\n ## Helix Axis return: {0}\n'.format(len(Tgt_List)))
    CollectHelix(Ref, Tgt_List, Data)

    # Extract the 2nd-order regression data for later use;
    # the last item in Tgt_List is the reference entry, so drop it
    RefReg2 = Ref[-1]
    Reg2 = [Tgt[-1] for Tgt in Tgt_List[:-1]]
    return RefReg2, Reg2
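A minimal sketch of the imap-with-progress pattern above, assuming tqdm is installed; a toy module-level worker stands in for HelixAxis.

import multiprocessing
from tqdm import tqdm

def _axis(coords):
    # stand-in for HelixAxis: return the centroid of a coordinate list
    return sum(coords) / len(coords)

if __name__ == '__main__':
    targets = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    with multiprocessing.Pool(2) as pool:
        results = [x for x in tqdm(pool.imap(_axis, targets),
                                   total=len(targets))]
    print(results)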
Example #3
    def run(self, common_params, method_params):

        self._checkCommonParams(common_params)
        #self._setCommonParams(common_params)
        self.method_params = method_params
        self.common_params = common_params
        
        # parallel execution common additional preparation
        self.num_threads = 2
        if 'num_threads' in common_params and common_params['num_threads'] is not None:
            self.num_threads = int(common_params['num_threads'])


        # method specific execution preparation
        self.prepare()

        if self.num_cores != 1:
            pool_size, self.num_threads = self._optimizeParallel(
                self.num_jobs, self.num_threads, self.num_cores, self.max_cores)
        else:
            pool_size = 1
            self.num_threads = 1
        # sanity check
        if self.num_jobs is None:
            raise ExecutionBaseException("Need to define num_jobs during prepare()")
        if len(self.task_list) < 1:
            raise ExecutionBaseException("Need to define task_list during prepare()")

        pool = mp.Pool(pool_size)
        self.results = pool.map(self.runEach, self.task_list)
        #pool.close()
        #pool.join()

        self.collect()
        self.writeReport()

        # clean-ups
        self.num_jobs = None
        self.task_list = []
        self.common_params = None
        self.method_params = None
        return self.returnVal
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_csv',
                        required=True,
                        help='File for outputting results')
    args = parser.parse_args()

    to_run = {
        'aoerc': 'person',
        'broadway-jackson-hole': 'car',
        'buffalo-meat': 'person',
        'buses-cars-archie-europe-rotonde': 'car',
        'bytes': 'person',
        'coral-reef': 'person',
        'coupa': 'person',
        'elevator': 'person',
        'huang': 'person',
        'lady-in-the-corner': 'person',
        'live-zicht-binnenhaven': 'car',
        'shibuya-halloween': 'car',
        'taipei': 'bus',
        'town-square-shootout': 'person'
    }
    input_csv_base = '/dfs/scratch1/jemmons/labels-truncated/%s.mp4.csv.truncated'
    runner = 'gen_accuracy_plot.py'

    i = 0
    run_args = []
    for dir_name in to_run:
        for filename in glob.iglob('%s/*.csv' % dir_name):
            OBJECT = to_run[dir_name]
            truth_csv_name = input_csv_base % dir_name
            test_csv_name = filename
            run_args.append((i, OBJECT, dir_name, truth_csv_name,
                             test_csv_name, args.output_csv, runner))
            i += 1

    pool = multiprocessing.Pool(48)
    results = pool.map(fn, run_args)
    print(results)
    print(len(results))
Example #5
    def __enter__(self):

        if self._mode == 'multiprocessing':
            import dask.multiprocessing
            import multiprocess.context as ctx

            ctx._force_start_method('spawn')

            processes = (self._calc_threads()
                         if self._processes is None else self._processes)
            self._pool = mp.Pool(processes=processes)
            self._options = dask.context.set_options(
                pool=self._pool, get=dask.multiprocessing.get)
            self._logger.info(
                "Starting processing with {} processes".format(processes))
        elif self._mode == 'synchronous':
            import dask.local
            self._options = dask.context.set_options(get=dask.local.get_sync)
            self._logger.info("Starting synchronous processing")

        return self
Example #6
def get_distance_matrix(numpy_array):
    sc = TimeSeriesScalerMeanVariance()
    X_s = sc.fit_transform(to_time_series_dataset(numpy_array))

    size = len(X_s)

    idx = [(i, j) for i in range(0, size) for j in range(i + 1, size)]

    def calc_dtw(my_idx):
        i, j = my_idx
        return dtw(X_s[i], X_s[j])

    with mp.Pool(mp.cpu_count() - 1) as p:
        distances = p.map(calc_dtw, idx)

    dm = np.zeros(shape=(size, size))
    for (i, j), v in zip(idx, distances):
        dm[i, j] = v
        dm[j, i] = v

    return dm
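Note that calc_dtw above is a nested function; the standard multiprocessing pickler cannot serialize it under the spawn start method (it happens to work with fork). A minimal sketch of a portable alternative, with a toy distance standing in for dtw:

import multiprocessing as mp
from functools import partial

def _pair_distance(idx_pair, series):
    # toy stand-in for dtw(X_s[i], X_s[j])
    i, j = idx_pair
    return abs(series[i] - series[j])

if __name__ == '__main__':
    series = [1.0, 4.0, 9.0]
    idx = [(i, j) for i in range(len(series)) for j in range(i + 1, len(series))]
    with mp.Pool(2) as pool:
        distances = pool.map(partial(_pair_distance, series=series), idx)
    print(dict(zip(idx, distances)))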
Example #7
    def _apply_multi_core(self,
            book: 'Book',
            data: Union['Dataset', 'Book'],
            method: Callable) -> 'Book':
        """Applies 'method' to 'data' using multiple CPU cores.

        Args:
            book ('Book'): siMpLify class instance with Chapter instances to
                parallelize.
            data (Union['Dataset', 'Book']): an instance containing data to
                be modified.
            method (Callable): method to parallelize.

        Returns:
            'Book': with its iterable applied to data.

        """
        with mp.Pool() as pool:
            pool.starmap(method, arguments)
        return self
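A minimal, self-contained sketch of the starmap pattern, assuming a hand-built argument list and a toy worker (neither is from siMpLify):

import multiprocessing as mp

def _apply(chapter, rows):
    return '{}: {} rows'.format(chapter, len(rows))

if __name__ == '__main__':
    arguments = [('intro', [1, 2]), ('methods', [3, 4, 5])]
    with mp.Pool() as pool:
        results = pool.starmap(_apply, arguments)
    print(results)  # ['intro: 2 rows', 'methods: 3 rows']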
Example #8
def extract_features(images, gts, measure, fns, n_jobs=1, loader=imread):

    if n_jobs != 1:
        l = list(
            map(lambda x: (x[0], x[1], measure, fns, loader), zip(images,
                                                                  gts)))
        if n_jobs < 0:
            n_jobs = cpu_count()
        p = pp.Pool(n_jobs)
        features = p.map(
            lambda x: feature_extractor(x[0], x[1], x[2], x[3], x[4]), l)
        p.close()
        p.join()
        return features
    else:
        features = []
        for img, gt in zip(images, gts):
            ftrs = feature_extractor(img, gt, measure, fns)
            features.append(ftrs)
        features = np.stack(features)
        return features
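A minimal, self-contained sketch of the n_jobs dispatch used above, with the standard library Pool and a toy worker in place of feature_extractor:

from multiprocessing import Pool, cpu_count

def _work(item):
    return item * item

def run_jobs(items, n_jobs=1):
    if n_jobs != 1:
        if n_jobs < 0:
            n_jobs = cpu_count()
        with Pool(n_jobs) as p:
            return p.map(_work, items)
    return [_work(item) for item in items]

if __name__ == '__main__':
    print(run_jobs(range(5), n_jobs=-1))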
Example #9
def run():
    """Run trainings with all possible parameter combinations in
    the configured space.
    """

    process_pool = multiprocessing.Pool(processes=max_worker_num,
                                        maxtasksperchild=1)
    device_queue = _init_device_queue(max_worker_num)

    for seed in seeds:
        for env, state_dim, action_dim in zip(envs, state_dims, action_dims):
            command = "python alignexp.py --eval_n 10 --data_type1 'base' --data_id1 {} --data_type2 {} --data_id2 {}  --seed {} --env {} --state_dim1={} --action_dim1={} --state_dim2={} --action_dim2={}".format(
                data_id, data_type, data_id, seed, env, state_dim, action_dim,
                state_dim, action_dim)
            print(command)
            process_pool.apply_async(func=_worker,
                                     args=[command, device_queue],
                                     error_callback=lambda e: logging.error(e))

    process_pool.close()
    process_pool.join()
Example #10
    def _process_plot_tvshowstats():
        tvdata_date_dict = plextvdb.get_tvdata_ordered_by_date(
            self.tvdata_on_plex)
        years_have = set(map(lambda date: date.year, tvdata_date_dict))
        with multiprocessing.Pool(
                processes=multiprocessing.cpu_count()) as pool:
            figdictdata = dict(
                pool.map(
                    lambda year:
                    (year,
                     plextvdb.create_plot_year_tvdata(
                         tvdata_date_dict, year, shouldPlot=False)),
                    years_have))
        myLock.acquire()
        myStage.value += 1
        mytxt = '%d, made plots of tv shows added in %d years in %0.3f seconds.' % (
            myStage.value, len(years_have), time.time() - time0)
        logging.info(mytxt)
        self.emitString.emit(mytxt)
        myLock.release()
        shared_list.append(('plotYears', figdictdata))
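A minimal sketch of the pattern above: pool.map over keys returns (key, value) pairs that dict() assembles. A module-level worker (unlike the lambda above, which only pickles under the fork start method) stands in for create_plot_year_tvdata.

import multiprocessing

def _plot_for_year(year):
    # stand-in for plextvdb.create_plot_year_tvdata(...)
    return year, 'figure-for-{}'.format(year)

if __name__ == '__main__':
    years_have = [2019, 2020, 2021]
    with multiprocessing.Pool() as pool:
        figdictdata = dict(pool.map(_plot_for_year, years_have))
    print(figdictdata)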
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--resol', required=True, type=int, help='Resolution')
    args = parser.parse_args()

    to_run = {'aoerc': 'person',
              'broadway-jackson-hole': 'car',
              'buffalo-meat': 'person',
              'buses-cars-archie-europe-rotonde': 'car',
              'bytes': 'person',
              'coral-reef': 'person',
              'coupa': 'person',
              'elevator': 'person',
              'huang': 'person',
              'lady-in-the-corner': 'person',
              'live-zicht-binnenhaven': 'car',
              'shibuya-halloween': 'car',
              'taipei': 'bus'}
    input_csv_base = '/dfs/scratch1/fabuzaid/noscope-datasets-completed/csv/%s.mp4.csv'
    input_vid_base = '/dfs/scratch1/fabuzaid/noscope-datasets-completed/videos/%s.mp4'
    output_dir = '/dfs/scratch1/ddkang/experiments/metrics_window30_all'
    runner = '/afs/cs.stanford.edu/u/ddkang/code/noscope/exp/image_metrics.py'

    run_args = []
    RESOL = args.resol
    for DELAY in [10, 30]:
        for fname in to_run:
            OBJECT = to_run[fname]
            csv_in_name = input_csv_base % fname
            vid_in_name = input_vid_base % fname
            run_args.append(
                    (RESOL, DELAY, OBJECT, fname,
                     csv_in_name, vid_in_name,
                     output_dir,
                     runner))

    pool = multiprocessing.Pool(5)
    results = pool.map(fn, run_args)
    print(results)
    print(len(results))
Example #12
def remove_unmeasured_nodes(graph, measured):
    new_g = graph.copy()
    edge_info_dict = dict()
    for i, j, data in graph.edges(data=True):
        edge_info_dict[(i, j)] = data
    nodes = set(graph.nodes())
    include = set(measured)
    include.intersection_update(nodes)

    # for i, j in :
    def find(d):
        i, j = d
        paths = []
        if nx.has_path(graph, i, j):
            for p in nx.all_shortest_paths(graph, i, j):
                path = []
                label = []
                for n in p:
                    if n in include:
                        path.append(n)
                    else:
                        label.append(n)
                if len(path) == 2:
                    paths.append((path, '|'.join(label)))
        return paths

    x = mp.Pool(4)
    paths = x.map(find, itertools.combinations(include, 2))
    to_remove = set()
    for p in paths:
        if len(p) != 2:
            continue
        for path, label in p:
            print(path, label)
            for n in label.split('|'):
                to_remove.add(n)
            new_g.add_edge(path[0], path[1], label=label)
    for n in to_remove:
        new_g.remove_node(n)
    return new_g
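A minimal sketch of the path-collapsing idea in remove_unmeasured_nodes, on a toy graph (sequential, networkx only): unmeasured intermediates on a shortest path between two measured nodes become the label of a new direct edge and are then removed.

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([('A', 'x'), ('x', 'B'), ('B', 'C')])
measured = {'A', 'B', 'C'}

new_g = g.copy()
for p in nx.all_shortest_paths(g, 'A', 'B'):
    hidden = [n for n in p if n not in measured]
    if hidden:
        new_g.add_edge('A', 'B', label='|'.join(hidden))
        new_g.remove_nodes_from(hidden)
print(new_g.edges(data=True))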
Example #13
def main():
    to_run = {
            # video, object, metric, model, reg, delay, resolution, ref-index, num-blocks
            'aoerc': ('person', 'mse', 'lr', 'l2', 10, 100, -1, 10),
            'broadway-jackson-hole': ('car', 'mse', 'lr', 'l2', 10, 50, 100, 10),
            'buffalo-meat': ('person', 'mse', 'lr', 'l2', 10, 100, 100, 10),
            'buses-cars-archie-europe-rotonde': ('car', 'mse', 'lr', 'l2', 10, 50, 350, 10),
            'bytes': ('person', 'mse', 'lr', 'l2', 30, 50, -1, 10),
            'coral-reef': ('person', 'mse', 'lr', 'l2', 10, 100, 500, 10),
            'coupa': ('person', 'mse', 'lr', 'l2', 10, 50, 100, 10),
            'elevator': ('person', 'mse', 'lr', 'l2', 10, 100, 100, 10),
            'huang': ('person', 'mse', 'lr', 'l2', 10, 50, 110, 10),
            'lady-in-the-corner': ('person', 'mse', 'lr', 'l2', 10, 100, -1, 10),
            'live-zicht-binnenhaven': ('car', 'mse', 'lr', 'l2', 10, 50, 100, 10),
            'shibuya-halloween': ('car', 'mse', 'lr', 'l2', 10, 50, 50, 10),
            'taipei': ('bus', 'mse', 'lr', 'l2', 10, 50, 50, 10),
            'town-square-shootout': ('bus', 'mse', 'lr', 'l2', 10, 50, 50, 10)
        }
    input_csv_base = '/dfs/scratch1/fabuzaid/noscope-datasets-completed/csv/%s.mp4.csv'
    input_vid_base = '/dfs/scratch1/fabuzaid/noscope-datasets-completed/videos/%s.mp4'
    output_dir = '/dfs/scratch1/fabuzaid/noscope-experiments/blocked_filters-gold-standard-no-mean/'
    runner = 'blocked_filters.py'

    run_args = []
    for fname in to_run:
        OBJECT, METRIC, MODEL, REG, DELAY, RESOL, REF_INDEX, NUM_BLOCKS = to_run[fname]
        csv_in_name = input_csv_base % fname
        vid_in_name = input_vid_base % fname
        # no ref-index
        run_args.append(
                (RESOL, DELAY, NUM_BLOCKS, REF_INDEX, REG,
                 MODEL, METRIC, OBJECT, fname, csv_in_name,
                 vid_in_name, output_dir + fname, runner)
        )
        # with ref-index

    pool = multiprocessing.Pool(13)
    results = pool.map(fn, run_args)
    print(results)
    print(len(results))
Example #14
def _make_plots(plots_to_make, plot_func, parallel=False):
    for i, _ in enumerate(plots_to_make):
        plots_to_make[i].append('pdf')
        plots_to_make[i].append(True)

    if parallel:
        st2 = time.time()
        pool = mp.Pool()
        # lambda a: function(a[0], **a[1]), arguments
        pool.map_async(lambda a: plot_func(*a), plots_to_make)
        pool.close()
        pool.join()
        end2 = time.time()
        print("parallel time = {}".format(end2 - st2))
        print("Done creating plots for each GO term")

    else:
        st1 = time.time()
        list(map(lambda a: plot_func(*a), plots_to_make))
        end1 = time.time()
        print("sequential time = {}".format(end1 - st1))
    plt.close('all')
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--output-dir', required=True)
    args = parser.parse_args()

    runner = './rmse.py'

    output_dir = args.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    run_args = []
    numa_queue = get_numa_queue(2)

    for (model_dir, _, (training_file, test_file)) in TO_RUN:
        input_dir = os.path.join(MODEL_DIR_BASE, model_dir)
        base_name = model_dir.replace('/', '-')
        run_args.append((numa_queue, training_file, test_file, input_dir,
                         base_name, output_dir, runner))

    pool = multiprocessing.Pool(NUM_NUMA_NODES * 2)
    pool.map(run, run_args)
Example #16
    def perform_qa(self):
        outdir = os.path.join(self.out_prefix, Analysis.out_subdir['qa'])
        logging.info(
            f"Starting quality assessment (FastQC), results will be stored here: {outdir}"
        )
        Support.safe_dir_create(outdir)
        cmd_queue = []
        for sample_id in self.samples:
            logging.info(f"Queuing {sample_id}")
            r1 = self.samples[sample_id]["r1"]
            r2 = self.samples[sample_id]["r2"]
            cmd = f"fastqc -o {outdir} {r1} {r2}"
            # cmd = f"fastqc -o {outdir} -t {self.threads} {r1} {r2}"
            # Support.run_command(command_str=cmd)
            cmd_queue.append(cmd)
        pool = mp.Pool(processes=self.threads)
        pool.map(lambda x: Support.run_command(command_str=x), cmd_queue)
        logging.info(f"Quality assessed for all samples")

        logging.info(f"Compiling QA reports")
        cmd = f"multiqc -o {outdir} {outdir}"
        Support.run_command(cmd)
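Note that pool.map above is given a lambda, which the standard multiprocessing pickler rejects under the spawn start method. A minimal, self-contained sketch of the same queue-and-map idea with a module-level worker and plain subprocess (Support.run_command is not reproduced here):

import multiprocessing as mp
import subprocess

def _run_command(cmd):
    return subprocess.run(cmd, shell=True).returncode

if __name__ == '__main__':
    cmd_queue = ['echo sample_A', 'echo sample_B']
    with mp.Pool(processes=2) as pool:
        return_codes = pool.map(_run_command, cmd_queue)
    print(return_codes)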
Example #17
    def multiprocess_cluster(self, number_of_files, number_of_process):
        '''
        Using multiprocessing, run k-means clustering to build KeyFrame objects,
        pickle them to a file, and load the pickled data back for verification.
        :return: elapsed clustering time in seconds
        '''
        start_time = time.time()

        # define a nested function for multiprocessing
        def cluster_f(file):
            colors_info = cluster.kmeans_cluster(file, 5)
            return KeyFrame(colors_info)

        # get the list of all files in the test directory
        list_of_files = self.getListOfFiles('./testfile')
        # use the first number_of_files files for the test
        test_len = number_of_files
        # initialize cluster utility
        cluster = ClusterUtility
        # create a pool with number_of_process worker processes
        p = mp.Pool(number_of_process)
        # multiprocessing
        # store a list of KeyFrames
        img = p.map(cluster_f, list_of_files[:test_len])
        p.close()
        p.join()
        cluster_time = time.time() - start_time
        # create the pickle obj
        # if the pickled file exists, remove the file and create a new
        if os.path.exists('../test_search/pickledFrames'):
            os.remove('../test_search/pickledFrames')
        pfile = open('../test_search/pickledFrames', 'ab')
        dill.dump(img, pfile)
        pfile.close()
        pfile = open('../test_search/pickledFrames', 'rb')
        img_data = dill.load(pfile)
        test_case = TestCase()
        pfile.close()
        test_case.assertEqual(test_len, len(img_data))
        return cluster_time
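A minimal sketch of the dill pickle round-trip check at the end of the method above, with plain dicts standing in for KeyFrame objects (assumes dill is installed):

import dill

frames = [{'frame': i, 'colors': [i, i + 1, i + 2]} for i in range(10)]
with open('pickledFrames.test', 'wb') as pfile:
    dill.dump(frames, pfile)
with open('pickledFrames.test', 'rb') as pfile:
    restored = dill.load(pfile)
assert len(restored) == len(frames)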
Example #18
    def query(
        self,
        observations: Optional[Union[Dict[str, Any], List[Dict[str,
                                                               Any]]]] = None,
        parallel: bool = False,
        num_cores: Optional[int] = None,
    ) -> Union[Dict[str, Dict[Hashable, float]], List[Dict[str, Dict[
            Hashable, float]]], ]:
        """
        Queries the ``BayesianNetwork`` for marginals given one or more observations.

        Args:
            observations: one or more observations of states of nodes in the Bayesian Network.
            parallel: if True, run the query using multiprocessing
            num_cores: only applicable if parallel=True. The number of cores used during multiprocessing.
                       If num_cores is not provided, number of processors will be autodetected and used

        Returns:
            A dictionary or a list of dictionaries of marginal probabilities of the network.

        Raises:
            TypeError: if observations is neither None nor a dictionary nor a list
        """
        if observations is not None and not isinstance(observations,
                                                       (dict, list)):
            raise TypeError(
                "Expecting observations to be a dict, list or None")

        if isinstance(observations, list):
            if parallel:
                with multiprocessing.Pool(num_cores) as p:
                    result = p.map(self._single_query, observations)
            else:
                result = [self._single_query(obs) for obs in observations]
        else:  # dictionary or None
            result = self._single_query(observations)

        return result
Example #19
    def loo_cross_validation(self,
                             batch_size=32,
                             train_rate=0.1,
                             steps=1,
                             epochs_per_step=100000,
                             threads=4):
        def loo_x_validate(loo_cand):
            model = SimpleRecurrentModel(**self.model_args)
            filtered_inputs = [
                self.input_data[i] for i in range(len(self.input_data))
                if i != loo_cand
            ]
            filtered_targets = [
                self.target_data[i] for i in range(len(self.target_data))
                if i != loo_cand
            ]
            processed_inputs, processed_targets = model.assemble_data(
                filtered_inputs, filtered_targets)

            training_results = {}
            for s in range(1, steps + 1):
                model._raw_train(processed_inputs, processed_targets,
                                 batch_size, train_rate, epochs_per_step)
                training_results[s *
                                 epochs_per_step] = model.compute_inference(
                                     self.input_data[loo_cand])
            return training_results

        loo_candidates = range(len(self.input_data))
        p = mp.Pool(threads)
        results = p.map(loo_x_validate, loo_candidates)
        p.close()

        processed_results = {}
        for k in results[0].keys():
            processed_results[k] = [r[k] for r in results]
        self.loo_cross_validation_results = processed_results
        return self.loo_cross_validation_results
Example #20
    def createPNGPicObjects( cls, pImgClient ):
        """
        :param PlexIMGClient pImgClient: the :py:class:`PlexIMGClient <howdy.email.HowdyIMGClient>` used to access and manipulate (add, delete, rename) images in the main Imgur_ album.
        :returns: a :py:class:`list` of :py:class:`PNGPicObject <howdy.email.PNGPicObject>` representing the images in the main Imgur_ album.
        :rtype: list
        """
        pngPICObjects = [ ]
        def _create_object( imgMD5 ):
            imgName, imgID, imgurlLink, imgDateTime = pImgClient.imghashes[ imgMD5 ]
            try:
                newObj = PNGPicObject( {
                    'initialization' : 'SERVER',
                    'imgurlLink' : imgurlLink,
                    'imgName' : imgName,
                    'imgMD5' : imgMD5,
                    'imgDateTime' : imgDateTime }, pImgClient )
                return newObj
            except: return None

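        # Note: the mapping below uses the built-in map(), which runs in the
        # parent process, so the Pool created here is not what does the work.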
        with multiprocessing.Pool( processes = multiprocessing.cpu_count( ) ) as pool:
            pngPICObjects = list( filter(
                None, map( _create_object, pImgClient.imghashes ) ) )
            return pngPICObjects
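A minimal sketch of the filter(None, map(...)) pattern above: workers that fail return None and are dropped from the result (toy parser, illustrative values):

def _safe_parse(value):
    try:
        return int(value)
    except ValueError:
        return None

values = ['3', 'x', '7']
parsed = list(filter(None, map(_safe_parse, values)))
print(parsed)  # [3, 7]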
Example #21
def target_creator(masks, n_jobs=1):
    """
    Creates a segmentation target with 3 classes: 0 - one illuminant, 1 - black, 2 - second illuminant.
    The classes are one hot encoded.

    :param masks: Input mask to encode where 0 is the second illuminant, 255 is the first illuminant and 128 is black.
    :param n_jobs: Number of worker processes; 1 runs sequentially, negative values use all available CPUs.
    :return: Encoded mask used for segmentation
    """
    if n_jobs != 1:
        if n_jobs < 0:
            n_jobs = cpu_count()
        p = pp.Pool(n_jobs)
        nm = p.map(create_target, masks)
        p.close()
        p.join()
        return nm
    else:
        nm = []
        for mask in masks:
            mask = create_target(mask)
            nm.append(mask)
        nm = np.stack(nm)
        return nm
Example #22
    def read_edge_vol_delay(in_files: Sequence[str]) -> pd.DataFrame:
        with mp.Pool() as pool:
            tables = pool.map(
                functions_expansion(dynamic_assignment_file_read,
                                    lambda tbd: tbd['VolTime'], timevol_table),
                in_files)

        sim_itr = [
            int(path.basename(fr).split('.')[0].split('_')[-1])
            for fr in in_files
        ]
        for itr, table in zip(sim_itr, tables):
            table['ITR'] = itr

        sim_dem = [
            float(
                path.split(path.split(fr)[0])[-1].replace('Vol', '').replace(
                    'per', '')) / 100 for fr in in_files
        ]
        for dem, table in zip(sim_dem, tables):
            table['Demand'] = dem

        return pd.concat(tables)
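A minimal sketch of the filename parsing above, assuming paths shaped like '.../Vol85per/dynamic_assignment_3.csv' (the exact layout is an assumption, not confirmed by the snippet):

from os import path

fr = '/runs/Vol85per/dynamic_assignment_3.csv'
itr = int(path.basename(fr).split('.')[0].split('_')[-1])              # -> 3
dem = float(path.split(path.split(fr)[0])[-1]
            .replace('Vol', '').replace('per', '')) / 100              # -> 0.85
print(itr, dem)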