Example #1
0
def RunWithLogging(numberOfRepetitions, players = None, saveGameStateLogs = False, agentIndex = 0, multiprocess = False):

    if players is None:
        players = defaultPlayers

    logger = logging.getLogger()

    today = datetime.datetime.today()

    fileName            = 'SimulatorLogs/log_{0}.txt'.format(today.strftime("%d-%m-%Y_%H-%M"))
    fileNameSimulations = 'SimulatorLogs/logSim_{0}.txt'.format(today.strftime("%d-%m-%Y_%H-%M"))

    logFile = logging.FileHandler(fileName)

    logger.addHandler(logFile)

    winCount = [0, 0, 0, 0]

    totalTime = datetime.datetime.utcnow()

    if multiprocess:
        num_cores = multiprocessing.cpu_count()

        winners   = Parallel(n_jobs=num_cores)(delayed(RunParallel)
                                               (CreateGame(players), i, numberOfRepetitions, fileNameSimulations)
                                               for i in range(0, numberOfRepetitions))

        winCount  = [winners.count(0), winners.count(1), winners.count(2), winners.count(3)]

    else:

        games = [CreateGame(players) for i in range(numberOfRepetitions)]

        for i in range(0, numberOfRepetitions):

            time = datetime.datetime.utcnow()

            winner = RunGame(games[i], games[i].gameState.players, showLog=True, showFullLog=False, saveImgLog=saveGameStateLogs)

            logging.critical("\n GAME TIME = {0}".format(((datetime.datetime.utcnow() - time).total_seconds())))

            winCount[winner] += 1

            total = "\n TOTAL GAMES = {0}/{1} ".format(
                (i + 1),
                numberOfRepetitions)

            print(total)

            logging.critical(total)

    logging.critical("\n TOTAL TIME = {0}".format(((datetime.datetime.utcnow() - totalTime).total_seconds())/60.0))

    logging.critical(" TOTAL GAMES = {0} \n WIN COUNT = {1} \n AGENT WIN PERCENTAGE = {2}%".format(
        numberOfRepetitions,
        winCount,
        (float(winCount[agentIndex]) / numberOfRepetitions) * 100.0
    ))
Example #2
0
def p_w_l(l):
    # One copy of the alphabet ['T', 'F', ' '] per position, so that
    # product(*r) enumerates every length-l combination.
    r = []
    for i in range(0, l):
        r.append(['T', 'F', ' '])
    ps = [0, 0, 0]
    # Evaluate p_w on each combination in parallel and tally how many
    # calls returned 0, 1 and 2 respectively.
    results = Parallel(n_jobs=4)(delayed(p_w)(e) for e in product(*r))
    ps[0] = results.count(0)
    ps[1] = results.count(1)
    ps[2] = results.count(2)
    return ps
Example #3
0
def refine_thar_positions(wave_init, order_init, thar1d_fixed, thar_list,
                          fit_width=5., lc_tol=5., k=3, n_jobs=10, verbose=10):
    print("@Cham: refine ThAr positions ...")

    # refine thar positions for each order
    r = Parallel(n_jobs=n_jobs, verbose=verbose, batch_size=1)(
        delayed(refine_thar_positions_order)(
            wave_init[i_order],
            np.arange(wave_init.shape[1]),
            thar1d_fixed[i_order],
            thar_list[(thar_list > np.min(wave_init[i_order]) + 1.) * (
            thar_list < np.max(wave_init[i_order]) - 1.)],
            order_init[i_order, 0],
            fit_width=fit_width, lc_tol=lc_tol, k=k
        ) for i_order in range(wave_init.shape[0]))

    # remove all null values
    null_value = (None, None, None, None, None)
    for i in range(r.count(null_value)):
        r.remove(null_value)
        print(len(r))

    # collect data
    lc_coord = np.array(np.hstack([_[0] for _ in r]))
    lc_order = np.array(np.hstack([_[1] for _ in r]))
    lc_thar = np.array(np.hstack([_[2] for _ in r]))
    popt = np.array(np.vstack([_[3] for _ in r]))
    pcov = np.array(np.vstack([_[4] for _ in r]))

    return lc_coord, lc_order, lc_thar, popt, pcov
Example #4
0
def upload_retrospective_study(path_study_root_folder_path):
    """
    Sample script to recursively import in Orthanc all the DICOM files
    that are stored in some path. Please make sure that Orthanc is running
    before starting this script. The files are uploaded through the REST
    API.

    Usage: %s [hostname] [HTTP port] [path]
    Usage: %s [hostname] [HTTP port] [path] [username] [password]
    For instance: %s 127.0.0.1 8042 .

    :param path_study_root_folder_path:
    :return: 1
    """

    success_count = 0
    total_file_count = 0

    if os.path.isfile(path_study_root_folder_path):
        # Upload a single file
        total_file_count = 1
        success_count = read_upload_file(path_study_root_folder_path)
    else:
        # Recursively upload a directory
        list_files = recursive_list(path_study_root_folder_path)

        total_file_count = len(list_files)

        # Serial process them:
        # for file in tqdm(list_files):
        #    if read_upload_file(file):
        #        success_count += 1

        # Parallel process them
        num_cores = multiprocessing.cpu_count()

        # Store the output in a list
        results = Parallel(n_jobs=num_cores)(delayed(read_upload_file)(i)
                                             for i in list_files)

        # Count success.
        success_count = results.count(True)

    if success_count == total_file_count:
        logger.info(
            "\nSummary: all %d DICOM file(s) have been imported successfully" %
            success_count)
    else:
        logger.warning(
            "\nSummary: %d out of %d files have been imported successfully as DICOM instances"
            % (success_count, total_file_count))
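
A one-line usage sketch for the upload helper above (the folder path is a hypothetical placeholder; Orthanc must already be running and reachable, as the docstring notes):

upload_retrospective_study("/path/to/dicom/study")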
Example #5
0
def test_rf_feats_fullspec(slice_infos, i):
    feats, labels = generate_training_feats(slice_infos, i)
    RF = train_rf_classifier(feats, labels, no_trees)

    test_sl = slice_infos[i]
    image = test_sl.slice_im
    label = test_sl.slice_lb
    patches_m, patches_n = extract_roi_patches(image, label, psize)
    patches_n = patches_n
    plabels_m = ['M' for m in patches_m]
    plabels_n = ['N' for n in patches_n]
    kernels = generate_kernels()

    tot = len(patches_m) + len(patches_n)

    res1 = []
    res2 = []

    t0 = time()

    if len(sys.argv) >= 2:
        res1 = Parallel(n_jobs=int(sys.argv[1]))(
            delayed(check_classify)(RF, kernels, p, plabels_m[i], i, tot)
            for i, p in enumerate(patches_m))
        res2 = Parallel(n_jobs=int(sys.argv[1]))(
            delayed(check_classify)(RF, kernels, p, plabels_n[i], i, tot)
            for i, p in enumerate(patches_n))
    else:
        # Serial fallback: go through each patch and classify it.
        for i, p in enumerate(patches_m):
            res1.append(check_classify(RF, kernels, p, plabels_m[i], i, tot))
        for i, p in enumerate(patches_n):
            res2.append(check_classify(RF, kernels, p, plabels_n[i], i, tot))

    dt = time() - t0

    print(res1.count(True) + res2.count(True))
    print("Finished in {:.2f} seconds.".format(dt))
Example #6
0
    def checkCluster(self, df1, tanimotoDf, threshold):
        taniDF = tanimotoDf[tanimotoDf.sum(axis=1) != 0]
        clusters = df1['cluster'].unique().tolist()
        #startTime=time.time()
        r = Parallel(n_jobs=-1, backend="threading")([
            delayed(self.checkClusterInner)(cluster, df1, taniDF)
            for cluster in clusters
        ])
        print(r)
        try:
            single = r.count(0)
        except Exception:
            single = 0
        if len(r) == 0:
            return 0
        else:
            return sum(r), single
Example #7
0
    def save(
        self,
        root: Union[str, Path],
        kind: Optional[str] = "json",
        n_jobs: int = 1,
        ignore_exceptions: bool = True,
    ):
        """Save all the music objects to a directory.

        The converted files will be named by their indices and saved to ``root/``.

        Parameters
        ----------
        root : str or Path
            Root directory to save the data.
        kind : {'json', 'yaml'}, optional
            File format to save the data. Defaults to 'json'.
        n_jobs : int, optional
            Maximum number of concurrently running jobs in multiprocessing. If
            equal to 1, disable multiprocessing. Defaults to 1.
        ignore_exceptions : bool, optional
            Whether to ignore errors and skip failed conversions. This can be
            helpful if some of the source files are known to be corrupted.
            Defaults to True.

        Notes
        -----
        The original filenames can be found in the ``filenames`` attribute.
        For example, the file at ``filenames[i]`` will be converted and
        saved to ``{i}.json``.

        """
        if kind not in ("json", "yaml"):
            raise TypeError("`kind` must be either 'json' or 'yaml'.")
        if not isinstance(n_jobs, int):
            raise TypeError("`n_jobs` must be an integer.")
        if n_jobs < 1:
            raise ValueError("`n_jobs` must be positive.")

        root = Path(root).expanduser().resolve()
        if not root.exists():
            raise ValueError("`root` must be an existing path.")

        def _saver(idx):
            prefix = "0" * (n_digits - len(str(idx)))
            if ignore_exceptions:
                try:
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        self[idx].save(
                            root / (prefix + str(idx) + "." + kind), kind
                        )
                except Exception:  # pylint: disable=broad-except
                    return False
                return True
            self[idx].save(root / (prefix + str(idx) + "." + kind), kind)
            return True

        n_digits = len(str(len(self)))

        print("Start converting and saving the dataset.")
        if n_jobs == 1:
            count = 0
            for idx in tqdm(range(len(self))):  # type: ignore
                if _saver(idx):
                    count += 1
        else:
            if not HAS_JOBLIB:
                raise ValueError(
                    "Optional package joblib is required for multiprocessing "
                    "(n_jobs > 1)."
                )
            # TODO: This is slow as `self` is passed between workers.
            results = Parallel(n_jobs=n_jobs, backend="threading", verbose=5)(
                delayed(_saver)(idx) for idx in range(len(self))
            )
            count = results.count(True)
        print(
            "{} out of {} files successfully saved.".format(count, len(self))
        )
        (root / ".muspy.success").touch(exist_ok=True)
Example #8
0
	for line in cmd.stderr:
		try:
			return float(line)
		except ValueError:
			print(line)
			return 0.0

# Compare all compressed images with original video
for g in gop:
	for f in format:
		for r in fps:
			rr = str(eval(r))
			for b in bitrate:
				folder_images = 'videos_{}gop_{}format_{}fps_{}bitrate_images'.format(g, f, rr, b)
				folder_original = 'videos_{}format_{}fps_images'.format(f, rr)
				compare_total = 0.0
				if os.path.exists('{}/results.txt'.format(folder_images)):
					break
				for name in os.listdir('./original'):
					compare_list = []
					compare_list = Parallel(n_jobs=num_cores)(delayed(launch_process)(folder_original, image, name, folder_images) for image in os.listdir('./original_images/{}/{}'.format(folder_original, name)))

					file_results = open('{}/results.txt'.format(folder_images), "a")
					file_results.write('{}: {}\n'.format(name, sum(compare_list)/(len(compare_list)-compare_list.count(0.0))))
					file_results.close()
					compare_total += sum(compare_list)/(len(compare_list)-compare_list.count(0.0))
				file_results = open('{}/results.txt'.format(folder_images), "a")
				file_results.write('\n{}: total {}\n'.format(compare_total, folder_images))
				file_results.close()

Example #9
0
    def save(
        self,
        root: Union[str, Path],
        kind: str = "json",
        n_jobs: int = 1,
        ignore_exceptions: bool = True,
        verbose: bool = True,
        **kwargs,
    ):
        """Save all the music objects to a directory.

        Parameters
        ----------
        root : str or Path
            Root directory to save the data.
        kind : {'json', 'yaml'}, optional
            File format to save the data. Defaults to 'json'.
        n_jobs : int, optional
            Maximum number of concurrently running jobs. If equal to 1,
            disable multiprocessing. Defaults to 1.
        ignore_exceptions : bool, optional
            Whether to ignore errors and skip failed conversions. This
            can be helpful if some source files are known to be
            corrupted. Defaults to True.
        verbose : bool, optional
            Whether to be verbose. Defaults to True.
        **kwargs
            Keyword arguments to pass to :func:`muspy.save`.

        """
        if kind not in ("json", "yaml"):
            raise TypeError("`kind` must be either 'json' or 'yaml'.")

        root = Path(root).expanduser().resolve()
        root.mkdir(exist_ok=True)

        def _saver(idx):
            prefix = "0" * (n_digits - len(str(idx)))
            filename = root / (prefix + str(idx) + "." + kind)
            if ignore_exceptions:
                try:
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        save(filename, self[idx], kind, **kwargs)
                except Exception:  # pylint: disable=broad-except
                    return False
                return True
            save(filename, self[idx], kind, **kwargs)
            return True

        n_digits = len(str(len(self)))

        if verbose:
            print("Converting and saving the dataset...")
        if n_jobs == 1:
            count = 0
            for idx in tqdm(range(len(self))):  # type: ignore
                if _saver(idx):
                    count += 1
        else:
            # TODO: This is slow as `self` is passed between workers.
            results = Parallel(n_jobs=n_jobs, backend="threading",
                               verbose=5)(delayed(_saver)(idx)
                                          for idx in range(len(self)))
            count = results.count(True)
        if verbose:
            print(f"Successfully saved {count} out of {len(self)} files.")
Example #10
0
            xlsnamearr.append(name[:-5])
            xlspatharr.append(path)
            xlsfilepatharr.append(os.path.join(path, name[:-5]))
            xlslmt.append(os.path.getmtime(os.path.join(path, name)))
            xlsct.append(os.path.getctime(os.path.join(path, name)))
            
#convert all to numpy array
csvnamearr=np.array(csvnamearr)         
xlsnamearr =np.array(xlsnamearr) 
csvpatharr =np.array(csvpatharr) 
xlspatharr =np.array(xlspatharr) 
csvfilepatharr=np.array(csvfilepatharr)
xlsfilepatharr=np.array(xlsfilepatharr)
csvlmt=np.array(csvlmt) 
csvct=np.array(csvct) 
xlslmt =np.array(xlslmt) 
xlsct=np.array(xlsct)    

#call function to write csv files in a loop
#The maximum number of concurrently running jobs, such as the number of Python worker 
#processes when backend=”multiprocessing” or the size of the thread-pool when 
#backend=”threading”. If -1 all CPUs are used. If 1 is given, no parallel computing code 
#is used at all, which is useful for debugging. For n_jobs below -1, 
#(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. 
if __name__ == '__main__':        
    ret=Parallel(n_jobs=-1,verbose=5)(delayed(csvwrite)(x,csvfilepatharr,xlsfilepatharr,xlsnamearr,csvlmt,xlslmt,xlspatharr,xlsfilearr) for x in range(0,len(xlsnamearr)))
    print("Number of Excel files: "+ str(len(xlsnamearr))+"\n")
    print("Number of Excel files processed: "+ str(ret.count(1))+"\n")
    print("Number of Excel files Skipped Since CSV Exists: "+ str(ret.count(0))+"\n")
    print("Number of Excel files Skipped Because of Bad Sheets: "+ str(ret.count(2))+"\n")
    print("Number of Excel files Not Matching String Criterion: "+ str(ret.count(3))+"\n")
Example #11
0
#nmax = 40000 #Maximum number up to which we want to search for primes
nmax = 400000 #Maximum number up to which we want to search for primes
inputs = range(0, nmax)


def isPrime(num):
    if num < 2:
        # 0, 1 and negative numbers are not prime
        return False
    elif num == 2:
        return True
    else:
        for i in range(2, num):
            if num % i == 0:
                return False
        return True


starttime = time.time()

num_cores = multiprocessing.cpu_count()

#If we access shared memory structures (lists, dictionaries...), pass require='sharedmem',
#but performance drops considerably:
results = Parallel(n_jobs=num_cores)(delayed(isPrime)(i) for i in inputs)


print('primes found: %s' % results.count(True))
print('non-primes found: %s' % results.count(False))

print('That took {} seconds'.format(time.time() - starttime))
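
A minimal sketch of the require='sharedmem' option mentioned in the comment above; it reuses the isPrime function defined earlier, and the workers append to a shared list, which forces a thread-based backend and is typically much slower for CPU-bound work:

from joblib import Parallel, delayed

shared = []

def collect(n):
    # With require='sharedmem', all workers see the same `shared` list.
    if isPrime(n):
        shared.append(n)

Parallel(n_jobs=2, require='sharedmem')(delayed(collect)(n) for n in range(100))
print('primes found via shared list: %s' % len(shared))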