def entry_func(args=None):
    # Get parser
    parser = vars(get_parser().parse_args(args))

    # Get parser arguments
    cv_dir = os.path.abspath(parser["CV_dir"])
    out_dir = os.path.abspath(parser["out_dir"])
    create_folders(out_dir)
    await_PID = parser["wait_for"]
    run_split = parser["run_on_split"]
    start_from = parser["start_from"] or 0
    num_jobs = parser["num_jobs"] or 1

    # GPU settings
    num_GPUs = parser["num_GPUs"]
    force_GPU = parser["force_GPU"]
    ignore_GPU = parser["ignore_GPU"]
    monitor_GPUs_every = parser["monitor_GPUs_every"]

    # User input assertions
    _assert_force_and_ignore_gpus(force_GPU, ignore_GPU)
    if run_split:
        _assert_run_split(start_from, monitor_GPUs_every, num_jobs)

    # Wait for PID?
    if await_PID:
        from mpunet.utils import await_PIDs
        await_PIDs(await_PID)

    # Get file paths
    script = os.path.abspath(parser["script_prototype"])
    hparams = os.path.abspath(parser["hparams_prototype"])
    no_hparams = parser["no_hparams"]

    # Get list of folders of CV data to run on
    cv_folders = get_CV_folders(cv_dir)
    if run_split is not None:
        if run_split < 0 or run_split >= len(cv_folders):
            raise ValueError("--run_on_split should be in range [0-{}], "
                             "got {}".format(len(cv_folders) - 1, run_split))
        cv_folders = [cv_folders[run_split]]
        log_appendix = "_split{}".format(run_split)
    else:
        log_appendix = ""

    # Get a logger object
    logger = Logger(base_path="./",
                    active_file="output" + log_appendix,
                    print_calling_method=False,
                    overwrite_existing=True)

    if force_GPU:
        # Only these GPUs will be chosen from
        from mpunet.utils import set_gpu
        set_gpu(force_GPU)
    if num_GPUs:
        # Get GPU sets (up to the number of splits)
        gpu_sets = get_free_GPU_sets(num_GPUs, ignore_GPU)[:len(cv_folders)]
    elif not num_jobs or num_jobs < 0:
        raise ValueError("Should specify a number of jobs to run in parallel "
                         "with the --num_jobs flag when using 0 GPUs pr. "
                         "process (--num_GPUs=0 was set).")
    else:
        gpu_sets = ["''"] * parser["num_jobs"]

    # Get process pool, lock and GPU queue objects
    lock = Lock()
    gpu_queue = Queue()
    for gpu in gpu_sets:
        gpu_queue.put(gpu)

    procs = []
    if monitor_GPUs_every is not None and monitor_GPUs_every:
        logger("\nOBS: Monitoring GPU pool every %i seconds\n" %
               monitor_GPUs_every)
        # Start a process monitoring new GPU availability over time
        stop_event = Event()
        t = Process(target=monitor_GPUs,
                    args=(monitor_GPUs_every, gpu_queue, num_GPUs, ignore_GPU,
                          gpu_sets, stop_event))
        t.start()
        procs.append(t)
    else:
        stop_event = None
    try:
        for cv_folder in cv_folders[start_from:]:
            gpus = gpu_queue.get()
            t = Process(target=run_sub_experiment,
                        args=(cv_folder, out_dir, script, hparams, no_hparams,
                              gpus, gpu_queue, lock, logger))
            t.start()
            procs.append(t)
            for t in procs:
                if not t.is_alive():
                    t.join()
    except KeyboardInterrupt:
        for t in procs:
            t.terminate()
    if stop_event is not None:
        stop_event.set()
    for t in procs:
        t.join()
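# The loop above hands each cross-validation job a GPU set taken from a shared
# Queue; run_sub_experiment (not shown) presumably returns its set to the queue
# when it finishes, so the next job can start. A minimal, self-contained sketch
# of that GPU-queue pattern, with a hypothetical work() standing in for it:
from multiprocessing import Process, Queue


def work(job_id, gpus, gpu_queue):
    print("job %d running on GPUs %r" % (job_id, gpus))
    gpu_queue.put(gpus)  # hand the GPU set back so the next job can start


if __name__ == '__main__':
    gpu_queue = Queue()
    for gpus in ("0", "1"):          # two single-GPU sets in the pool
        gpu_queue.put(gpus)
    procs = []
    for job_id in range(4):
        gpus = gpu_queue.get()       # blocks until a GPU set is free
        p = Process(target=work, args=(job_id, gpus, gpu_queue))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()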
Example #2
                file.close()

                # boxplot
                try:
                    data = [a[0], b[0]]
                    fig, axs = plt.subplots(1, 2, sharey=True)
                    plt.suptitle("{} - {} \n P Value: {}".format(
                        problem, sourrogate, twosample_results[1]))

                    axs[0].boxplot(a[0])
                    axs[0].set_title('with Surrogate')
                    plt.ylim(
                        min(min(a[0]), min(b[0])) * 0.98,
                        max(max(a[0]), max(b[0])) * 1.02)
                    axs[1].boxplot(b[0])
                    axs[1].set_title("without Sorrugate")

                    plt.subplots_adjust(left=0.2, wspace=0.8, top=0.8)

                    plt.savefig("/home/naamah/Documents/CatES/result_All/" +
                                problem + "/" + fileName + "/BoxPlot")
                    # plt.show()
                    plt.close('all')
                    plt.close(fig)
                except RuntimeWarning:
                    print("i am in the warning")


if __name__ == '__main__':
    lock2 = Lock()
    def multi_fit(self, data, target, epochs=1, eval_x=None, eval_y=None):

        hash_x = data.copy()
        # Remove id if in the columns
        if self.id in hash_x:
            hash_x.drop(self.id, axis=1, inplace=True)

        if eval_x is not None:
            # val_hash_x = self.multi_hash(eval_x)
            val_hash_x = eval_x.copy()
            # Remove id if in the columns
            if self.id in val_hash_x:
                val_hash_x.drop(self.id, axis=1, inplace=True)

        start = datetime.now()
        count = len(data)
        n_ = Array('d', self.n, lock=False)
        z_ = Array('d', self.z, lock=False)
        lock_ = Lock()
        for e_ in range(epochs):
            # Compute predictions
            loss_train = Value('d', 0, lock=False)
            full_data = np.hstack((target.values.reshape(-1, 1), hash_x.values))
            # Here we use Process directly since map does not support shared objects
            # z and n will be updated wildly, no lock has been implemented
            processes = [
                Process(target=self._train_samples,
                        args=(partial_data, z_, n_, lock_, loss_train))
                for partial_data in np.array_split(full_data, self.cpus)
            ]

            for p in processes:
                p.start()

            while processes:
                processes.pop().join()

            if eval_x is not None:
                # Compute validation losses
                p = Pool(self.cpus)
                # Shared memory is not supported by map and z_ and n_ should be read only here
                # So we create np.arrays from the shared memory objects
                oof_v = p.map(functools.partial(self._predict_samples, z_=np.array(z_), n_=np.array(n_)),
                              np.array_split(val_hash_x.values, self.cpus))
                val_preds = np.hstack(oof_v)
                p.close()
                p.join()
                val_logloss = log_loss(eval_y, val_preds)
                val_auc = roc_auc_score(eval_y, val_preds)
                # Display current training and validation losses
                # t_logloss stands for current train_logloss, v for valid
                print('time_used:%s\tepoch: %-4drows:%d\tt_logloss:%.5f\tv_logloss:%.5f\tv_auc:%.6f'
                      % (datetime.now() - start, e_, count + 1, (loss_train.value / count), val_logloss, val_auc))
                del val_preds
                del oof_v
                gc.collect()
            else:
                print('time_used:%s\tepoch: %-4drows:%d\tt_logloss:%.5f'
                      % (datetime.now() - start, e_, count + 1, (loss_train.value / count)))

            # del loss_v
            # print(z_)
            gc.collect()

        self.n = np.array(n_)
        self.z = np.array(z_)
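# As the comments above note, Pool.map does not accept the shared Array
# objects, so the workers are launched with Process and guard their updates
# with a Lock. A minimal, self-contained sketch of that pattern, unrelated to
# the original class:
import numpy as np
from multiprocessing import Array, Lock, Process


def add_chunk(chunk, z_, lock_):
    total = chunk.sum()
    with lock_:                      # serialize updates to the shared buffer
        z_[0] += total


if __name__ == '__main__':
    z_ = Array('d', 4, lock=False)   # shared array of four doubles, all zero
    lock_ = Lock()
    data = np.arange(8.0)
    procs = [Process(target=add_chunk, args=(part, z_, lock_))
             for part in np.array_split(data, 2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(np.array(z_))              # z_[0] now holds the combined sum 28.0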
def __init__(self, initval=0):
    self.val = Value('i', initval)
    self.lock = Lock()
Example #5
def __init__(self, value=0):
    self.val = RawValue('i', value)
    self.lock = Lock()
Example #6
def main():
    argparser = argparse.ArgumentParser(
        description="horrible script for downloading anime")
    argparser.add_argument("-d",
                           "--download",
                           help="download a specific anime",
                           type=str)
    argparser.add_argument(
        "-o",
        "--output",
        help="directory to which it will download the files",
        type=str)
    argparser.add_argument("-e",
                           "--episodes",
                           help="specify specific episodes to download",
                           type=str)
    argparser.add_argument("-l",
                           "--list",
                           help="display list of available episodes",
                           action="store_true")
    argparser.add_argument("-r",
                           "--resolution",
                           help="specify resolution quality",
                           type=str)
    argparser.add_argument("--subscribe",
                           help="add a show to the config file",
                           type=str)
    argparser.add_argument("--batch",
                           help="search for batches as well as regular files",
                           action="store_true")
    argparser.add_argument("-q",
                           "--quiet",
                           help="set quiet mode on",
                           action="store_true")
    argparser.add_argument("-lc",
                           "--list-current",
                           help="list all currently airing shows",
                           action="store_true")
    argparser.add_argument("-c",
                           "--config",
                           help="config file location",
                           type=str)
    argparser.add_argument("--noconfirm",
                           help="Bypass any and all “Are you sure?” messages.",
                           action="store_true")
    args = argparser.parse_args()

    logger = logging.getLogger("info")

    if not args.config:
        config = ConfigManager()
    else:
        path, file = os.path.split(args.config)
        if file:
            config = ConfigManager(conf_dir=path, file=file)
        elif path:
            config = ConfigManager(conf_dir=path)
        else:
            config = ConfigManager()
    parser = Parser()

    if args.subscribe:
        episode_number = args.episodes if args.episodes else "0"
        title = parser.get_proper_title(args.subscribe)
        success, show = config.add_entry(title, episode_number)
        if success:
            print(f"Successfully subscribed to: \"{show.lower()}\"")
            print(f"Latest watched episode is - {episode_number}")
        else:
            print(
                f"You're already subscribed to \"{show}\", omitting changes..."
            )
        exit(0)

    if args.list:
        print("\n".join(parser.shows.keys()))
        exit(0)

    if args.list_current:
        print("\n".join(parser.current_shows.keys()))
        exit(0)

    clear()

    if args.output:
        config.download_dir = args.output

    if args.resolution:
        config.quality = args.resolution

    qualities = config.quality.split(",")
    if not valid_qualities(qualities):
        print("Bad resolution specified, aborting...")
        exit(1)

    if args.download:
        title = parser.get_proper_title(args.download)
        if not args.quiet:
            print(f"{fg(3)}FETCHING:{fg.rs} {title}")
        episodes = parser.get_episodes(args.download, batches=args.batch)

        def should_download(episode):
            if not args.episodes:
                return True
            return episode_filter(float(episode["episode"]), args.episodes)

        filtered_episodes = list(filter(should_download, episodes))[::-1]
        if not args.quiet:
            clear()
            dots = "." * (50 - len(title))
            found_str = f"FOUND ({len(filtered_episodes)})"
            print(
                f"{fg(3)}FETCHING: {fg.rs}{title}{dots}{fg(10)}{found_str}{fg.rs}"
            )

            episodes_len = len(filtered_episodes) * len(qualities)
            print(
                f'{fg(2)}\nFound {episodes_len} file{"s" if episodes_len > 1 else ""} to download:\n{fg.rs}'
            )
            for episode in filtered_episodes:
                for quality in qualities:
                    print(f'{title} - {episode["episode"]} [{quality}p].mkv')

            if not args.noconfirm and not args.quiet:
                inp = input(
                    f'{fg(3)}\nwould you like to proceed? [Y/n] {fg.rs}')
                if inp not in ('', 'Y', 'y', 'yes', 'Yes'):
                    print(fg(1) + 'aborting download\n' + fg.rs)
                    exit(1)

        for episode in filtered_episodes:
            download(episode, qualities, config.download_dir)
        exit(0)

    manager = Manager()
    initial_downloads_dict = {
        parser.get_proper_title(title): None
        for title in config.subscriptions.keys()
    }
    downloads = manager.dict(initial_downloads_dict)
    printing_lock = Lock()
    procs = []
    method = "batches" if args.batch else "show"

    if not args.quiet:
        clear()
        for title in initial_downloads_dict.keys():
            print(f"{fg(3)}FETCHING:{fg.rs} {title}")

    for entry in config.subscriptions.items():
        proc = Process(target=fetch_episodes,
                       args=(entry, downloads, printing_lock, parser,
                             args.batch, args.quiet))
        proc.start()
        procs.append(proc)

    for proc in procs:
        proc.join()

    downloads_list = []
    for episodes in downloads.values():
        downloads_list.extend(reversed(episodes))

    if downloads_list == []:
        if not args.quiet:
            print(fg(1) + 'No new episodes were found. Exiting ' + fg.rs)
        logger.info("No new episodes were found. Exiting ")
        exit(0)

    logger.info("found the following files:")
    if not args.quiet:
        episodes_len = len(downloads_list) * len(qualities)
        print(
            f'{fg(2)}\nFound {episodes_len} file{"s" if episodes_len > 1 else ""} to download:\n{fg.rs}'
        )
    for episode in downloads_list:
        for quality in qualities:
            if not args.quiet:
                print(
                    f'{episode["title"]} - {episode["episode"]} [{quality}p].mkv'
                )
            logger.info(
                f'{episode["title"]} - {episode["episode"]} [{quality}p].mkv')

    if not args.noconfirm and not args.quiet:
        inp = input(f'{fg(3)}\nwould you like to proceed? [Y/n] {fg.rs}')
        if inp not in ('', 'Y', 'y', 'yes', 'Yes'):
            print(fg(1) + 'aborting download\n' + fg.rs)
            logger.info("user has aboorted the download")
            exit(1)

    for episode in downloads_list:
        download(episode, qualities, config.download_dir)
        config.update_entry(episode["title"], episode["episode"])
        logger.info(
            f'updated entry: {episode["title"]} - {episode["episode"]}')
    exit(0)
Example #7
from multiprocessing import Process, Lock
import time


def task(name, mutex):
    mutex.acquire()  # acquire the lock; whichever process grabs it keeps running until it releases it
    print('%s first time' % name)
    time.sleep(1)
    print('%s second time' % name)
    time.sleep(1)
    print('%s third time' % name)
    mutex.release()  # release the lock


if __name__ == '__main__':
    mutex = Lock()  # instantiate only once and pass it to the children, so every process uses the same lock
    for i in range(3):
        p = Process(target=task, args=('process %s' % i, mutex))  # the lock passed to the child process
        p.start()
"""
进程0 第一次
进程0 第二次
进程0 第三次
进程1 第一次
进程1 第二次
进程1 第三次
进程2 第一次
进程2 第二次
进程2 第三次
"""
# 由上可见,互斥锁的原理,就是把并发改成串行,降低了效率,但保证了数据安全不错乱
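# A minimal counter-sketch, assuming nothing beyond the example above: with the
# mutex removed, the three phases of the tasks interleave freely instead of one
# process running to completion at a time.
from multiprocessing import Process
import time


def task(name):
    for phase in ('first', 'second', 'third'):
        print('%s %s time' % (name, phase))
        time.sleep(1)


if __name__ == '__main__':
    for i in range(3):
        Process(target=task, args=('process %s' % i,)).start()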
Example #8
def __init__(self):
    self.lock = Lock()
    self.values = Manager().dict()
Example #9
class MangIP():
    def __init__(self):
        pass

    is_reachable = 0
    state_changes = 0
    hostname = ''
    ip = '127.0.0.1'
    ping_interval = 5
    p_timeout = 4       # Ping timeout
    #internally used
    p_stop_thread = 0
    p_ping_number = 0
    p_delay = 0
    p_state_kept = 1
    p_failed_count = 0
    p_first_ping = 1
    p_prev_state = 0
    p_recent_change = 0

    mutex = Lock()

    def ping(self):
        try:
            d = ping.do_one(self.ip, self.p_timeout)
            if d is None:
                self.is_reachable = 0
            else:
                self.is_reachable = 1
                self.p_delay = d
            d = None
        except socket.gaierror:
            self.hostname = 'ip_err'
            self.is_reachable = 0

    def check_reachability(self):
        self.ping()
        if not self.is_reachable:
            self.p_failed_count += 1

        if self.p_first_ping:
            self.p_prev_state = self.is_reachable
            self.p_state_kept = 1
            self.p_first_ping = 0
        elif self.p_prev_state != self.is_reachable:
            self.p_state_kept = 0
            self.p_recent_change = 1
        else:
            self.p_state_kept += 1

        if self.p_state_kept > 4 and self.p_recent_change:
            self.p_recent_change = 0
            self.state_changes += 1

        self.p_prev_state = self.is_reachable

    def start_check(self, v_interval):
        logging.debug("Thread start for IP -  %s", str(self.ip))
        self.get_dns()
        while not(self.p_stop_thread):
            with self.mutex: #Ping implementation is not thread safe
                self.check_reachability()
            time.sleep(v_interval)
        logging.debug("Thread stop for IP -  %s", str(self.ip))

    def get_dns(self):
        try:
            self.hostname = socket.getfqdn(self.ip)
        except socket.gaierror:
            self.hostname = 'NA'
        except Exception as e:
            print(type(e).__name__)
            logging.error("Can't resolve hostname for %s", str(self.ip))


    def set_ip(self, v_ip):
        self.ip = v_ip

    def to_str(self):
        k = "OFFLINE"
        if self.is_reachable:
            k = "ONLINE"
        return ",".join([self.ip, self.hostname, str(self.state_changes), k])
Example #10
Even when using threads, the recommended approach is to design the program as a collection of independent threads that exchange data through message queues.
This greatly reduces the need for locks and other synchronization primitives, and it also extends naturally to distributed systems.

Between processes, communication should be avoided where possible; when it is needed, prefer process-safe tools so the problems that come with locking are avoided.

The root cause of unsafe data is multiple processes competing for the same resource.

'''
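# A minimal sketch of the queue-based exchange recommended in the note above
# (the producer/consumer names are hypothetical): processes communicate through
# a Queue instead of mutating shared state under a lock.
from multiprocessing import Process, Queue


def producer(q):
    for i in range(5):
        q.put(i)
    q.put(None)                      # sentinel: no more work


def consumer(q):
    while True:
        item = q.get()
        if item is None:
            break
        print('consumed', item)


if __name__ == '__main__':
    q = Queue()
    p = Process(target=producer, args=(q,))
    c = Process(target=consumer, args=(q,))
    p.start()
    c.start()
    p.join()
    c.join()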

from multiprocessing import Manager,Process,Lock

def man_dict(dic, locks):
    locks.acquire()
    dic['count'] -= 1
    locks.release()
    print('child process ---', dic)

if __name__ == '__main__':
    m=Manager()
    locks=Lock()
    p_list=[]
    dicts=m.dict({'count':100})
    for i in range(50):
        p=Process(target=man_dict,args=(dicts,locks))
        p.start()
        p_list.append(p)
    [a.join() for a in p_list]
    print('main process ---', dicts)


import os
from multiprocessing import Process, Queue, Lock
import requests
import time

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'}
proc_num = 20
queue_size = 2048
url_set = set()
urls_queue = Queue(queue_size)
queue_lock = Lock()

save_path = "./fec_data"

def producer(src_file_path, urls_queue):
    with open(src_file_path, "r") as src_fr:
        index = 0
        ori_list = src_fr.readlines()
        for imgs_line in ori_list:
            for img_url in imgs_line.split(",")[0: 11: 5]:
                index = index + 1
                if 0 == index % 2000:
                    print("now process is %d / %d " % (index, len(ori_list)))
                if img_url in url_set:
                    continue
                else:
                    url_set.add(img_url)
                    while urls_queue.full():
                        time.sleep(1)
                    queue_lock.acquire()
                    urls_queue.put(img_url)
Example #12
from .group import Group
from pprint import pprint as pretify

from multiprocessing import Value, Lock

_TOKEN = "fdc75be5fdc75be5fd75206256fd9e9466ffdc7fdc75be5a56da843614668f3d7775d73"
_LOCK = Lock()


def build(group_id, *, status={}):
    max_posts = 2000
    group = Group(group_id=group_id, access_token=_TOKEN, max_posts=max_posts)

    members_count, members = group.members()
    posts_count, posts = group.posts()

    post_number = min(posts_count, max_posts)
    count = members_count + max_posts

    males = 0
    females = 0
    cities = []
    for index, item in enumerate(members, start=1):
        females = females + int(item["sex"] == 1)
        males = males + int(item["sex"] == 2)
        cities.append(
            item["city"]["title"] if item.get("city", None) else "Unknown")

        with _LOCK:
            status["members"] = index / members_count
Example #13
import sys
import time
import socket
from ctypes import c_int, c_bool
from multiprocessing import Process, Value, Lock
import logging
import logging.handlers
import os
import random

#host = sys.argv[1]
#port = int(sys.argv[2]) if len(sys.argv) == 3 else 6379

process_count = 5  # concurrent sender agents

counter_lock = Lock()
def increment(counter):
    with counter_lock:
        counter.value += 1

def reset(counter):
    with counter_lock:
        counter.value = 0

class Controller:
    def __init__(self):
        self.count_ref = Value(c_int)

    def start(self):
        for i in range(process_count):
            agent = Agent(self.count_ref)
Example #14
def __init__(self):
    self.mutex = Lock()
Example #15
def main(cmdargs):
    """Compute the auto and cross-correlation of delta fields"""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Compute the auto and cross-correlation of delta fields')

    parser.add_argument('--out',
                        type=str,
                        default=None,
                        required=True,
                        help='Output file name')

    parser.add_argument('--in-dir',
                        type=str,
                        default=None,
                        required=True,
                        help='Directory to delta files')

    parser.add_argument('--from-image',
                        type=str,
                        default=None,
                        required=False,
                        help='Read delta from image format',
                        nargs='*')

    parser.add_argument('--in-dir2',
                        type=str,
                        default=None,
                        required=False,
                        help='Directory to 2nd delta files')

    parser.add_argument('--rp-min',
                        type=float,
                        default=0.,
                        required=False,
                        help='Min r-parallel [h^-1 Mpc]')

    parser.add_argument('--rp-max',
                        type=float,
                        default=200.,
                        required=False,
                        help='Max r-parallel [h^-1 Mpc]')

    parser.add_argument('--rt-max',
                        type=float,
                        default=200.,
                        required=False,
                        help='Max r-transverse [h^-1 Mpc]')

    parser.add_argument('--np',
                        type=int,
                        default=50,
                        required=False,
                        help='Number of r-parallel bins')

    parser.add_argument('--nt',
                        type=int,
                        default=50,
                        required=False,
                        help='Number of r-transverse bins')

    parser.add_argument(
        '--z-cut-min',
        type=float,
        default=0.,
        required=False,
        help=('Use only pairs of forest x object with the mean '
              'of the last absorber redshift and the object '
              'redshift larger than z-cut-min'))

    parser.add_argument(
        '--z-cut-max',
        type=float,
        default=10.,
        required=False,
        help=('Use only pairs of forest x object with the mean '
              'of the last absorber redshift and the object '
              'redshift smaller than z-cut-max'))

    parser.add_argument('--lambda-abs',
                        type=str,
                        default='LYA',
                        required=False,
                        help=('Name of the absorption in picca.constants '
                              'defining the redshift of the delta'))

    parser.add_argument('--lambda-abs2',
                        type=str,
                        default=None,
                        required=False,
                        help=('Name of the absorption in picca.constants '
                              'defining the redshift of the 2nd delta'))

    parser.add_argument('--z-ref',
                        type=float,
                        default=2.25,
                        required=False,
                        help='Reference redshift')

    parser.add_argument(
        '--z-evol',
        type=float,
        default=2.9,
        required=False,
        help=('Exponent of the redshift evolution of the delta '
              'field'))

    parser.add_argument('--z-evol2',
                        type=float,
                        default=2.9,
                        required=False,
                        help=('Exponent of the redshift evolution of the 2nd '
                              'delta field'))

    parser.add_argument('--fid-Om',
                        type=float,
                        default=0.315,
                        required=False,
                        help=('Omega_matter(z=0) of fiducial LambdaCDM '
                              'cosmology'))

    parser.add_argument('--fid-Or',
                        type=float,
                        default=0.,
                        required=False,
                        help=('Omega_radiation(z=0) of fiducial LambdaCDM '
                              'cosmology'))

    parser.add_argument('--fid-Ok',
                        type=float,
                        default=0.,
                        required=False,
                        help='Omega_k(z=0) of fiducial LambdaCDM cosmology')

    parser.add_argument('--fid-wl',
                        type=float,
                        default=-1.,
                        required=False,
                        help=('Equation of state of dark energy of fiducial '
                              'LambdaCDM cosmology'))

    parser.add_argument('--no-project',
                        action='store_true',
                        required=False,
                        help='Do not project out continuum fitting modes')

    parser.add_argument('--remove-same-half-plate-close-pairs',
                        action='store_true',
                        required=False,
                        help=('Reject pairs in the first bin in r-parallel '
                              'from same half plate'))

    parser.add_argument('--nside',
                        type=int,
                        default=16,
                        required=False,
                        help='Healpix nside')

    parser.add_argument('--nproc',
                        type=int,
                        default=None,
                        required=False,
                        help='Number of processors')

    parser.add_argument('--nspec',
                        type=int,
                        default=None,
                        required=False,
                        help='Maximum number of spectra to read')

    parser.add_argument(
        '--unfold-cf',
        action='store_true',
        required=False,
        help=('rp can be positive or negative depending on the '
              'relative position between absorber1 and '
              'absorber2'))

    parser.add_argument('--shuffle-distrib-forest-seed',
                        type=int,
                        default=None,
                        required=False,
                        help=('Shuffle the distribution of forests on the sky '
                              'following the given seed. Do not shuffle if '
                              'None'))

    args = parser.parse_args(cmdargs)

    if args.nproc is None:
        args.nproc = cpu_count() // 2

    # setup variables in module cf
    cf.r_par_max = args.rp_max
    cf.r_trans_max = args.rt_max
    cf.r_par_min = args.rp_min
    cf.z_cut_max = args.z_cut_max
    cf.z_cut_min = args.z_cut_min
    cf.num_bins_r_par = args.np
    cf.num_bins_r_trans = args.nt
    cf.nside = args.nside
    cf.z_ref = args.z_ref
    cf.alpha = args.z_evol
    cf.lambda_abs = constants.ABSORBER_IGM[args.lambda_abs]
    cf.remove_same_half_plate_close_pairs = args.remove_same_half_plate_close_pairs

    # read blinding keyword
    blinding = io.read_blinding(args.in_dir)

    # load fiducial cosmology
    cosmo = constants.Cosmo(Om=args.fid_Om,
                            Or=args.fid_Or,
                            Ok=args.fid_Ok,
                            wl=args.fid_wl,
                            blinding=blinding)

    t0 = time.time()

    ### Read data 1
    data, num_data, z_min, z_max = io.read_deltas(args.in_dir,
                                                  cf.nside,
                                                  cf.lambda_abs,
                                                  cf.alpha,
                                                  cf.z_ref,
                                                  cosmo,
                                                  max_num_spec=args.nspec,
                                                  no_project=args.no_project,
                                                  from_image=args.from_image)
    del z_max
    cf.data = data
    cf.num_data = num_data
    cf.ang_max = utils.compute_ang_max(cosmo, cf.r_trans_max, z_min)
    userprint("")
    userprint("done, npix = {}".format(len(data)))

    ### Read data 2
    if args.in_dir2 or args.lambda_abs2:
        if args.lambda_abs2 or args.unfold_cf:
            cf.x_correlation = True
        cf.alpha2 = args.z_evol2
        if args.in_dir2 is None:
            args.in_dir2 = args.in_dir
        if args.lambda_abs2:
            cf.lambda_abs2 = constants.ABSORBER_IGM[args.lambda_abs2]
        else:
            cf.lambda_abs2 = cf.lambda_abs

        data2, num_data2, z_min2, z_max2 = io.read_deltas(
            args.in_dir2,
            cf.nside,
            cf.lambda_abs2,
            cf.alpha2,
            cf.z_ref,
            cosmo,
            max_num_spec=args.nspec,
            no_project=args.no_project,
            from_image=args.from_image)
        del z_max2
        cf.data2 = data2
        cf.num_data2 = num_data2
        cf.ang_max = utils.compute_ang_max(cosmo, cf.r_trans_max, z_min,
                                           z_min2)
        userprint("")
        userprint("done, npix = {}".format(len(data2)))

    # shuffle forests
    if args.shuffle_distrib_forest_seed is not None:
        cf.data = utils.shuffle_distrib_forests(
            cf.data, args.shuffle_distrib_forest_seed)

    t1 = time.time()
    userprint(f'picca_cf.py - Time reading data: {(t1-t0)/60:.3f} minutes')
    # compute correlation function, use pool to parallelize
    cf.counter = Value('i', 0)
    cf.lock = Lock()
    cpu_data = {healpix: [healpix] for healpix in data}
    context = multiprocessing.get_context('fork')
    pool = context.Pool(processes=args.nproc)
    correlation_function_data = pool.map(corr_func, sorted(cpu_data.values()))
    pool.close()

    t2 = time.time()
    userprint(
        f'picca_cf.py - Time computing correlation function: {(t2-t1)/60:.3f} minutes'
    )

    # group data from parallelisation
    correlation_function_data = np.array(correlation_function_data)
    weights_list = correlation_function_data[:, 0, :]
    xi_list = correlation_function_data[:, 1, :]
    r_par_list = correlation_function_data[:, 2, :]
    r_trans_list = correlation_function_data[:, 3, :]
    z_list = correlation_function_data[:, 4, :]
    num_pairs_list = correlation_function_data[:, 5, :].astype(np.int64)
    healpix_list = np.array(sorted(list(cpu_data.keys())))

    # normalize values
    w = (weights_list.sum(axis=0) > 0.)
    r_par = (r_par_list * weights_list).sum(axis=0)
    r_par[w] /= weights_list.sum(axis=0)[w]
    r_trans = (r_trans_list * weights_list).sum(axis=0)
    r_trans[w] /= weights_list.sum(axis=0)[w]
    z = (z_list * weights_list).sum(axis=0)
    z[w] /= weights_list.sum(axis=0)[w]
    num_pairs = num_pairs_list.sum(axis=0)

    # save data
    results = fitsio.FITS(args.out, 'rw', clobber=True)
    header = [{
        'name': 'RPMIN',
        'value': cf.r_par_min,
        'comment': 'Minimum r-parallel [h^-1 Mpc]'
    }, {
        'name': 'RPMAX',
        'value': cf.r_par_max,
        'comment': 'Maximum r-parallel [h^-1 Mpc]'
    }, {
        'name': 'RTMAX',
        'value': cf.r_trans_max,
        'comment': 'Maximum r-transverse [h^-1 Mpc]'
    }, {
        'name': 'NP',
        'value': cf.num_bins_r_par,
        'comment': 'Number of bins in r-parallel'
    }, {
        'name': 'NT',
        'value': cf.num_bins_r_trans,
        'comment': 'Number of bins in r-transverse'
    }, {
        'name': 'ZCUTMIN',
        'value': cf.z_cut_min,
        'comment': 'Minimum redshift of pairs'
    }, {
        'name': 'ZCUTMAX',
        'value': cf.z_cut_max,
        'comment': 'Maximum redshift of pairs'
    }, {
        'name': 'NSIDE',
        'value': cf.nside,
        'comment': 'Healpix nside'
    }, {
        'name': 'OMEGAM',
        'value': args.fid_Om,
        'comment': 'Omega_matter(z=0) of fiducial LambdaCDM cosmology'
    }, {
        'name': 'OMEGAR',
        'value': args.fid_Or,
        'comment': 'Omega_radiation(z=0) of fiducial LambdaCDM cosmology'
    }, {
        'name': 'OMEGAK',
        'value': args.fid_Ok,
        'comment': 'Omega_k(z=0) of fiducial LambdaCDM cosmology'
    }, {
        'name': 'WL',
        'value': args.fid_wl,
        'comment': ('Equation of state of dark energy of fiducial LambdaCDM '
                    'cosmology')
    }, {
        'name': "BLINDING",
        'value': blinding,
        'comment': 'String specifying the blinding strategy'
    }]
    results.write(
        [r_par, r_trans, z, num_pairs],
        names=['RP', 'RT', 'Z', 'NB'],
        comment=['R-parallel', 'R-transverse', 'Redshift', 'Number of pairs'],
        units=['h^-1 Mpc', 'h^-1 Mpc', '', ''],
        header=header,
        extname='ATTRI')

    header2 = [{
        'name': 'HLPXSCHM',
        'value': 'RING',
        'comment': 'Healpix scheme'
    }]
    xi_list_name = "DA"
    if blinding != "none":
        xi_list_name += "_BLIND"
    results.write([healpix_list, weights_list, xi_list],
                  names=['HEALPID', 'WE', xi_list_name],
                  comment=['Healpix index', 'Sum of weight', 'Correlation'],
                  header=header2,
                  extname='COR')

    results.close()

    t3 = time.time()
    userprint(f'picca_cf.py - Time total : {(t3-t0)/60:.3f} minutes')
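# The main() above stores a shared counter and lock on the cf module and relies
# on the 'fork' start method so the Pool workers inherit them. A minimal,
# stand-alone sketch of that progress-counter pattern (corr_func_stub and the
# global names here are hypothetical; 'fork' requires a Unix-like platform):
import multiprocessing
from multiprocessing import Lock, Value

counter = Value('i', 0)
lock = Lock()


def corr_func_stub(healpixs):
    # Each worker bumps the shared progress counter under the lock.
    with lock:
        counter.value += 1
    return sum(healpixs)


if __name__ == '__main__':
    context = multiprocessing.get_context('fork')
    pool = context.Pool(processes=2)
    results = pool.map(corr_func_stub, [[1], [2], [3]])
    pool.close()
    pool.join()
    print(counter.value, results)    # 3 [1, 2, 3]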
Example #16
    def __init__(self,
                 file_name=None,
                 sample=None,
                 image_dir=None,
                 dir=None,
                 preload=False,
                 noise_dir=None,
                 logger=None,
                 _nobjects_only=False):
        if sample is not None and file_name is not None:
            raise ValueError("Cannot specify both the sample and file_name!")

        from galsim._pyfits import pyfits
        self.file_name, self.image_dir, self.noise_dir, _ = \
            _parse_files_dirs(file_name, image_dir, dir, noise_dir, sample)

        self.cat = pyfits.getdata(self.file_name)
        self.nobjects = len(self.cat)  # number of objects in the catalog
        if _nobjects_only: return  # Exit early if that's all we needed.
        ident = self.cat.field('ident')  # ID for object in the training sample

        # We want to make sure that the ident array contains all strings.
        # Strangely, ident.astype(str) produces a string with each element == '1'.
        # Hence this way of doing the conversion:
        self.ident = ["%s" % val for val in ident]

        self.gal_file_name = self.cat.field(
            'gal_filename')  # file containing the galaxy image
        self.psf_file_name = self.cat.field(
            'PSF_filename')  # file containing the PSF image

        # Add the directories:
        # Note the strip call.  Sometimes the filenames have an extra space at the end.
        # This gets rid of that space.
        self.gal_file_name = [
            os.path.join(self.image_dir, f.strip()) for f in self.gal_file_name
        ]
        self.psf_file_name = [
            os.path.join(self.image_dir, f.strip()) for f in self.psf_file_name
        ]

        # We don't require the noise_filename column.  If it is not present, we will use
        # Uncorrelated noise based on the variance column.
        try:
            self.noise_file_name = self.cat.field(
                'noise_filename')  # file containing the noise cf
            self.noise_file_name = [
                os.path.join(self.noise_dir, f) for f in self.noise_file_name
            ]
        except:
            self.noise_file_name = None

        self.gal_hdu = self.cat.field(
            'gal_hdu')  # HDU containing the galaxy image
        self.psf_hdu = self.cat.field(
            'PSF_hdu')  # HDU containing the PSF image
        self.pixel_scale = self.cat.field(
            'pixel_scale')  # pixel scale for image (could be different
        # if we have training data from other datasets... let's be general here and make it a
        # vector in case of mixed training set)
        self.variance = self.cat.field(
            'noise_variance')  # noise variance for image
        self.mag = self.cat.field('mag')  # apparent magnitude
        self.band = self.cat.field(
            'band')  # bandpass in which apparent mag is measured, e.g., F814W
        self.weight = self.cat.field(
            'weight')  # weight factor to account for size-dependent
        # probability
        if 'stamp_flux' in self.cat.names:
            self.stamp_flux = self.cat.field('stamp_flux')

        self.saved_noise_im = {}
        self.loaded_files = {}
        self.logger = logger

        # The pyfits commands aren't thread safe.  So we need to make sure the methods that
        # use pyfits are not run concurrently from multiple threads.
        from multiprocessing import Lock
        self.gal_lock = Lock()  # Use this when accessing gal files
        self.psf_lock = Lock()  # Use this when accessing psf files
        self.loaded_lock = Lock()  # Use this when opening new files from disk
        self.noise_lock = Lock()  # Use this for building the noise image(s) (usually just one)

        # Preload all files if desired
        if preload: self.preload()
        self._preload = preload
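# The comment above explains that the pyfits calls are not thread safe, so any
# method touching them must hold one of these locks. A minimal, stand-alone
# sketch of that guard (read_fits and the cache are hypothetical, not part of
# the original class):
from multiprocessing import Lock

read_lock = Lock()   # plays the role of gal_lock / psf_lock / loaded_lock


def read_fits(file_name, loaded_files):
    # Serialize both the (non-thread-safe) read and the cache update.
    with read_lock:
        if file_name not in loaded_files:
            loaded_files[file_name] = "<parsed HDUs of %s>" % file_name
        return loaded_files[file_name]


if __name__ == '__main__':
    cache = {}
    print(read_fits("gal_0001.fits", cache))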
Example #17
def __init__(self):
    self.blocks_lock = Lock()
    self.db_init()
        ef_map[line[0]].append(line[1])
for line in open(fmap2):
    line = line.rstrip('\n').split('@@@')
    if len(line) != 2:
        continue
    vocab_f.append(line[0])
    if fe_map.get(line[1]) is None:
        fe_map[line[1]] = [line[0]]
    else:
        fe_map[line[1]].append(line[0])

print "Loaded en_de de_en mappings."

#en:...
manager = Manager()
lock1 = Lock()

past_num = Value('i', 0, lock=True)
score = manager.list()#store hit @ k

rank = Value('d', 0.0, lock=True)
rank_num = Value('i', 0, lock=True)

cpu_count = multiprocessing.cpu_count()
t0 = time.time()
def test(model, vocab, index, src_lan, tgt_lan, map, score, past_num):
    while index.value < len(vocab):
        id = index.value
        index.value += 1
        word = vocab[id]
        if id % 100 == 0:
        #  Signal the producer so that it can produce again
        empty.release()

        #  Put the thread to sleep
        sono = random.randint(0, 10)
        print('Consumer %d sleeping for %d seconds.' % (numConsumidor, sono))
        time.sleep(sono)
    return


if __name__ == '__main__':

    BUFFER_SIZE = 5  #  Buffer size
    buffer = Array('i', BUFFER_SIZE)  #  Our bounded buffer, initialized
    lock = Lock()  #  Mutex lock
    empty = Semaphore(BUFFER_SIZE)  #  Semaphore counting the free slots
    full = Semaphore(0)  #  Semaphore counting the occupied slots
    a_consumir = Value('i', 0)  #  Index of the next item to be consumed
    a_produzir = Value('i', 0)  #  Index of the next item to be produced
    timeout = time.time() + int(sys.argv[1])  #  How long the algorithm will run
    produtores = []  #  List holding the producer processes
    consumidores = []  #  List holding the consumer processes

    #  Start the producers
    for i in range(int(sys.argv[2])):
        Process(target=produtor,
                args=(lock, empty, full, buffer, a_produzir, BUFFER_SIZE, i,
                      timeout)).start()
Example #20
import koji
import koji_cli.lib
import requests
from requests_kerberos import HTTPKerberosAuth

logger = logutil.getLogger(__name__)

# ============================================================================
# Brew/Koji service interaction functions
# ============================================================================

# Populated by watch_task. Each task_id will be a key in the dict and
# each value will be a TaskInfo: https://github.com/openshift/enterprise-images/pull/178#discussion_r173812940
watch_task_info = {}
# Protects threaded access to watch_task_info
watch_task_lock = Lock()


def get_watch_task_info_copy():
    """
    :return: Returns a copy of the watch_task info dict in a thread safe way. Each key in this dict
     is a task_id and each value is a koji TaskInfo with potentially useful data.
     https://github.com/openshift/enterprise-images/pull/178#discussion_r173812940
    """
    with watch_task_lock:
        return dict(watch_task_info)


def watch_task(brew_hub, log_f, task_id, terminate_event):
    end = time.time() + 4 * 60 * 60
    watcher = koji_cli.lib.TaskWatcher(
Example #21
def create(self):
    if self._lock is None:
        self._lock = Lock()
Example #22
    if 0 == len(ips):
        print('No ips found, quit ...')
    return ips


def initial():
    global csv_writer, process_lock, ssh_key, ips, csv_fp, process_list
    try:
        ssh_key = paramiko.RSAKey.from_private_key_file('/root/.ssh/id_rsa')
        csv_fp = open('/tmp/agent_result.csv', 'wb', 0)
        csv_writer = csv.writer(csv_fp)
    except Exception as e:
        print(e)
        sys.exit(1)
    ips = get_hosts(options.host)
    process_lock = Lock()
    process_list = {}
    return


def get_options():
    global options
    usage = '%s\n\t-R remote-cmd -H ip-file -P process-num -T timeout' % sys.argv[0]
    usage += '\n\t-S local-file -D remote-dir -H ip-file -P process-num -T timeout'
    usage += '\n\t-L local-cmd -H ip-file -P process-num -T timeout'
    usage += '\nThe pattern "IIPP" in options "RSDL" will be replaced by the ip contained in each process.'
    usage += '\nThe result file is /tmp/agent_result.csv'
    parser = OptionParser(usage)
    parser.add_option('-R',
                      '--remote_cmd',
Example #23
def prob_cond(parser_parameters, region, theta, nProcess, ratio, n, prefix, pileup_prefix, ancestral):
    lock = Lock()
    task_queue = Queue()
    done_queue = Queue()
    block = 10000
    pileup = pp.openPileup(pileup_prefix, 'rb')

    if region:
        chro = region[0]
        start = region[1]
        end = region[2]
        offset_default = pileup.tell()
        pileup_line = pileup.readline()
        a = pileup_line.split()[0]
        while(a != chro):
            offset_default = pileup.tell()
            pileup_line = pileup.readline()
            try:
                a = pileup_line.split()[0]
            except IndexError:
                #if the pileup_line can't be splited, that's the end of the file
                print ('ERROR : chro %s not found' % (chro))
                sys.exit()
            #...
        #...
        if start:
            a = int(pileup_line.split()[1])
            b = pileup_line.split()[0]
            if a > end:
                print ('ERROR : interval\'s positions not found.')
                sys.exit()
            #...
            while a < start and b == chro:
                offset_default = pileup.tell()
                pileup_line = pileup.readline()
                try:
                    a = int(pileup_line.split()[1])
                    b = pileup_line.split()[0]
                except IndexError:
                    #if the pileup_line can't be splited, that's the end of the file
                    print ('ERROR : interval\'s positions not found.')
                #...
            #...
            if b != chro:
                print ('ERROR : interval\'s positions not found.')
                sys.exit()
        #...
        offset_table = [offset_default]
        nbLine = 0
        split_pileup = pileup_line.split()
        while split_pileup[0] == chro:
            if start:
                if int(split_pileup[1]) > end:
                    break
                #...
             #...
            nbLine += 1
            if nbLine % block == 0:
                offset_table.append(pileup.tell())
            #...
            pileup_line = pileup.readline()
            split_pileup = pileup_line.split()
            if len(split_pileup) == 0:
                break
            #...
        #...
    #...
    else:
        offset_table = [0]
        nbLine = 0
        pileup_line = pileup.readline()
        while(pileup_line != ''): #if pileup_line == '', that's the end of the file
            nbLine += 1
            if nbLine % block == 0:
                offset_table.append(pileup.tell())
            #...
            pileup_line = pileup.readline()
        #...
    #...
    pileup.close()

    #for each offset except the last one
    for offset in offset_table[:-1]:
        task_queue.put([offset,block])
    #...

    #management of the last line_block
    if nbLine % block != 0:
        task_queue.put([offset_table[-1],nbLine % block])
    #...

    del offset_table

    for i in range(nProcess):
        task_queue.put('STOP')
    #...

    for i in range(nProcess):
        p = Process(target=process_probCond,
                    args=(task_queue, done_queue, lock, pileup_prefix,
                          parser_parameters, ratio, n, ancestral))
        p.start()
    #...

    while task_queue.qsize() != 0:
        pass
    #...

    p_neutral = []
    for i in range(done_queue.qsize()):
        p_neutral += done_queue.get()
    #...

    p_neutral = np.array(p_neutral)
    p_neutral = comp_spectrum(p_neutral,n,theta,ancestral)
    np.savetxt(prefix + '.spectrum', np.array([p_neutral]),delimiter=' ', fmt='%.6e')
    return p_neutral
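# A minimal, stand-alone sketch of the task-queue / STOP-sentinel pattern used
# in prob_cond above (worker is hypothetical): each worker consumes tasks until
# it reads 'STOP' and pushes its results onto a done queue.
from multiprocessing import Process, Queue


def worker(task_queue, done_queue):
    for task in iter(task_queue.get, 'STOP'):
        done_queue.put(task * 2)


if __name__ == '__main__':
    n_process = 2
    n_tasks = 6
    task_queue = Queue()
    done_queue = Queue()
    for task in range(n_tasks):
        task_queue.put(task)
    for _ in range(n_process):
        task_queue.put('STOP')
    procs = [Process(target=worker, args=(task_queue, done_queue))
             for _ in range(n_process)]
    for p in procs:
        p.start()
    results = [done_queue.get() for _ in range(n_tasks)]  # drain before joining
    for p in procs:
        p.join()
    print(sorted(results))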
Example #24
from time import sleep
from multiprocessing import Process, Value, Lock


def counter(c, l):
    # without lock, you may not get the same answer consistently
    for i in range(10):
        sleep(.5)
        l.acquire()
        c.value += 1
        l.release()

    return 0


if __name__ == '__main__':
    v = Value('i', 0)  # create an integer Value initialized to zero
    l = Lock()
    p1 = Process(target=counter, args=(v, l))
    p2 = Process(target=counter, args=(v, l))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    print(v.value)
def __init__(self, starting_set=set()):
    self.seen = starting_set
    self.seen_lock = Lock()
    self.queue = Queue()
        self.filename = filename
        self.linenumber = linenumber
        return self

    def __getnewargs__(self):
        return unicode(self), self.filename, self.linenumber

class PyCode(magic.FakeStrict):
    __module__ = "renpy.ast"
    def __setstate__(self, state):
        (_, self.source, self.location, self.mode) = state
        self.bytecode = None

class_factory = magic.FakeClassFactory((PyExpr, PyCode), magic.FakeStrict)

printlock = Lock()

# API

def read_ast_from_file(in_file):
    # .rpyc files are just zlib compressed pickles of a tuple of some data and the actual AST of the file
    raw_contents = in_file.read()
    if raw_contents.startswith("RENPY RPC2"):
        # parse the archive structure
        position = 10
        chunks = {}
        while True:
            slot, start, length = struct.unpack("III", raw_contents[position: position + 12])
            if slot == 0:
                break
            position += 12
Example #27
def main():
    problem_list = ["QAP"]  # "Pump","NKL","QAP"
    sourrogate_list = ["RF", "SVM"]  # RBFN KRIGING "RF","SVM","KNN"

    for problem in problem_list:
        for sourrogate in sourrogate_list:

            probInit = InitProblem()
            numThreads = 30
            niter = 100
            FileNumToSave = 999
            withSurr = True
            withThreads = True
            intiNumIterWituoutSurr = 20
            isKmeans = False

            if withSurr:
                numToCompare = 2
            else:
                numToCompare = 1

            lock2 = Lock()
            fileName = sourrogate + "_" + str(FileNumToSave)

            if problem == "Pump":
                typeExp, numPar, popSize, fitness, DEVector, max_attainable, opt, fixNumber = probInit.swedishPump(
                )
            elif problem == "QAP":
                typeExp, numPar, popSize, fitness, DEVector, max_attainable, opt, fixNumber = probInit.QAP(
                )
            else:  # NKL
                typeExp, numPar, popSize, fitness, DEVector, max_attainable, opt, fixNumber = probInit.NKLandscape(
                )

            call_F_function_counter = popSize * 70  # 2100
            ans = [[] for i in range(numToCompare)]

            for seed in range(numToCompare):
                start_time = datetime.now()
                psutil.cpu_percent(interval=None)
                psutil.cpu_times_percent(interval=None)
                avr = 0

                if seed == 1:
                    withSurr = False

                cat = CatES(fitness, DEVector, typeExp, numPar, popSize, seed,
                            max_attainable, niter, opt, fixNumber, sourrogate,
                            FileNumToSave, call_F_function_counter, withSurr,
                            problem, intiNumIterWituoutSurr, isKmeans)
                #cat.set_exp()
                print("iteration number {}".format(seed + 1))
                fmax = []
                fmaxSum = 0
                numIter = niter
                num_call_F = call_F_function_counter
                maxFitList = []
                maxVectorList = []

                if withThreads:
                    fmax.append(
                        Parallel(n_jobs=numThreads,
                                 verbose=0)(delayed(cat.test_exp)(i)
                                            for i in range(numThreads)))

                    for i in range(numThreads):
                        maxFitList.append(fmax[0][i][0][-1])
                        maxVectorList.append(fmax[0][i][1][-1])
                        fmaxSum = fmaxSum + fmax[0][i][0][-1]
                    num_call_F = num_call_F - fmax[0][0][2]
                    numIter = fmax[0][0][3]

                else:
                    fmax.append(cat.test_exp(numThreads))

                    maxFitList.append(fmax[0][0][-1])
                    maxVectorList.append(fmax[0][1][-1])
                    fmaxSum = fmaxSum + fmax[0][0][-1]
                    num_call_F = num_call_F - fmax[0][2]
                    numIter = fmax[0][3]

                avr = avr + fmaxSum
                ans[seed].append(maxFitList)

                if (opt == 'min'):
                    index = maxFitList.index(min(maxFitList))
                    bestFitt = min(maxFitList)
                else:
                    index = maxFitList.index(max(maxFitList))
                    bestFitt = max(maxFitList)

                bestVector = str(maxVectorList[index])[1:-1].replace("\n", "")
                bestVector = bestVector.replace("          ", ",")
                bestVector = bestVector.replace("  ", ",")
                print("\naverage of max fitness in {} different runs is: {}".
                      format(numThreads, (avr / numThreads)))
                print("\nBest fitness is {} with vector: {}".format(
                    bestFitt, bestVector))

                # general details
                with open(
                        "/home/naamah/Documents/CatES/result_All/" + problem +
                        "/" + fileName + "/General_Info.txt", 'a') as file:
                    if withSurr:
                        file.write("\b With Surrogate model \b" + "\n")
                    else:
                        file.write('\b Without Surrogate model \b' + '\n')

                    end_time = datetime.now()
                    stri = 'Duration: {}'.format(end_time - start_time)
                    cpu_percent = "cpu percent: {}".format(
                        str(psutil.cpu_percent(interval=None)))
                    cpu_time = "cpu time: {}".format(
                        str(psutil.cpu_times_percent(interval=None)))

                    file.write("Num of Threads: {}".format(numThreads) + "\n")
                    file.write("Num of Iterations (for each Thread): {}".
                               format(numIter) + "\n")
                    file.write(
                        "Num of calls to the F functions (for each Thread): {}"
                        .format(num_call_F) + "\n")
                    file.write(
                        "Num of iterations before using the surrogate model (for each Thread): {}"
                        .format(intiNumIterWituoutSurr) + "\n")
                    if sourrogate == "RBFN":
                        file.write(
                            "The centers were selected with the help Kmeans algorithem: {}"
                            .format(isKmeans) + "\n")

                    file.write(stri + "\n")
                    file.write(cpu_percent + "\n")
                    # file.write(cpu_time + "\n")
                    file.write("Avrage Fittness: {}".format(avr / numThreads) +
                               "\n")
                    file.write("Best Fittness: {}".format(bestFitt) + "\n")
                    file.write("Best Vector: {}".format(bestVector) + "\n\n\n")

                    ###print(stri)
                    ###print(cpu_percent)
                file.close()

            if numToCompare > 1:
                a = np.asarray(ans[0])
                b = np.asarray(ans[1])

                twosample_results = stats.ttest_ind(a[0], b[0])

                # significate
                print("\n\n***  Stats  ***")
                print("t: {} \np: {}".format(twosample_results[0],
                                             twosample_results[1]))
                with open(
                        "/home/naamah/Documents/CatES/result_All/" + problem +
                        "/" + fileName + "/General_Info.txt", 'a') as file:
                    file.write("T test: {}".format(twosample_results[0]) +
                               "\n")
                    file.write("P value: {}".format(twosample_results[1]) +
                               "\n")
                file.close()

                # boxplot
                try:
                    data = [a[0], b[0]]
                    fig, axs = plt.subplots(1, 2, sharey=True)
                    plt.suptitle("{} - {} \n P Value: {}".format(
                        problem, sourrogate, twosample_results[1]))

                    axs[0].boxplot(a[0])
                    axs[0].set_title('with Surrogate')
                    plt.ylim(
                        min(min(a[0]), min(b[0])) * 0.98,
                        max(max(a[0]), max(b[0])) * 1.02)
                    axs[1].boxplot(b[0])
                    axs[1].set_title("without Sorrugate")

                    plt.subplots_adjust(left=0.2, wspace=0.8, top=0.8)

                    plt.savefig("/home/naamah/Documents/CatES/result_All/" +
                                problem + "/" + fileName + "/BoxPlot")
                    # plt.show()
                    plt.close('all')
                    plt.close(fig)
                except RuntimeWarning:
                    print("i am in the warning")
Example #28
import socket
import signal
import time
import threading
import random
import sys
import os

RECV_SIZE = 1024
HOST = ''
CLIENTHOST = ''
CLIENTPORT = 0
HEARTBEAT = 30
PORT = 0
USERNAME = ''
lock = Lock()
p2p_lock = Lock()
p2p_port = 0
p2p_ip = ''
p2p_user = ''


# ^C graceful termination
def ctrl_c_handler(signum, frame):
    exit(0)


# delay send to fix a python socket issue
def delay_send(connection, code, message):
    try:
        connection.sendall(code)
Example #29
        labels = np.concatenate([labels, np.array(dics[i][b'labels'])], axis=0)
    mean = np.mean(data, axis=0)
    std = np.std(data, axis=0)
    p = np.arange(50000, dtype=np.int64)  # int16 would overflow above 32767
    np.random.shuffle(p)
    data = np.array([data[i] for i in p])
    labels = np.array([labels[i] for i in p])
    import pickle
    with open("meanstd.data", "wb") as f:
        pickle.dump([mean, std], f)
    train_dataset = [data[:49500], labels[:49500]]
    valid_dataset = [data[49500:], labels[49500:]]
    lis = []
    que = Queue(1)
    que.put(0)
    lock = Lock()

    p = "lyy.CIFAR10.resnet20.train"
    for i in range(args.t):
        proc = P(target=worker,
                 args=(train_dataset, p, que, lock, True, mean, std))
        proc.start()
        lis.append(proc)

    que_val = Queue(1)
    que_val.put(0)
    p_val = "lyy.CIFAR10.resnet20.valid"
    proc = P(target=worker,
             args=(valid_dataset, p_val, que_val, lock, False, mean, std))
    proc.start()
    lis.append(proc)
Example #30
def __init__(self, path: str):
    super().__init__(path)
    self.lock = Lock()