Example #1
 def __init__(self, max_size):
     from multiprocessing import Lock, RawArray, RawValue
     self._max_size = max_size
     self._array = RawArray('c', max_size)
     self._pos = RawValue('L')
     self._size = RawValue('L')
     self._locks = Lock(), Lock(), Lock()
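RawValue and RawArray allocate raw shared ctypes memory without the hidden lock that Value/Array carry, which is why this snippet pairs them with explicit Locks. A minimal, self-contained sketch of the pattern (the names here are illustrative, not from the original project):

from multiprocessing import Process, Lock, RawArray, RawValue

def writer(buf, size, lock):
    with lock:                 # RawValue/RawArray do no locking of their own
        buf[:5] = b'hello'     # write into the shared byte array
        size.value = 5         # publish how many bytes are valid

if __name__ == '__main__':
    lock = Lock()
    buf = RawArray('c', 16)    # 16 shared bytes, zero-initialized
    size = RawValue('L', 0)    # unsigned long, starts at 0
    p = Process(target=writer, args=(buf, size, lock))
    p.start()
    p.join()
    print(bytes(buf[:size.value]))  # b'hello'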
Example #2
    def __init__(self, struct, size=20):
        """
        struct - a ctypes object (value, array or struct)
        size - number of slots in the buffer. 
        
        If the number of slots exceeds the number of python ints which 
        will fit in an os.pipe buffer, this will block indefinitely.
        I don't see a way round this. On the plus side, at least it fails
        early, rather than on a call to the put method.
        """
        buf = RawArray(struct, int(size))
        self.buffer = buf
        #self.buffer = numpy.frombuffer(buf, dtype=numpy.dtype(buf._type_))
        stock_out, stock_in = Pipe(duplex=False)
        queue_out, queue_in = Pipe(duplex=False)

        stock_out_lock = Lock()
        stock_in_lock = Lock()
        queue_out_lock = Lock()
        queue_in_lock = Lock()

        self.stock_closed = RawValue('h', 0)
        self.queue_closed = RawValue('h', 0)

        for i in range(size):
            stock_in.send(i)
        self.map = {}

        self._put_obj = (stock_out, stock_out_lock, queue_in, queue_in_lock)
        self._ret_obj = (stock_in, stock_in_lock)
        self._get_obj = (queue_out, queue_out_lock)
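The two pipe pairs act as free-lists: indices of empty slots circulate through the stock pipe and indices of filled slots through the queue pipe, so claiming a slot is just a recv(). A tiny sketch of that idea, inferred from the docstring rather than taken from the original put/get code:

from multiprocessing import Pipe

stock_out, stock_in = Pipe(duplex=False)
for i in range(4):           # preload four free slot indices
    stock_in.send(i)

slot = stock_out.recv()      # claim a free slot; blocks when none are left,
                             # the failure mode the docstring warns about
stock_in.send(slot)          # recycle the slot once the consumer is done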
Example #3
    def __init__(self, num_actions, alg_type, opt_type=None, lr=0):
        # Net
        if alg_type in ['q', 'sarsa']:
            self.var_shapes = [
                (8, 8, 4, 16),
                (16,),
                (4, 4, 16, 32),
                (32,),
                (2592, 256),  # (3872, 256) if PADDING == "SAME"
                (256,),
                (256, num_actions),
                (num_actions,)
            ]

            self.size = 0
            for shape in self.var_shapes:
                self.size += np.prod(shape)

            if opt_type == "adam":
                self.ms = self.malloc_contiguous(self.size)
                self.vs = self.malloc_contiguous(self.size)
                self.lr = RawValue(ctypes.c_float, lr)
            elif opt_type == "rmsprop":
                self.vars = self.malloc_contiguous(
                    self.size, np.ones(self.size, dtype=np.float64))
            else:  # momentum
                self.vars = self.malloc_contiguous(self.size)

        else:
            # no lstm
            self.var_shapes = [
                (8, 8, 4, 16),
                (16,),
                (4, 4, 16, 32),
                (32,),
                (2592, 256),  # (3872, 256)
                (256,),
                (256, num_actions),
                (num_actions,),
                (256, 1),
                (1,)
            ]

            self.size = 0
            for shape in self.var_shapes:
                self.size += np.prod(shape)

            if opt_type == "adam":
                self.ms = self.malloc_contiguous(self.size)
                self.vs = self.malloc_contiguous(self.size)
                self.lr = RawValue(ctypes.c_float, lr)
            if opt_type == "rmsprop":
                self.vars = self.malloc_contiguous(
                    self.size, np.ones(self.size, dtype=np.float))
            elif opt_type == "momentum":
                self.vars = self.malloc_contiguous(self.size)
            else:
                self.vars = self.malloc_contiguous(self.size)
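malloc_contiguous is referenced but not shown in these snippets. A plausible implementation (an assumption, not the original code) backs the flat parameter vector with a RawArray so every worker process shares one contiguous float block:

import numpy as np
from multiprocessing import RawArray

def malloc_contiguous(size, initial_val=None):
    # hypothetical helper: one flat shared float block, optionally initialized
    if initial_val is None:
        return RawArray('f', int(size))
    return RawArray('f', initial_val.astype(np.float32))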
Example #4
    def __init__(self, _simulationCommunicator=None):
        print("ImageProcessor object created")
        self.simulationCommunicator = _simulationCommunicator
        # image-processing result values
        self.result_x = RawValue('f', 0.0)
        self.result_y = RawValue('f', 0.0)
        self.key = RawValue('i', 0)

        self.corners = np.zeros((4, 2), np.int32)  # positions of the plate corners
        self.obstacle_map = RawArray('i', ImageProcessor.obstacle_map_size**2)
        self.obstacle_map_update_counter = 0
Example #5
    def test_multipush(self):
        '''Tests behavior when multiple servers are pushing jobs simultaneously.'''
        total_completed = RawValue('i')
        total_completed.value = 0

        start_workers, kill_workers = testing_lib.construct_worker_pool(config.num_local_workers(), config.WORKER_ADDRESSES, send_jobs, (), on_recv_result, (total_completed,), num_pushers=config.NUM_PUSHERS)
        start_workers()
        completion = testing_lib.check_for_completion(total_completed, config.NUM_JOBS * config.NUM_PUSHERS, get_timeout(len(config.WORKER_ADDRESSES)))
        kill_workers()
        if not completion:
            self.fail('Not all jobs received: %d / %d' % (total_completed.value, config.NUM_JOBS * config.NUM_PUSHERS))
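check_for_completion from testing_lib is not shown here; presumably it polls the shared counter until a target count or a timeout is reached. A minimal sketch under that assumption:

import time

def check_for_completion(counter, target, timeout):
    # a plain read of a RawValue('i') needs no lock for a simple >= test
    deadline = time.time() + timeout
    while time.time() < deadline:
        if counter.value >= target:
            return True
        time.sleep(0.05)
    return False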
Example #6
    def test_volume(self):
        '''Tests the ability to handle various traffic patterns (trickle, normal, and spike by default).'''
        total_completed = RawValue('i')
        for pattern in config.USAGE_PATTERNS:
            total_completed.value = 0
            total_jobs = pattern.sets * pattern.set_reps

            start_workers, kill_workers = testing_lib.construct_worker_pool(len(config.WORKER_ADDRESSES), config.WORKER_ADDRESSES, send_jobs, (pattern,), on_recv_result, (total_completed,))
            start_workers()
            if not testing_lib.check_for_completion(total_completed, total_jobs, get_timeout(pattern, len(config.WORKER_ADDRESSES))):
                self.fail('Failed on usage pattern: %s' % str(pattern))
            kill_workers()
Example #7
    def test_files(self):
        '''Tests that workers can correctly store files in a central location.'''
        total_completed = RawValue('i')
        total_completed.value = 0
        test_dir = os.path.join(os.path.dirname(__file__), 'testing_files')
        if not os.path.exists(test_dir):
            os.mkdir(test_dir)

        start_workers, kill_workers = testing_lib.construct_worker_pool(config.num_local_workers(), config.WORKER_ADDRESSES, send_jobs, (), on_recv_result, (total_completed,))
        start_workers()
        if not testing_lib.check_for_completion(total_completed, config.NUM_FILES, get_timeout(len(config.WORKER_ADDRESSES))):
            self.fail('Not all jobs received: %d / %d' % (total_completed.value, config.NUM_FILES))
        kill_workers()
        os.rmdir(test_dir)
Example #8
    def test_mem(self):
        '''Tests that memory required to transfer jobs does not exceed the given amount.'''
        total_completed = RawValue('i')
        print('Constructing test string (this could take some time).')
        test_str = generate_str(config.STR_LEN)
        print('Done. Starting test.')
        total_completed.value = 0

        start_workers, kill_workers = testing_lib.construct_worker_pool(config.num_local_workers(), config.WORKER_ADDRESSES, send_jobs, (test_str,), on_recv_result, (total_completed,))
        start_workers()
        completion = testing_lib.check_for_completion(total_completed, config.NUM_STRINGS, get_timeout())
        kill_workers()
        if not completion:
            self.fail('Not all jobs received: %d / %d' % (total_completed.value, config.NUM_STRINGS))
Example #9
    def test_multiprocessing(self):
        '''Tests that jobs will execute properly on multiple processes simultaneously.'''
        total_completed = RawValue('i')
        job_processors = MultiQueue()
        total_completed.value = 0

        start_workers, kill_workers = testing_lib.construct_worker_pool(config.num_local_workers(), config.WORKER_ADDRESSES, send_jobs, (), on_recv_result, (total_completed, job_processors))
        start_workers()
        completion = testing_lib.check_for_completion(total_completed, config.NUM_JOBS, get_timeout(len(config.WORKER_ADDRESSES)))
        kill_workers()
        if not completion:
            self.fail('Not all jobs received: %d / %d' % (total_completed.value, config.NUM_JOBS))
        if not check_load_balance(job_processors):
            self.fail('Not all workers utilized.')
Example #10
 def __new__(cls, *args, **kwargs):
     """Make this a singleton class."""
     if cls._instance is None:
         cls._instance = super().__new__(cls)
         cls._instance.val = RawValue("i", 1)
         cls._instance.lock = threading.Lock()
     return cls._instance
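A short usage sketch for the singleton above (the class name Counter is assumed): every caller in the process gets the same instance, and mutations must hold the lock because RawValue has no synchronization of its own:

c1 = Counter()
c2 = Counter()
assert c1 is c2          # __new__ always hands back the same instance

with c1.lock:            # RawValue('i') is not atomic under concurrent +=
    c1.val.value += 1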
Example #11
def fasta(n):
    alu = sub(
        r'\s+', '', """
GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA
TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT
AAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAG
GCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCG
CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA
""")

    iub = list(
        zip_longest('acgtBDHKMNRSVWY', (.27, .12, .12, .27), fillvalue=.02))

    homosapiens = list(
        zip('acgt', (0.3029549426680, 0.1979883004921, 0.1975473066391,
                     0.3015094502008)))

    seed = RawValue('f', 42)
    width = 60
    tasks = [
        (copy_from_sequence, [b'>ONE Homo sapiens alu\n', alu, n * 2, width]),
        (random_selection,
         [b'>TWO IUB ambiguity codes\n', iub, n * 3, width, seed]),
        (random_selection,
         [b'>THREE Homo sapiens frequency\n', homosapiens, n * 5, width,
          seed]),
    ]

    for func, args in tasks:
        func(*args)

    output_file.close()
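The shared seed exists because the fasta benchmark threads a single linear congruential generator through all random tasks. A minimal version of that generator (the IM/IA/IC constants are the standard benchmarksgame ones; note that a 'd' double RawValue would preserve more precision than 'f'):

IM, IA, IC = 139968.0, 3877.0, 29573.0

def gen_random(seed, n):
    # advance the shared LCG state in place, yielding uniform floats in [0, 1)
    for _ in range(n):
        seed.value = (seed.value * IA + IC) % IM
        yield seed.value / IM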
Example #12
class ThreadedTraceParser:
    """
    Trace parser that scans a trace using multiple processes

    The scanning processes are forked from the current parser
    and each one is allocated a range of the trace. The parsed
    data items are piped through one or more queues. The current
    process pulls data from the queue(s) and stores it in the
    dataset.

    XXX: experimental/broken
    """
    def __init__(self, path, parser, threads=2):
        self.trace = RawValue(py_object, None)
        """Trace in shared memory"""
        self.parser = parser
        """
        Callback that handles each trace block, this is run
        in separate processes
        """
        self.n_threads = threads
        """Number of subprocesses that are spawned"""
        self.path = path
        """Trace path"""

        if not os.path.exists(path):
            raise IOError("File not found %s" % path)
        self.trace = pct.trace.open(path)
        if self.trace is None:
            raise IOError("Can not open trace %s" % path)

    def __len__(self):
        if self.trace:
            return self.trace.size()
        return 0

    def parse(self, *args, **kwargs):
        start = kwargs.pop("start", 0)
        end = kwargs.pop("end", len(self))
        block_size = math.floor((end - start) / self.n_threads)
        start_indexes = np.arange(start, end - block_size + 1, block_size)
        end_indexes = np.arange(start + block_size, end + 1, block_size) - 1
        # the last process consumes any remaining entries left by the
        # rounding of block_size
        end_indexes[-1] = end

        procs = []
        for idx_start, idx_end in zip(start_indexes, end_indexes):
            print(idx_start, idx_end)
            p = Process(target=self.parser,
                        args=(self.path, idx_start, idx_end))
            procs.append(p)

        for p in procs:
            p.start()
        for p in procs:
            p.join()
Example #13
    def __init__(self, path, parser, threads=2):
        self.trace = RawValue(py_object, None)
        """Trace in shared memory"""
        self.parser = parser
        """
        Callback that handles each trace block, this is run
        in separate processes
        """
        self.n_threads = threads
        """Number of subprocesses that are spawned"""
        self.path = path
        """Trace path"""

        if not os.path.exists(path):
            raise IOError("File not found %s" % path)
        self.trace = pct.trace.open(path)
        if self.trace is None:
            raise IOError("Can not open trace %s" % path)
Example #14
    def __init__(self, maxlen, shape, dtype=np.float32, data=None):
        """
        A buffer object, when full restarts at the initial position

        :param maxlen: (int) the max number of numpy objects to store
        :param shape: (tuple) the shape of the numpy objects you want to store
        :param dtype: (str) the name of the type of the numpy object you want to store
        """
        self.maxlen = maxlen
        self.start = RawValue('L')
        self.length = RawValue('L')
        self.shape = shape
        if data is None:
            self.data = np.zeros((maxlen, ) + shape, dtype=dtype)
        else:
            assert data.shape == (maxlen, ) + shape
            assert data.dtype == dtype
            self.data = data
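For context, a buffer like this typically appends as sketched below (hedged: this mirrors the baselines-style RingBuffer, but the method is not part of the snippet):

    def append(self, var):
        if self.length.value < self.maxlen:
            # still filling up: grow the valid region
            self.length.value += 1
        else:
            # full: advance start, overwriting the oldest entry
            self.start.value = (self.start.value + 1) % self.maxlen
        self.data[(self.start.value + self.length.value - 1) % self.maxlen] = var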
Example #15
    def __init__(self, params, opt_type=None, lr=0, step=0):
        self.var_shapes = [var.get_shape().as_list() for var in params]
        self.size = sum([np.prod(shape) for shape in self.var_shapes])
        self.step = RawValue(ctypes.c_int, step)

        if opt_type == 'adam':
            self.ms = self.malloc_contiguous(self.size)
            self.vs = self.malloc_contiguous(self.size)
            self.lr = RawValue(ctypes.c_float, lr)
        elif opt_type == 'adamax':
            self.ms = self.malloc_contiguous(self.size)
            self.vs = self.malloc_contiguous(self.size)
            self.lr = RawValue(ctypes.c_float, lr)
        elif opt_type == 'rmsprop':
            self.vars = self.malloc_contiguous(
                self.size, np.ones(self.size, dtype=np.float64))
        elif opt_type == 'momentum':
            self.vars = self.malloc_contiguous(self.size)
        else:
            self.vars = self.malloc_contiguous(self.size)
Example #16
    def test_threading(self):
        '''Tests that jobs will execute properly on multiple threads simultaneously.'''
        total_completed = RawValue('i')
        job_processors = MultiQueue()
        def push(vent_port, sink_port, worker_pool):
            worker, close, run_job = parallel.construct_worker(worker_pool, {'vent_port': vent_port, 'sink_port': sink_port})
            for i in range(config.NUM_JOBS):
                run_job(wait_job, (config.WAIT_TIME,))
            worker(on_recv_result, (total_completed, job_processors))

        total_completed.value = 0
        port = 5000
        thread.start_new_thread(push, (port, port + 1, config.WORKER_ADDRESSES))
        for i in range(config.num_local_workers() - 1):
            port += 2
            thread.start_new_thread(testing_lib.work, (port, port + 1, config.WORKER_ADDRESSES))
        if not testing_lib.check_for_completion(total_completed, config.NUM_JOBS, get_timeout(len(config.WORKER_ADDRESSES))):
            self.fail('Not all jobs received: %d / %d' % (total_completed.value, config.NUM_JOBS))
        if not check_load_balance(job_processors):
            self.fail('Not all workers utilized.')
Example #17
    def __init__(self, struct, size=20):
        size = int(size) + 1
        self._size = size
        self.buffer = Array(struct, size)
        #dt = numpy.dtype(struct)
        #self.buffer = numpy.frombuffer(self._buffer, dtype=dt)
        self.stock = RawArray('I', size)
        self.queue = RawArray('I', size)

        self.stock_write = RawValue('I', 0)
        self.stock_read = RawValue('I', 0)
        self.queue_write = RawValue('I', 0)
        self.queue_read = RawValue('I', 0)

        self.stock[:] = list(range(size))
        self.stock_write.value = size - 1

        self.stock_lock = Condition(Lock())
        self.queue_lock = Condition(Lock())

        self.map = {}
Example #18
def fasta(n):
    alu = sub(r'\s+', '', """
GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA
TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT
AAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAG
GCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCG
CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA
""")

    iub = list(zip_longest('acgtBDHKMNRSVWY',
                           (.27, .12, .12, .27), fillvalue=.02))

    homosapiens = list(zip('acgt', (0.3029549426680, 0.1979883004921,
                                    0.1975473066391, 0.3015094502008)))

    seed = RawValue('f', 42)
    width = 60
    tasks = [
        (copy_from_sequence,
         [b'>ONE Homo sapiens alu\n', alu, n * 2, width]),
        (random_selection,
         [b'>TWO IUB ambiguity codes\n', iub, n * 3, width, seed]),
        (random_selection,
         [b'>THREE Homo sapiens frequency\n', homosapiens, n * 5, width, seed]),
    ]

    if cpu_count() < 2:
        for func, args in tasks:
            func(*args)
    else:
        written_1 = acquired_lock()
        seeded_2 = acquired_lock()
        written_2 = acquired_lock()

        locks_sets = [
            (None, written_1),
            (None, seeded_2, written_1, written_2),
            (seeded_2, None, written_2, None),
        ]

        processes = [
            started_process(target, args + [locks_sets[i]])
                for i, (target, args) in enumerate(tasks)
        ]

        for p in processes:
            p.join()

    output_file.close()
Example #19
    def __init__(self, num_actions, alg_type, network, opt_type=None, lr=0):
        self.var_shapes = [
            var.get_shape().as_list() for var in tf.global_variables()
        ]

        self.size = 0
        for shape in self.var_shapes:
            self.size += np.prod(shape)

        if opt_type == 'adam':
            self.ms = self.malloc_contiguous(self.size)
            self.vs = self.malloc_contiguous(self.size)
            self.lr = RawValue(ctypes.c_float, lr)
        elif opt_type == 'adamax':
            self.ms = self.malloc_contiguous(self.size)
            self.vs = self.malloc_contiguous(self.size)
            self.lr = RawValue(ctypes.c_float, lr)
        elif opt_type == 'rmsprop':
            self.vars = self.malloc_contiguous(
                self.size, np.ones(self.size, dtype=np.float64))
        elif opt_type == 'momentum':
            self.vars = self.malloc_contiguous(self.size)
        else:
            self.vars = self.malloc_contiguous(self.size)
Example #20
    def __init__(self, conf=None):
        # copy DEFAULT_CONF so updates do not mutate the shared default dict
        self.conf = dict(DEFAULT_CONF)
        self.conf.update(conf or {})

        # Init one-to-one mapped variables
        self.net_man = None
        self.state_man = None
        self.traffic_gen = None
        self.bw_ctrl = None
        self.sampler = None
        self.input_file = None
        self.terminated = False
        self.reward = RawValue('d', 0)

        # set the id of this environment
        self.short_id = dc_utils.generate_id()
        if self.conf["parallel_envs"]:
            self.conf["topo_conf"]["id"] = self.short_id
        # initialize the topology
        self.topo = TopoFactory.create(self.conf["topo"],
                                       self.conf["topo_conf"])
        # Save the configuration we have, id does not matter here
        dc_utils.dump_json(path=self.conf["output_dir"],
                           name="env_config",
                           data=self.conf)
        dc_utils.dump_json(path=self.conf["output_dir"],
                           name="topo_config",
                           data=self.topo.conf)
        # set the dimensions of the state matrix
        self._set_gym_matrices()
        # Set the active traffic matrix
        self._set_traffic_matrix(self.conf["tf_index"], self.conf["input_dir"],
                                 self.topo)

        # each unique id has its own sub folder
        if self.conf["parallel_envs"]:
            self.conf["output_dir"] += f"/{self.short_id}"
        # check if the directory we are going to work with exists
        dc_utils.check_dir(self.conf["output_dir"])

        # handle unexpected exits scenarios gracefully
        atexit.register(self.close)
Example #21
    def run(self,
            ensemble,
            ncores=None,
            pairwise_align=False,
            align_subset_coordinates=None,
            mass_weighted=True,
            metadata=True):
        '''
        Run the conformational distance matrix calculation.
        
        **Arguments:**
        
        `ensemble` : encore.Ensemble.Ensemble object
            Ensemble object for which the conformational distance matrix will be computed. 
        
        `pairwise_align` : bool
            Whether to perform pairwise alignment between conformations
            
        `align_subset_coordinates` : numpy.array or None
            Use these coordinates for superimposition instead of those from ensemble.superimposition_coordinates
        
        `mass_weighted` : bool
            Whether to perform mass-weighted superimposition and metric calculation
            
        `metadata` : bool
            Whether to build a metadata dataset for the calculated matrix
            
        `ncores` : int
            Number of cores to be used for parallel calculation
        
        **Returns:**
        
        `cond_dist_matrix` : encore.utils.TriangularMatrix object
            Conformational distance matrix in triangular representation.
        '''

        # Decide how many cores have to be used. Since the main process is stopped while the workers do their job, ncores workers will be spawned.
        if not ncores:
            ncores = cpu_count()
        if ncores < 1:
            ncores = 1

        # framesn: number of frames
        framesn = len(ensemble.coordinates)

        # Prepare metadata recarray
        if metadata:
            metadata = array(
                [(gethostname(), getuser(), str(datetime.now()),
                  ensemble.topology_filename, framesn, pairwise_align,
                  ensemble.superimposition_selection_string, mass_weighted)],
                dtype=[('host', object), ('user', object), ('date', object),
                       ('topology file', object), ('number of frames', int),
                       ('pairwise superimposition', bool),
                       ('superimposition subset', object),
                       ('mass-weighted', bool)])

        # Prepare alignment subset coordinates as necessary
        subset_coords = None
        if pairwise_align:
            subset_selection = ensemble.superimposition_selection
            if align_subset_coordinates is not None:
                subset_coords = align_subset_coordinates
            else:
                subset_coords = ensemble.superimposition_coordinates

        # Prepare masses as necessary
        subset_masses = None

        if mass_weighted:
            masses = ensemble.atom_selection.masses
            if pairwise_align:
                subset_masses = subset_selection.masses
        else:
            masses = ones((ensemble.coordinates[0].shape[0]))
            if pairwise_align:
                subset_masses = ones((subset_coords[0].shape[0]))

        # matsize: number of elements of the triangular matrix, diagonal elements included.
        matsize = framesn * (framesn + 1) // 2

        # Calculate the number of matrix elements that each core has to calculate as equally as possible.
        if ncores > matsize:
            ncores = matsize
        runs_per_worker = [matsize // ncores for _ in range(ncores)]
        unfair_work = matsize % ncores
        for i in range(unfair_work):
            runs_per_worker[i] += 1

        # Splice the matrix in ncores segments. Calculate the first and the last (i,j)
        # matrix elements of the slices that will be assigned to each worker. Each of them will proceed in a column-then-row order
        # (e.g. 0,0 1,0 1,1 2,0 2,1 2,2 ... )
        i = 0
        a = [0, 0]
        b = [0, 0]
        tasks_per_worker = []
        for n in range(len(runs_per_worker)):
            while i * (i - 1) // 2 < sum(runs_per_worker[:n + 1]):
                i += 1
            b = [
                i - 2,
                sum(runs_per_worker[0:n + 1]) - (i - 2) * (i - 1) // 2 - 1
            ]
            tasks_per_worker.append((tuple(a), tuple(b)))
            if b[0] == b[1]:
                a[0] = b[0] + 1
                a[1] = 0
            else:
                a[0] = b[0]
                a[1] = b[1] + 1

        # Allocate for output matrix
        distmat = Array(c_float, matsize)

        # Prepare progress bar stuff and run it
        pbar = AnimatedProgressBar(end=matsize, width=80)
        partial_counters = [RawValue('i', 0) for i in range(ncores)]

        # Initialize workers. Simple worker doesn't perform fitting, fitter worker does.
        if pairwise_align:
            workers = [
                Process(target=self._fitter_worker,
                        args=(tasks_per_worker[i], ensemble.coordinates,
                              subset_coords, masses, subset_masses, distmat,
                              partial_counters[i])) for i in range(ncores)
            ]
        else:
            workers = [
                Process(target=self._simple_worker,
                        args=(tasks_per_worker[i], ensemble.coordinates,
                              masses, distmat, partial_counters[i]))
                for i in range(ncores)
            ]

        workers += [
            Process(target=self._pbar_updater,
                    args=(pbar, partial_counters, matsize))
        ]

        # Start & join the workers
        for w in workers:
            w.start()
        for w in workers:
            w.join()

        # When the workers have finished, return a TriangularMatrix object
        return TriangularMatrix(distmat, metadata=metadata)
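A quick arithmetic check of the triangular split used above (pure Python, no dependencies): with the diagonal included, framesn frames give framesn * (framesn + 1) // 2 elements, and element (i, j) with j <= i sits at linear index i * (i + 1) // 2 + j in the column-then-row order the comment describes:

framesn = 4
matsize = framesn * (framesn + 1) // 2      # 10 elements for 4 frames
indexes = [i * (i + 1) // 2 + j
           for i in range(framesn) for j in range(i + 1)]
assert indexes == list(range(matsize))      # order: 0,0 1,0 1,1 2,0 ...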
Example #22
 def __init__(self):
     # RawValue because we don't need the extra Lock that Value would create:
     self.val = RawValue('d', 0)
     self.num = RawValue('i', 0)
     self.lock = Lock()
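A minimal exercise of the counter above (assuming the class is named Counter): without the Lock, concurrent += on a RawValue loses updates, because read-modify-write on shared memory is not atomic:

from multiprocessing import Process

def bump(counter, n):
    for _ in range(n):
        with counter.lock:       # drop this and the final count comes up short
            counter.num.value += 1

if __name__ == '__main__':
    c = Counter()
    procs = [Process(target=bump, args=(c, 1000)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(c.num.value)           # 4000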
Example #23
 def __init__(self, initial_value=0):
     self.progress_marker = RawValue(c_ulonglong, initial_value)
     self.lock = Lock()
Example #24
 def __init__(self, initval=0):
     self.val = RawValue('i', initval)
     self.lock = Lock()
Example #25
    try:
        print q.get_nowait()
    except:
        print "Queue empty !"
    
    i = INT(0)
    v1 = RawValue('i', 0)
    v2 = RawValue('i', 0)
    p2 = INTProcess(i, v1, v2)
    p2.start()
    p2.join()
    print i.getI()
    print i.getV()
    print v1.value
    print v2.value

    v = RawValue('i', 0)
    l = Lock()
    nbrProcs = 10
    procs = [LockProcess(v, l, False) for j in range(nbrProcs)]
    for j in range(nbrProcs): procs[j].start()
    for j in range(nbrProcs): procs[j].join()
    print v.value
    v.value = 0
    procs2 = [LockProcess(v, l, True) for j in range(nbrProcs)]
    for j in range(nbrProcs): procs2[j].start()
    for j in range(nbrProcs): procs2[j].join()
    print v.value
Example #26
import sys
import getopt
import numpy as np
import csv
import pandas as pd
import re
import linecache
import time
import matplotlib.pyplot as plt
import matplotlib
from multiprocessing import Pool, sharedctypes, RawArray, RawValue
from MS.IBIS import *
from MS.LPJ import *
from MS.Biome_BGC import *
from CMIP import *
plt.switch_backend('tkagg')

counter = RawValue('i')
counter.value = 0
failed_counter = RawValue('i')
failed_counter.value = 0
test = False
options, args = getopt.getopt(sys.argv[1:], '', ['model=', 'test='])
for opt in options:
    if (opt[0] == '--model'):
        if opt[1] == 'IBIS':
            ms_daily = IBIS('daily')
            ms_month = IBIS('month')
        elif opt[1] == 'Biome-BGC':
            ms_daily = Biome_BGC('daily')
            ms_month = Biome_BGC('month')
        elif opt[1] == 'LPJ':
            ms_daily = LPJ('daily')
Example #27
    "metricName": "daily-average-GPP"
}, {
    "id": "adnpptot",
    "type": "",
    "description": "daily average NPP",
    "scale": 1000.0,
    "offset": 0.0,
    "unit": "kgC m-2 d-1",
    "metricName": "daily-average-NPP"
}]

folder = DATA_HOME + '/IBIS_Data/5b9012e4c29ca433443dcfab/outputs'
srcSuffix = '.daily.txt'
distSuffix = '.month.txt'

counter = RawValue('i')
counter.value = 0


def convert2Month(i):
    # try:
    counter.value += 1
    print('counter: %s    %5.2f%%    site index: %s' %
          (str(counter.value), counter.value * 100 / 40595, str(i)))
    srcPath = '%s/%s%s' % (folder, str(i), srcSuffix)
    distPath = '%s/%s%s' % (folder, str(i), distSuffix)
    if path.exists(distPath):
        fsize = path.getsize(distPath)
        if fsize > 100000:  # 100k
            rerun = False
        else:
Example #28
 def __init__(self):
     self.val = RawValue('i', 0)
     self.lock = Lock()
Example #29
    def __init__(self, normalize_inputs, T, gamma, more_obs, average_rewards):
        if os.geteuid() != 0:
            exit(
                "You need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting."
            )
        clean()
        os.system("systemctl start ovsdb-server.service")
        os.system("systemctl start ovs-vswitchd.service")

        self.T = T
        self.average_rewards = average_rewards
        self.normalize_inputs = normalize_inputs
        self.GAMMA = gamma
        self.max_steps = 10000000
        self.n_agent = 16
        self.nD = self.n_agent
        self.n_signal = 4
        self.n_episode = 300  #for around 3M transitions
        self.max_u = None
        self.input_size = 96
        self.n_actions = 1  #number of dim per agent

        self.conf = DEFAULT_CONF
        # self.conf.update(conf)

        if more_obs:
            self.conf.update(
                {"state_model": ["backlog", "d_backlog", "olimit", "drops"]})
            self.input_size = 336

        # Init one-to-one mapped variables
        self.net_man = None
        self.state_man = None
        self.traffic_gen = None
        self.bw_ctrl = None
        self.sampler = None
        self.input_file = None
        self.terminated = False
        self.reward = RawValue('d', 0)

        # set the id of this environment
        self.short_id = dc_utils.generate_id()
        if self.conf["parallel_envs"]:
            self.conf["topo_conf"]["id"] = self.short_id
        # initialize the topology
        self.topo = TopoFactory.create(self.conf["topo"],
                                       self.conf["topo_conf"])
        # Save the configuration we have, id does not matter here
        dc_utils.dump_json(path=self.conf["output_dir"],
                           name="env_config",
                           data=self.conf)
        dc_utils.dump_json(path=self.conf["output_dir"],
                           name="topo_config",
                           data=self.topo.conf)
        # set the dimensions of the state matrix
        self._set_gym_matrices()
        # Set the active traffic matrix
        self._set_traffic_matrix(self.conf["tf_index"], self.conf["input_dir"],
                                 self.topo)

        # each unique id has its own sub folder
        if self.conf["parallel_envs"]:
            self.conf["output_dir"] += f"/{self.short_id}"
        # check if the directory we are going to work with exists
        dc_utils.check_dir(self.conf["output_dir"])

        # handle unexpected exits scenarios gracefully
        atexit.register(self.close)

        self.compute_neighbors = False
        self.neighbors_size = 4  # max number of neighbors
        self.compute_neighbors_last = np.array([
            [1, 2, 3], [0, 2, 3], [0, 1, 3], [0, 1, 2],
            [5, 6, 7], [4, 6, 7], [4, 5, 7], [4, 5, 6],
            [9, 10, 11], [8, 10, 11], [8, 9, 11], [8, 9, 10],
            [13, 14, 15], [12, 14, 15], [12, 13, 15], [12, 13, 14],
        ])
        self.compute_neighbors_last_index = [
            list(range(len(self.compute_neighbors_last[i])))
            for i in range(self.n_agent)
        ]
        if normalize_inputs:
            self.obs_rms = RunningMeanStd(shape=self.input_size)

        self.fileresults = open('learning.data', "w")
Example #30
    def unlock(self):
        if not self.is_locked():
            raise IllegalLockStateException()
        self.__exit_lock()
        if not self.is_locked():
            return self.redis_client.delete(self.lock_key)
        return True


# the lock test lives in the __main__ block below
if __name__ == "__main__":
    from multiprocessing import Process, RawValue
    import os
    import threading
    import time

    redis_client = redis.StrictRedis("localhost", 6379, 0)
    redis_lock = RedisLock(_lock_key="test_lock", _redis_client=redis_client)
    x = RawValue("i", 0)

    def test_lock(shared_x):
        n = 0
        while n < 3:
            if redis_lock.try_lock(3):
                redis_lock.lock()
                n += 1
                shared_x.value += 1
                print "{0} {1} lock success and do {2}".format(
                    os.getpid(),
                    threading.currentThread().name, shared_x.value)
                # print "do lock thing"
                time.sleep(0.1)
                redis_lock.unlock()
                redis_lock.unlock()
Example #31
 def __init__(self, value=0):
     # RawValue because we don't need the extra Lock that Value would create:
     self.val = RawValue('i', value)
     self.lock = Lock()
Example #32
 def __init__(self, value=0):
     self.val = RawValue('i', value)
     self.lock = threading.Lock()
Example #33
 def __init__(self, value=0):
     self.val = RawValue('i', value)
     self.lock = Lock()
Example #34
 def reset(self):
     with self.lock:
         # assign through .value so other processes keep seeing the same
         # shared memory; rebinding self.val would break the sharing
         self.val.value = self.initial_value
Example #35
 def __init__(self, initval=0):
     self.val = RawValue('i', initval)
     self.last_step_update_target = RawValue('i', initval)
     self.lock = Lock()
Example #36
    def __init__(self):
        print("SimulationCommunicator object created")

        # simulation result values
        self.ball_x = RawValue('f', 0.0)
        self.ball_y = RawValue('f', 0.0)

        self.servo_x = RawValue('i', 0)
        self.servo_y = RawValue('i', 0)

        self.corner_tl_x = RawValue('f', 0.0)
        self.corner_tl_y = RawValue('f', 0.0)
        self.corner_tr_x = RawValue('f', 0.0)
        self.corner_tr_y = RawValue('f', 0.0)
        self.corner_br_x = RawValue('f', 0.0)
        self.corner_br_y = RawValue('f', 0.0)
        self.corner_bl_x = RawValue('f', 0.0)
        self.corner_bl_y = RawValue('f', 0.0)

        self.cameraFrame = RawArray('i', 3 * 256**2)

        # mutable values
        self.servo_actual_pos = [0, 0]  # current servo position
        self.servo_target_pos = [0, 0]  # target servo position

        self.refreshDeltaTime = 1 / 60
        self.frameReadTargetDelta = 1 / 40
        self.frameReadLastTime = 0.0
        self.capturedFrame = np.zeros((256, 256, 3))
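A closing note on buffers like cameraFrame above: a RawArray can be viewed as a numpy image without copying, which is the usual way such a frame is written by one process and read by another. A small sketch (variable names are illustrative; 'i' maps to int32 on common platforms):

import numpy as np
from multiprocessing import RawArray

raw = RawArray('i', 3 * 256**2)
frame = np.frombuffer(raw, dtype=np.int32).reshape(256, 256, 3)
frame[0, 0] = (255, 0, 0)       # writes land directly in the shared buffer
print(raw[0], raw[1], raw[2])   # 255 0 0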