Example No. 1
class Frame(object):
    def __init__(self, width, height, channels, array_type_code):
        self.__lock = RLock()
        self.__header = Value(Header,
                              width,
                              height,
                              channels,
                              0,
                              lock=self.__lock)
        self.__image = Array(array_type_code,
                             self.__header.width * self.__header.height *
                             channels,
                             lock=self.__lock)
        self.__latch = StateLatch(State.READY, self.__lock)

    def copy(self, dst):
        memmove(addressof(dst.image.get_obj()),
                addressof(self.__image.get_obj()),
                sizeof(self.__image.get_obj()))
        memmove(addressof(dst.header.get_obj()),
                addressof(self.__header.get_obj()),
                sizeof(self.__header.get_obj()))

    def clear(self):
        self.__header.epoch = 0
        memset(addressof(self.__image.get_obj()), 0,
               sizeof(self.__image.get_obj()))
        memset(addressof(self.__header.detections), 0,
               sizeof(self.__header.detections))

    @property
    def lock(self):
        return self.__lock

    @property
    def header(self):
        return self.__header

    @property
    def image(self):
        return self.__image

    @property
    def latch(self):
        return self.__latch

    def get_numpy_image(self, dtype=None):
        """Return the image shape and a numpy view of the shared image buffer.

        The dtype should match the array_type_code used to create the frame;
        numpy falls back to float64 when dtype is None.
        """
        image_shape = (self.header.height, self.header.width,
                       self.header.channels)
        image_np = frombuffer(self.image.get_obj(), dtype).reshape(image_shape)
        return image_shape, image_np
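This excerpt omits its imports and the Header, State and StateLatch definitions it relies on. Under those assumptions, a minimal usage sketch could look like this (dtype chosen to match the 'B' type code):

# Hypothetical usage sketch, not part of the original example.
frame = Frame(width=640, height=480, channels=3, array_type_code='B')
with frame.lock:
    shape, img = frame.get_numpy_image(dtype='uint8')
    print(shape, img.mean())
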
Example No. 2
def shared_array(shape):
    import numpy as np
    from multiprocessing.sharedctypes import Array
    from ctypes import c_double
    # Allocate the memory in shared space.
    memory = Array(c_double, int(np.prod(shape)))
    # Create and return a structure to access the shared memory (numpy array).
    return np.frombuffer(memory.get_obj(), dtype=float).reshape(shape)
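A quick usage sketch for shared_array (an addition, not part of the original example): with a fork start method, the default on Linux, a child process inherits the same underlying buffer, so its writes are visible to the parent; under a spawn start method the array would be pickled by value and the sharing lost.

# Sketch: child writes into the shared block; the parent sees the result
# (fork start method assumed).
import numpy as np
from multiprocessing import Process

def _fill(arr):
    arr[:] = 1.0  # writes land in the shared ctypes buffer

if __name__ == '__main__':
    a = shared_array((4, 4))
    p = Process(target=_fill, args=(a,))
    p.start()
    p.join()
    assert np.all(a == 1.0)
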
class CosmicMulti:
    def __init__(self, n, mass, RxInit, RyInit, VxInit, VyInit, AxInit, AyInit,
                 interactions):
        self.N = n
        self.M = Value('d', mass, lock=False)
        self.R = Array('d', 3 * (self.N + 1))
        self.V = Array('d', 3 * (self.N + 1))
        self.A = Array('d', 3 * (self.N + 1))
        # View each shared buffer as a (3, N+1) numpy array; the views and the
        # ctypes Arrays share the same underlying memory.
        arrR = np.frombuffer(self.R.get_obj())
        self.bR = arrR.reshape((3, self.N + 1))
        self.bR[0, 0] = RxInit
        self.bR[1, 0] = RyInit
        arrV = np.frombuffer(self.V.get_obj())
        self.bV = arrV.reshape((3, self.N + 1))
        self.bV[0, 0] = VxInit
        self.bV[1, 0] = VyInit
        arrA = np.frombuffer(self.A.get_obj())
        self.bA = arrA.reshape((3, self.N + 1))
        self.bA[0, 0] = AxInit
        self.bA[1, 0] = AyInit
        #self.R = np.zeros(shape = (3, self.N+1))
        #self.V = np.zeros(shape = (3, self.N+1))
        #self.A = np.zeros(shape = (3, self.N+1))
        #self.R[0,0] = RxInit
        #self.R[1,0] = RyInit
        #self.V[0,0] = VxInit
        #self.V[1,0] = VyInit
        #self.A[0,0] = AxInit
        #self.A[1,0] = AyInit
        self.Interactions = interactions
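The comments above describe the key property of this setup: each reshaped numpy view addresses the same memory as its flat ctypes Array. A small check of that claim (a sketch; the constructor arguments are placeholders):

# Sketch: writing through the view is visible through the flat Array.
c = CosmicMulti(n=2, mass=1.0, RxInit=1.0, RyInit=2.0, VxInit=0.0, VyInit=0.0,
                AxInit=0.0, AyInit=0.0, interactions=None)
c.bR[2, 1] = 5.0
assert c.R[2 * (c.N + 1) + 1] == 5.0  # same element, reached through the flat Array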
Example No. 4
def main():
    url = 'http://192.168.1.1/videostream.cgi'
    user = '******'
    pwd = ''
    auth = requests.auth.HTTPDigestAuth(user, pwd)
    r = requests.get(url, auth=auth, stream=True, timeout=9)

    h = 480
    w = 640
    ch = 3
    img = Array('B', h * w * ch)  # shared byte buffer, zero-initialized
    arr = np.frombuffer(img.get_obj(), dtype=np.uint8)
    img_lock = Event()

    stream = Process(target=stream_video, args=(r, img, img_lock))
    stream.start()

    fps = 60.0
    fourcc = cv2.VideoWriter_fourcc(*'X264')
    out = cv2.VideoWriter('output.mp4', fourcc, fps, (640, 480))

    prev = datetime.now()
    time_interval = 1.0 / fps

    while True:
        now = td_to_ms(datetime.now() - prev)
        if now >= time_interval:
            if not img_lock.is_set():
                img_lock.set()
                # Copy out of shared memory before releasing the flag, so the
                # streaming process cannot overwrite the frame mid-display.
                frame = arr.reshape((h, w, ch)).copy()
                img_lock.clear()
                cv2.imshow('camera', frame)
                out.write(frame)
                prev = datetime.now()
                time_interval = (2.0 / fps) - now

        if cv2.waitKey(1) == 27:
            break

    stream.terminate()
    out.release()
    cv2.destroyAllWindows()

    return
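The stream_video worker and the td_to_ms helper are defined elsewhere and not shown. A rough, hypothetical sketch of what the worker might look like, reading the camera's MJPEG stream and decoding each JPEG into the shared buffer whenever the Event flag is not set:

# Hypothetical sketch of stream_video (not from the original source).
def stream_video(response, img, img_lock):
    import cv2
    import numpy as np
    arr = np.frombuffer(img.get_obj(), dtype=np.uint8)
    buf = b''
    for chunk in response.iter_content(chunk_size=4096):
        buf += chunk
        start, end = buf.find(b'\xff\xd8'), buf.find(b'\xff\xd9')  # JPEG SOI/EOI markers
        if start != -1 and end != -1:
            jpg, buf = buf[start:end + 2], buf[end + 2:]
            frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
            if frame is not None and frame.size == arr.size and not img_lock.is_set():
                arr[:] = frame.reshape(-1)  # copy decoded pixels into shared memory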
Example No. 5
#print z.max(), z.min(), z.std()  # Uncomment to verify that the 'random' seed is identical across tests
p = np.ones((w.n, 1), float) 
floor_variable = p
floor = 3

'''START TIMING HERE - AFTER TEST DATA GENERATED'''
time0 = time.time()

#Multiprocessing setup
cores = mp.cpu_count()
cores = cores * 2
numP = len(p)+1
#Shared memory solution space
lockSoln = mp.Lock()
cSoln = Array(ctypes.c_double, numP*cores, lock=lockSoln)
numSoln = np.frombuffer(cSoln.get_obj())
numSoln.shape = (numP,cores)
numSoln[:] = -1
#Shared memory update flag space
lockflag = mp.Lock()
c_updateflag = Array(ctypes.c_int, 3*(cores*2), lock=lockflag)  # A separate lock per shared array is fine.
# Note: np.frombuffer without a dtype views the buffer as float64, so the c_int
# allocation is doubled (two 4-byte ints per 8-byte float) to make the
# (3, cores) view fit; dtype=ctypes.c_int with 3*cores elements would be more direct.
updateflag = np.frombuffer(c_updateflag.get_obj())
updateflag.shape = (3, cores)
updateflag[0] = 1 #True for first iteration. - whether the answer was updated
updateflag[1] = 0 #Iteration counter per core.
for index in range(len(updateflag[2])): #Define the tabu list length for each chord.
    updateflag[2][index] = tabulength(numP)
_init_shared(updateflag)

neighbordict = dict(w.neighbors)  # Class instances are not picklable.
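_init_shared and tabulength are defined elsewhere in the script. The initializer presumably just stashes the shared view at module level so that forked worker functions can reach it by name instead of receiving it through pickled arguments; a hypothetical sketch:

# Hypothetical sketch of _init_shared (not from the original source).
def _init_shared(updateflag_):
    global updateflag
    updateflag = updateflag_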
Example No. 6
def fit_model(star, nextcomp=2, guess=None, fitpar=[], order=3, niter=3*10**5,
              parallel=0, factr=10**7, mvec=10):
    nstar = star['mag'].shape[0]
    nband = star['mag'].shape[1]
    nintmagpar = len(compute_multinomials(order=order)[0])
    print('Should cut out YSOs somewhere')
    if guess is None:
        # intmagparguess = numpy.zeros((nband, nintmagpar), dtype='f8')
        intmagparguess = numpy.random.randn(nband, nintmagpar)
        extcurveparguess = numpy.random.randn(nextcomp, nband)
        if nband == 12:
            intmagparguess[:, 0] = numpy.array([
                    0.2, -0.5, -0.8, -0.9, -1.0, -2.0, -2.6, -2.7, -2.7, -2.6,
                    -0.1, -1.1], dtype='f8')
            # just get the constant term right.
            if nextcomp > 0:
                # probably: grizYJHK12BR
                extcurveparguess[0, :] = numpy.array(
                    [3.5, 2.7, 2.0, 1.6, 1.3, 0.8, 0.5, 0.3, 0.2, 0.2, 
                     3.5, 2.0])
        starguess = {}
        starguess['extinction'] = star['extprior'].copy()
        starguess['mu'] = parallax_to_mu(
            numpy.clip(star['parallax'], 0.01, numpy.inf))
        for parname in fitpar:
            starguess[parname] = star[parname]
    else:
        intmagparguess, extcurveparguess, starguess = guess
    guess = wrap_param(intmagparguess, extcurveparguess,
                       starguess, nband, nintmagpar, nstar,
                       fitpar=fitpar)
    def chi2_wrapper(param):
        upar = unwrap_param(param, nband, nintmagpar, nstar, nextcomp, 
                            fitpar=fitpar)
        intmagpar, extcurvepar, starpar = upar
        chi2, grad = fullmodel_chi2(star,
                                    intmagpar, extcurvepar, starpar, 
                                    nband=nband, 
                                    gradient=True, fitpar=fitpar,
                                    order=order)
        return chi2, grad
    if parallel > 0:
        from multiprocessing import Queue, Process
        from multiprocessing.sharedctypes import Array
        import ctypes
        qins = [Queue() for i in range(parallel)]
        qout = Queue()
        ind = numpy.floor(numpy.linspace(0, nstar, parallel+1, endpoint=True))
        ind = ind.astype('i4')
        npar = (nintmagpar+nextcomp)*nband+nstar*(1+nextcomp+len(fitpar))
        grad = Array(ctypes.c_double, npar)
        gradnp = numpy.frombuffer(grad.get_obj(), dtype='f8')
        proclist = [
            Process(target=worker,
                    args=(qins[i], qout, grad, star, nextcomp, fitpar, order,
                          ind[i:i+2], i))
            for i in range(parallel)]
        for p in proclist:
            p.start()
        def chi2_wrapper_parallel(param):
            gradnp[:] = 0.
            for i in range(parallel):
                qins[i].put(param)
            chi2 = 0.
            for i in range(parallel):
                tchi2 = qout.get()
                chi2 += tchi2
            # grad is shared memory; the workers fill gradnp in place.
            return chi2, gradnp
        wrapper = chi2_wrapper_parallel
    else:
        wrapper = chi2_wrapper
    from scipy.optimize import fmin_l_bfgs_b, fmin_cg
    res = fmin_l_bfgs_b(wrapper, guess, m=mvec, iprint=10,
                        maxiter=niter, maxfun=niter, factr=factr)
    # cg_chi2_wrapper = lambda x: wrapper(x)[0]
    # cg_grad_wrapper = lambda x: wrapper(x)[1]
    # res = fmin_cg(cg_chi2_wrapper, guess, fprime=cg_grad_wrapper, 
    #               maxiter=niter)
    return res
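The worker target is not shown here. One detail it has to handle: grad is a synchronized Array, so several workers adding their gradient pieces concurrently should serialize the update with the Array's built-in lock. A hypothetical sketch of that accumulation step:

# Hypothetical sketch (not from the original source): add one worker's gradient
# piece into the shared array under its lock.
def accumulate_gradient(grad, local_grad):
    import numpy
    with grad.get_lock():                    # serialize concurrent updates
        gradview = numpy.frombuffer(grad.get_obj(), dtype='f8')
        gradview += local_grad               # local_grad: this worker's contribution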
Example No. 7
def start_distributed_mp():

    # Setup

    # To continue training, give path to state dict
    state_dict_path = None 

    
    # Learner specific
    learner_training_steps   = 1000000
    learner_learning_rate    = 0.00025
    learner_policy_update    = 50
    learner_optimizer        = 'Adam'
    learner_device           = 'cuda'
    learner_job_max_time     = 60*60*24 - 60*10  # 23 hours 50 min
    learner_save_date        = datetime.now().strftime("%d_%b_%Y_%H_%M_%S")
    learner_eval_p_errors    = [0.1, 0.2, 0.3]
    learner_eval_no_episodes = 10
    learner_eval_freq        = -1 # -1 for no logging
   
    # Actor specific
    actor_max_actions_per_episode  = 75
    actor_size_local_memory_buffer = 100
    actor_no_envs       = 16           #number of envs/actor
    no_cuda_actors      = 1
    no_cpu_actors       = 0
    actor_no_actors     = no_cuda_actors + no_cpu_actors
    #epsilon             = calculateEpsilon(0.8, 7, actor_no_actors * actor_no_envs)
    epsilon             = calculateEpsilon(0.8, 7, actor_no_envs)
    epsilon_delta       = 0.005
    env_p_error_interval_start    = 0.1
    env_p_error_interval_final    = 0.3
    env_p_error_interval_increase = 0.00005
    env_p_error_strategy          = 'random' # either {'random', 'linear'}
    
    # Replay Memory specific
    replay_memory_size                  = 1000000
    replay_memory_alpha                 = 0.6
    replay_memory_beta                  = 0.4
    replay_memory_size_before_sampling = 5000  # alternatively: replay_memory_size*0.05
    replay_memory_batch_in_queue_limit  = 10 #number of batches in queue to learner
    log_priority_dist                   = True
    log_write_frequency                 = 500
    log_priority_sample_max             = 10
    log_priority_sample_interval_size   = 0.01
    
    # Shared
    batch_size = 16
    discount_factor = 0.95
    env = "toric-code-v0"
    env_config = {  "size":9,
                    "min_qubit_errors": 0,
                    "p_error": 0.1
            }

    model = ResNet18
    #model = NN_11
    #model = NN_17
    model_config = {"system_size": env_config["size"],
                    "number_of_actions": env_config["size"]
                    }

    if state_dict_path is not None:
        checkpoint = torch.load(state_dict_path, map_location=learner_device)
    else:
        checkpoint = None

    # Pre-load initial network weights
    if model == NN_11 or model == NN_17:
        m = model(model_config["system_size"], model_config["number_of_actions"], learner_device)
    else: 
        m = model()

    # load checkpoint params
    if state_dict_path is not None:
        m.load_state_dict(checkpoint['model_state_dict'])

    params      = parameters_to_vector(m.parameters()).detach().cpu().numpy()
    no_params   = len(params)
    
    #Comm setup 
    actor_io_queue = mp.Queue()
    learner_io_queue = mp.Queue()
    io_learner_queue = mp.Queue()
    shared_mem_weight_id  = Value('i')
    shared_mem_weight_id.value = 0

    # Write initial weights to shared memory
    shared_mem_weights    = Array('d', no_params)            # Shared memory for weights
    mem_reader = np.frombuffer(shared_mem_weights.get_obj()) # create memory reader for shared mem
    np.copyto(mem_reader, params)                            # Write params to shared mem
    
    del m  # the temporary model was only needed to produce the initial weights; free it
    
    """
        Learner Process
    """
    learner_args = {
        "train_steps"                   :learner_training_steps,
        "batch_size"                    :batch_size,
        "learning_rate"                 :learner_learning_rate,
        "policy_update"                 :learner_policy_update,
        "discount_factor"               :discount_factor,
        "optimizer"                     :learner_optimizer,
        "model"                         :model,
        "model_config"                  :model_config,
        "model_no_params"               :no_params,
        "device"                        :learner_device,
        "env"                           :env,
        "env_config"                    :env_config,
        "job_max_time"                  :learner_job_max_time,
        "save_date"                     :learner_save_date,
        "learner_io_queue"              :learner_io_queue,
        "io_learner_queue"              :io_learner_queue,
        "shared_mem_weights"            :shared_mem_weights,
        "shared_mem_weight_id"          :shared_mem_weight_id,
        "learner_eval_p_errors"         :learner_eval_p_errors,
        "learner_eval_no_episodes"      :learner_eval_no_episodes,
        "learner_eval_freq"             :learner_eval_freq,
        "learner_checkpoint"            :checkpoint
    }
    
    
    """
        Memory Process
    """
    mem_args = {
        "capacity"                          :replay_memory_size,
        "alpha"                             :replay_memory_alpha,
        "beta"                              :replay_memory_beta,
        "batch_size"                        :batch_size,
        "io_learner_queue"                  :io_learner_queue,
        "learner_io_queue"                  :learner_io_queue,
        "actor_io_queue"                    :actor_io_queue,
        "batch_in_queue_limit"              :replay_memory_batch_in_queue_limit,
        "no_actors"                         :actor_no_actors,
        "replay_size_before_sampling"       :replay_memory_size_before_sampeling if not (replay_memory_size_before_sampeling is None) else min(batch_size, int(replay_memory_size*0.1)),
        "save_date"                         :learner_save_date,
        "log_priority_dist"                 :log_priority_dist,
        "log_write_frequency"               :log_write_frequency,
        "log_priority_sample_max"           :log_priority_sample_max,
        "log_priority_sample_interval_size" :log_priority_sample_interval_size,
        "start_time"                        :learner_save_date
    }
    
              
    """
        Actor Processes
    """
    actor_args = { 
        "max_actions_per_episode"       :actor_max_actions_per_episode, 
        "size_local_memory_buffer"      :actor_size_local_memory_buffer, 
        "env_config"                    :env_config,
        "model"                         :model,
        "model_config"                  :model_config,
        "model_no_params"               :no_params,
        "env"                           :env,
        "discount_factor"               :discount_factor,
        "no_envs"                       :actor_no_envs,
        "actor_io_queue"                :actor_io_queue,
        "shared_mem_weights"            :shared_mem_weights,
        "shared_mem_weight_id"          :shared_mem_weight_id,
        "epsilon_delta"                 :epsilon_delta,
        "env_p_error_start"             :env_p_error_interval_start,
        "env_p_error_final"             :env_p_error_interval_final,
        "env_p_error_delta"             :env_p_error_interval_increase,
        "env_p_error_strategy"          :env_p_error_strategy,
        "no_cuda_actors"                :no_cuda_actors,
        "no_cpu_actors"                 :no_cpu_actors,
        "log_actor"                     :log_priority_dist
    }

    # log header to tensorboard
    if could_import_tb:
        log("runs/{}/RunInfo/".format(learner_save_date), actor_args, learner_args, mem_args, state_dict_path)

    io_process = mp.Process(target=io, args=(mem_args,))
    actor_process = []    
    for i in range(actor_no_actors):
        if i < no_cuda_actors:
            actor_args["device"] = 'cuda'
        else:
            actor_args["device"] = 'cpu'
        
        actor_args["id"] = i
        #actor_args["epsilon_final"] = epsilon[i * actor_no_envs : i * actor_no_envs + actor_no_envs]
        actor_args["epsilon_final"] = epsilon
        actor_process.append(mp.Process(target=actor, args=(actor_args,)))
        actor_process[i].start()
    
    io_process.start()
    try:
        learner(learner_args) 
    except:
        tb = SummaryWriter("runs/{}/RunInfo/".format(learner_save_date))
        tb.add_text("RunInfo/Error_Message", str(sys.exc_info()[0]))
        tb.close()

    time.sleep(2)
    print("Training done.")
    for i in range(actor_no_actors):
        actor_process[i].terminate()
    io_process.terminate()
    print("Script complete.")
Example No. 8
def _mutual_proximity_empiric_sparse(S: csr_matrix,
                                     test_set_ind: np.ndarray = None,
                                     min_nnz=0,
                                     verbose: int = 0,
                                     log=None,
                                     n_jobs=None):
    """MP empiric for sparse similarity matrices.

    Please do not use this function directly; invoke it via
    mutual_proximity_empiric().
    """
    if verbose and log:
        log.message("Starting MP empiric for sparse matrices.")
    self_value = 1.  # similarity matrix
    n = S.shape[0]
    if not n_jobs:
        n_jobs = 1
    elif n_jobs == -1:
        n_jobs = cpu_count()

    # This will become S_mp.data
    shared_data = Array(ctypes.c_double, S.data.size)
    shared_data_np = np.ctypeslib.as_array(shared_data.get_obj())

    if verbose and log:
        log.message("Spawning processes and starting MP computation.")
    with Pool(processes=n_jobs,
              initializer=_mpes_init,
              initargs=(S, shared_data)) as pool:
        S_nonzero = filterfalse(lambda ij: ij[0] > ij[1], zip(*S.nonzero()))
        for _ in pool.imap(func=partial(_mpes_sec_dist,
                                        args=(verbose, log, n, min_nnz)),
                           iterable=S_nonzero,
                           chunksize=int(1e5)):
            pass  # output stored by function in shared array
    pool.join()
    if verbose and log:
        log.message("Assemble upper-triangular MP matrix.")
    S_mp = csr_matrix((shared_data_np, S.indices, S.indptr),
                      shape=S.shape,
                      copy=False).tolil()
    del shared_data, shared_data_np
    if verbose and log:
        log.message("Symmetrizing matrix.")
    S_mp += S_mp.T
    # Retain original distances for objects with too few neighbors.
    # That is, keep distances FROM these objects to others (rows), but
    # set distances of other objects TO them to NaN (columns).
    # Returned matrix is thus NOT SYMMETRIC.
    if verbose and log:
        log.message(("Retain original similarities for objects with too few "
                     "neighbors. If there are any, the output matrix will "
                     "not be symmetric anymore! (Rows corresponding to these "
                     "objects will be in original space; corresponding "
                     "columns will contain NaN)."))
    for row in np.argwhere(S.getnnz(axis=1) <= min_nnz):
        row = row[0]  # use scalar for indexing instead of array
        S_mp[row, :] = S.getrow(row)
    if verbose and log:
        log.message("Setting self similarities.")
    for i in range(n):
        S_mp[i, i] = self_value  #need to set self values
    if verbose and log:
        log.message("Converting to CSR matrix and returning.")
    return S_mp.tocsr()
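_mpes_init and _mpes_sec_dist are defined elsewhere in the library. The initializer typically just exposes the similarity matrix and the shared output buffer as module-level globals for the pool workers; a hypothetical sketch (np comes from the module's own imports):

# Hypothetical sketch of the pool initializer (not from the original source).
def _mpes_init(S, shared_data):
    global S_, shared_data_np_
    S_ = S
    shared_data_np_ = np.ctypeslib.as_array(shared_data.get_obj())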
Example No. 9
#Setup the test data:
w = pysal.lat2W(10, 10) #A contiguity weights object
z = np.random.random_sample((w.n, 2)) #Each location is assigned two attributes
p = np.ones((w.n, 1), float) #The region that each location belongs to.
floor = 3 #The minimum bound or value for each region

#Grab the number of available cores
cores = mp.cpu_count()
cores = cores * 2  #Hyperthreading and testing on a dual-core MBP
#Grab the length of p
numP = len(p)+1
#Setup a shared mem array for solutions with dim = numP * cores
lock = mp.Lock()
cSoln = Array(ctypes.c_double, numP*cores, lock=lock)
numSoln = np.frombuffer(cSoln.get_obj())
numSoln.shape = (numP,cores)
numSoln[:] = 1
#initSolnSpace(numSoln) #initialize the solution space as a shared memory array
'''The soln space is an array that holds node id as the index and membership as the attribute.'''

neighbordict = dict(w.neighbors)  #This is interesting - we cannot pass a class instance through apply_async, so we convert it to a dict.

pool = mp.Pool(processes=cores) #Create a pool of workers, one for each core
for job in range(cores): #Prep to assign each core a job
    pool.apply_async(initialize, args=(job,z,w,neighbordict,floor,p,numP, cores)) #Async apply each job
pool.close()
pool.join()

sharedSoln = np.frombuffer(cSoln.get_obj())
sharedSoln.shape = (numP, cores)
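The initialize worker is defined elsewhere. Because the pool is forked after cSoln is created, a worker can reach the shared solution space through the module-level name and write its own column under the shared lock; a hypothetical sketch of that write:

# Hypothetical sketch (not from the original source): store one worker's
# candidate solution in its own column of the shared solution space.
def write_solution(job, solution_vector):
    sharedSoln = np.frombuffer(cSoln.get_obj())   # module-level cSoln, inherited on fork
    sharedSoln.shape = (numP, cores)
    with cSoln.get_lock():                        # the mp.Lock passed at creation
        sharedSoln[:len(solution_vector), job] = solution_vector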