Example #1
class WSRunner(Process):
	def __init__(self, url, d):
		Process.__init__(self)
		self.url = url
		self.d = d
		self.lock = Lock()
		self.lock.acquire()
	@asyncio.coroutine
	def ws(self):
		websocket = yield from websockets.connect(self.url)
		update_count = 0
		self.lock.release()
		while update_count < num_loc_updates * num_threads:
			update = yield from websocket.recv()
			update_count += 1
			j = json.loads(update)
			lat = float(j['lat'])
			lng = float(j['lng'])
			player_id = j['player_id']
			self.d[player_id] = (lat, lng)
		yield from websocket.close()  # close() is a coroutine in the websockets library

	def run(self):
		loop = asyncio.new_event_loop()
		asyncio.set_event_loop(loop)		
		loop.run_until_complete(self.ws())
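A hedged usage sketch for the runner above; the URL is hypothetical, and the shared dict comes from a multiprocessing.Manager so updates made in the child process are visible to the parent (the original also relies on module-level num_loc_updates and num_threads):

from multiprocessing import Manager

if __name__ == '__main__':
    manager = Manager()
    positions = manager.dict()   # shared player_id -> (lat, lng) map
    runner = WSRunner('ws://localhost:8765', positions)  # hypothetical URL
    runner.start()
    runner.lock.acquire()        # blocks until ws() releases the lock after connecting
    runner.join()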
Example #2
class SMS_split(object):
    '''Split the phone list into packages for sending.'''
    def __init__(self,package_size,phone_list):
        self.phone_list = phone_list
        self.package_size = package_size
        self.package = [elem for elem in range(package_size)]       
        self._lock = Lock()
           
    def __iter__(self):
        # index of the next phone number to be handed out
        self.current_spot = 0
        return self
    
    def next(self):
        self._lock.acquire()
        try:
            if (self.current_spot >= len(self.phone_list)):
                self.current_spot = len(self.phone_list)
                raise StopIteration
            self.package = self.phone_list[self.current_spot:
                                           self.current_spot + self.package_size]
            self.current_spot += self.package_size
        finally: 
            self._lock.release()
        return self.package

    def set_package_size(self, package_size):
        self.package_size = package_size
    def get_package_size(self):
        return self.package_size

    def get_already_send_num(self):
        return self.current_spot
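A minimal usage sketch (the phone numbers are made up; in Python 2 the for loop drives the next() method defined above):

phones = ['555-0100', '555-0101', '555-0102', '555-0103', '555-0104']
splitter = SMS_split(2, phones)
for package in splitter:
    print package   # ['555-0100', '555-0101'], then the next two, then the last one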
Example #3
class IPClusterPool(object):
    def __init__(self, workers):
        self.queue_lock = Lock()
        self.queue = []
        self.workers = {w: None for w in workers}

        self.terminated = False
        self.closed = False
        threading.Thread(target=self._manager).start()


    def terminate(self):
        self.terminated = True

    def join(self):
        while not self.terminated:
            pass  # busy-wait until terminate() is called

    def close(self):
        self.closed = True

    def apply_async(self, job, *job_args):
        assert isinstance(job, str), "Job has to be string"
        try:
            self.queue_lock.acquire()
            self.queue.append(IPClusterTask(job, job_args))
            return self.queue[-1]
        finally:
            self.queue_lock.release()
Example #4
class ImageLogger(object):

    def __init__(self, dev_id):
        config = ConfigParser.ConfigParser()
        config.read(path.dirname(path.realpath(__file__))+"/condor.ini")
        storage_root = config.get('condor', 'storage')
        self.storage = path.join(storage_root, str(dev_id))
        if not path.isdir(self.storage):
            mkdir(self.storage)
        self.l = Lock()

    def image(self, img, name=''):
        self.l.acquire()
        if not name:
            name = str(uuid.uuid4())
        
        imgpath = path.join(self.storage, "%s.png" % (name))
        if path.isfile(imgpath):
            i = 1
            imgpath = path.join(self.storage, "%s-%s.png" % (name, i))
            while path.isfile(imgpath):
                i += 1
                imgpath = path.join(self.storage, "%s-%s.png" % (name, i))
        cv2.imwrite(imgpath,img)
        self.l.release()
Example #5
class Plotter:
    def __init__(self):
        self.getMap()
        self.resolution = 0.05
        self.lock = Lock()
        #rospy.Subscriber("racecar/mcl/current_particles", PoseArray, self.updatePath) # from MCL
    
    def getMap(self):
        image = mapper.read_pgm("mapping/realmap.pgm", byteorder='<')
        croppedMap = mapper.hardCrop(image)
        self.map = np.flipud(np.rot90(np.array([np.array([item for item in row]) for row in croppedMap])))

    def get_path(self):
        self.lock.acquire()
        self.path = np.load("path.npy")
        self.lock.release()
        #pyx_file = os.path.join()

        

    def plot_path(self):
        print 'plotting path'
        print len(self.map)
        print len(self.path)
        for (x_m, y_m, w) in self.path[:11500]:
            x, y = sensor_update.meters_to_pixel([self.map], self.resolution, x_m, y_m)
            self.map[min(x+40, 220)][y] = 0
        plt.imshow(self.map, plt.cm.gray)
        plt.show()
Example #6
class ImageLogger(object):

    def __init__(self, root):
        self.storage = root
        if not path.isdir(self.storage):
            mkdir(self.storage)
        self.l = Lock()
        self.fmt = 'jpg'

    def image(self,dev_id, img, ts=None):
        self.l.acquire()
        if ts:
            name = datetime.datetime.fromtimestamp(ts).replace(microsecond=0).strftime('%Y%m%d%H%M%S')
        else:
            name = str(uuid.uuid4())
        image_path = path.join(self.storage,str(dev_id))
        if not path.isdir(image_path):
            mkdir(image_path)

        imgpath = path.join(image_path, "%s.%s" % (name, self.fmt))
        i = 0
        while path.isfile(imgpath):
            imgpath = path.join(image_path, "%s-%s.%s" % (name, i, self.fmt))
            i += 1
        cv2.imwrite(imgpath, img)
        self.l.release()
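A hedged usage sketch for this variant (the root directory and the dummy image are assumptions):

import numpy as np

logger = ImageLogger('/tmp/condor-images')   # hypothetical root directory
img = np.zeros((8, 8), dtype=np.uint8)
logger.image(42, img)                    # -> /tmp/condor-images/42/<uuid4>.jpg
logger.image(42, img, ts=1500000000.0)   # timestamp-derived name; -0, -1, ... on collision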
Example #7
def launch_workers(outfile, start_index, end_index, score_flag, force, verbose):
	BASE_URL = "http://www.ign.com/games/all-ajax?startIndex="
	
	
	# Synchronization Tools
	num_workers = Semaphore(MAX_NUM_PROCESSES)
	outfile_lock = Lock()
	urlopen_lock = Lock()
	stderr_lock = Lock()
	print_lock = Lock()
	
	# Write the categories
	if outfile is not None:
		outfile.write("title,link,platform,publisher,score,date\n")

	# Launch the workers
	processes = []
	curr_index = start_index
	while curr_index <= end_index:
		curr_url = BASE_URL + str(curr_index)
		worker = Process(target=open_url_and_parse,
			args=(outfile, curr_url, score_flag, force, verbose,
				outfile_lock, urlopen_lock, stderr_lock, print_lock,
				num_workers))
		processes.append(worker)
		if verbose:
			print_lock.acquire()
			print "Launching worker for url: %s" % curr_url
			print_lock.release()
		num_workers.acquire()
		worker.start()
		curr_index += INDEX_INCREMENT
	for p in processes:
		p.join()
Example #8
def setup():
    """Performs basic setup for the daemon and ADC"""
    global exitLock
    logger.debug("Setting up ADC")
    try:
        ADC.setup()
    except RuntimeError as e:
        logger.critical(
            "Attempting to start the BBB GPIO library failed.  This can be "
            "due to a number of things, including:"
        )
        logger.critical(
            "- Too new a kernel.  Adafruit BBIO runs on 3.8.13.  Downgrades "
            "to the version this is tested with can be done easily via:")
        logger.critical(
            "  apt-get install linux-{image,headers}-3.8.13-bone79")
        logger.critical("- Not running on a BBB")
        logger.critical("- Conflicting capes")
        logger.critical("Raw exception: %s", str(e))
        return
    tstat = connect()  # TODO: retries
    logger.debug("Attaching signal handlers")
    signal.signal(signal.SIGINT, handle_exit)
    signal.signal(signal.SIGTERM, handle_exit)
    logger.debug("Building Lock for singal interrupts")
    exitLock = Lock()
    exitLock.acquire()
    logger.debug("Running main loop")
    return tstat
Example #9
def say_hello(name='world', **kwargs):
    """@todo: Docstring for say_hello
    """
    l = Lock()
    l.acquire()
    print "Hello, %s" % name, kwargs
    l.release()
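The Lock above is created inside the function, so each process gets its own instance and nothing is actually serialized. A sketch of the usual pattern, passing one shared Lock to every worker:

from multiprocessing import Process, Lock

def say_hello_locked(lock, name='world'):
    lock.acquire()
    try:
        print "Hello, %s" % name
    finally:
        lock.release()

if __name__ == '__main__':
    lock = Lock()
    workers = [Process(target=say_hello_locked, args=(lock, 'worker-%d' % i))
               for i in range(3)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()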
Example #10
 def __init__(self, href_tnum):
     super(HrefProcess, self).__init__()
     self.href_tnum = href_tnum
     self.url_set = set()  # set of page URLs already seen
     self.img_set = set()  # set of image URLs already seen
     self.url_set_mutex = Lock()
     self.img_set_mutex = Lock()
Example #11
 def __setstate__(self, d):
     from multiprocessing import Lock
     self.__dict__ = d
     self.gal_lock = Lock()
     self.psf_lock = Lock()
     self.loaded_lock = Lock()
     self.noise_lock = Lock()
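Locks cannot be pickled, which is why __setstate__ recreates them after unpickling. A sketch of the matching __getstate__, assuming it simply drops the lock attributes:

 def __getstate__(self):
     d = self.__dict__.copy()
     for key in ('gal_lock', 'psf_lock', 'loaded_lock', 'noise_lock'):
         d.pop(key, None)  # locks are rebuilt in __setstate__
     return d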
Example #12
class ObjectManager(object):
    """A thread management object for single object threads"""

    def __init__(self, input_Q=None, output_Q=None, timeout=10, locking=False, lock=None, **kwargs):
        super(ObjectManager, self).__init__(**kwargs)
        self.input = Queue() if input_Q is None else input_Q
        self.output = Queue() if output_Q is None else output_Q
        self._timeout = timeout
        self._locking = locking
        if lock is None:
            self._lock = Lock()
        else:
            self._lock = lock
        self.hdr = {}
        self.hdr["pid"] = current_process().pid
        self.hdr["STATUS"] = self.NORMAL

    def __getattr__(self, attr):
        """Call a method on the underlying threaded object"""

        def method(*args, **kwargs):
            """A threaded method"""
            if self._locking:
                self._lock.acquire()
            self.input.put((self.hdr, attr, args, kwargs), timeout=self._timeout)

        return method

    ERROR = "ERROR"
    NORMAL = "NORMAL"

    @property
    def duplicator(self):
        """Arguments required to duplicate this manager"""
        return {
            "input_Q": self.input,
            "output_Q": self.output,
            "timeout": self._timeout,
            "locking": self._locking,
            "lock": self._lock,
        }

    def retrieve(self, inputs=False, timeout=None):
        """Retrieve a return value off the top of the output queue"""
        timeout = self._timeout if timeout is None else timeout
        try:
            hdr, func, args, kwargs, rvalue = self.output.get(timeout=timeout)
        except (QFull, QEmpty):
            raise ThreadStateError(code=2 ** 3, msg="Subthread Ended")
        if self._locking:
            self._lock.release()
        if hdr["STATUS"] == self.ERROR:
            self.clear()
            raise rvalue
        if inputs:
            return func, args, kwargs, rvalue, hdr
        else:
            return rvalue
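A hedged round-trip sketch: __getattr__ enqueues (hdr, attr, args, kwargs), and retrieve() expects some worker to have pushed (hdr, func, args, kwargs, rvalue) onto the output queue. The inline "worker" below is a stand-in, not part of the class:

mgr = ObjectManager()
mgr.compute(21)                                   # enqueues a call to "compute"
hdr, attr, args, kwargs = mgr.input.get()         # what a real worker would see
mgr.output.put((hdr, attr, args, kwargs, args[0] * 2))   # fake reply
print mgr.retrieve()                              # 42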
Example #13
    def __init__(self, file_name=None, image_dir=None, dir=None, preload=False,
                 noise_dir=None, logger=None, _nobjects_only=False):

        from galsim._pyfits import pyfits
        self.file_name, self.image_dir, self.noise_dir = \
            _parse_files_dirs(file_name, image_dir, dir, noise_dir)

        self.cat = pyfits.getdata(self.file_name)
        self.nobjects = len(self.cat) # number of objects in the catalog
        if _nobjects_only: return  # Exit early if that's all we needed.
        ident = self.cat.field('ident') # ID for object in the training sample

        # We want to make sure that the ident array contains all strings.
        # Strangely, ident.astype(str) produces a string with each element == '1'.
        # Hence this way of doing the conversion:
        self.ident = [ "%s"%val for val in ident ]

        self.gal_file_name = self.cat.field('gal_filename') # file containing the galaxy image
        self.psf_file_name = self.cat.field('PSF_filename') # file containing the PSF image

        # Add the directories:
        self.gal_file_name = [ os.path.join(self.image_dir,f) for f in self.gal_file_name ]
        self.psf_file_name = [ os.path.join(self.image_dir,f) for f in self.psf_file_name ]

        # We don't require the noise_filename column.  If it is not present, we will use
        # Uncorrelated noise based on the variance column.
        try:
            self.noise_file_name = self.cat.field('noise_filename') # file containing the noise cf
            self.noise_file_name = [ os.path.join(self.noise_dir,f) for f in self.noise_file_name ]
        except:
            self.noise_file_name = None

        self.gal_hdu = self.cat.field('gal_hdu') # HDU containing the galaxy image
        self.psf_hdu = self.cat.field('PSF_hdu') # HDU containing the PSF image
        self.pixel_scale = self.cat.field('pixel_scale') # pixel scale for image (could be different
        # if we have training data from other datasets... let's be general here and make it a 
        # vector in case of mixed training set)
        self.variance = self.cat.field('noise_variance') # noise variance for image
        self.mag = self.cat.field('mag')   # apparent magnitude
        self.band = self.cat.field('band') # bandpass in which apparent mag is measured, e.g., F814W
        self.weight = self.cat.field('weight') # weight factor to account for size-dependent
                                               # probability

        self.saved_noise_im = {}
        self.loaded_files = {}
        self.logger = logger

        # The pyfits commands aren't thread safe.  So we need to make sure the methods that
        # use pyfits are not run concurrently from multiple threads.
        from multiprocessing import Lock
        self.gal_lock = Lock()  # Use this when accessing gal files
        self.psf_lock = Lock()  # Use this when accessing psf files
        self.loaded_lock = Lock()  # Use this when opening new files from disk
        self.noise_lock = Lock()  # Use this for building the noise image(s) (usually just one)

        # Preload all files if desired
        if preload: self.preload()
        self._preload = preload
Example #14
    class __generator:
        def __init__(self):
            self.lock = Lock()
            self.lastId = 0

        def getId(self):
            self.lock.acquire()
            self.lastId += 1
            new_id = self.lastId  # read while still holding the lock
            self.lock.release()
            return new_id
Example #15
 def run(self):
     signal.signal(signal.SIGINT, signal.SIG_IGN)
     l = Lock()
     l.acquire()
     pd = pcapy.open_live(self.iface_name, ethernet.ETHERNET_MTU, 0, 100)
     pcap_filter = "ether proto %s" % hex(frames.WiwoFrame.ethertype) \
                   + " and (ether[14:1] = 0x07 or ether[14:1] = 0x08)"
     pd.setfilter(pcap_filter)
     l.release()
     pd.loop(-1, self.frame_handler)
Example #16
class DataLoader(object):
    def __init__(self, params, db, nn_db):
        self.lock = Lock()
        self.db = db
        self.cur = db.length
        self.im_shape = params['im_shape']
        self.nn_shape = params['nn_shape']
        self.hist_eq = params['hist_eq']
        self.indexes = np.arange(db.length)
        self.shuffle = params['shuffle']
        self.subtract_mean = params['subtract_mean']
        if self.subtract_mean:
            self.mean_img = self.db.read_mean_img(self.im_shape)

        self.load_nn = params['load_nn']
        self.nn_query_size = params['nn_query_size']
        if self.load_nn:
            self.nn_db = nn_db
            #nn_ignore = 1 if db.db_root == nn_db.db_root else 0
            nn_ignore = 0
            self.nn = NN(nn_db, params['nn_db_size'], nn_ignore)

    def load_next_data(self):
        nid = self.get_next_id()
        jp, imgs, segs = self.db.read_instance(nid, size=self.im_shape)
        item = {'jp': jp}
        for i in xrange(len(imgs)):
            img = imgs[i]
            if self.hist_eq:
                img = correct_hist(img)
            item.update({'img_' + shape_str(self.im_shape[i]): img.transpose((2, 0, 1)),
                         'seg_' + shape_str(self.im_shape[i]): segs[i]})
        if self.load_nn:
            nn_id = self.nn.nn_ids(jp, self.nn_query_size)
            if hasattr(nn_id, '__len__'):
                nn_id = random.choice(nn_id)
            nn_jp, nn_imgs, nn_segs = self.nn_db.read_instance(nn_id, size=self.nn_shape)
            item.update({'nn_jp': nn_jp})
            for i in xrange(len(nn_imgs)):
                nn_img = nn_imgs[i]
                if self.hist_eq:
                    nn_img = correct_hist(nn_img)
                item.update({'nn_img_' + shape_str(self.nn_shape[i]): nn_img.transpose((2, 0, 1)),
                             'nn_seg_' + shape_str(self.nn_shape[i]): nn_segs[i]})
        return item

    def get_next_id(self):
        self.lock.acquire()
        if self.cur >= len(self.indexes) - 1:
            self.cur = 0
            if self.shuffle:
                random.shuffle(self.indexes)
        else:
            self.cur += 1
        self.lock.release()
        return self.indexes[self.cur]
Example #17
    def initialize_proposer(self):
        """
            Initializes a proposer process that acts as a proposer Paxos member
            - creates listening socket for client connections
            - initializes connections to other server connections
            - starts main loop for proposer which reads proposal requests off
               a queue of requests
            - server_list is a list of pairs (host, port)
        """
        # message types that need duplicate handling
        proposer_msg_types = [
            MESSAGE_TYPE.PREPARE_ACK,
            MESSAGE_TYPE.PREPARE_NACK,
            MESSAGE_TYPE.ACCEPT_ACK,
        ]
        msg_history = set()

        def if_dup(msg, msg_history):
            # handle duplication
            if msg.msg_type in proposer_msg_types:
                msg_signature = (
                    msg.msg_type,
                    msg.value,
                    msg.proposal,
                    msg.r_proposal,
                    msg.client_id,
                    msg.instance,
                    msg.origin_id)
                if msg_signature in msg_history:
                    # dup, pass
                    # print "dup msg received by proposer!"
                    return True
                else:
                    msg_history.add(msg_signature)
                    return False

        # counter for proposer number
        proposer_num = self.server_id

        # log file
        write_lock = Lock()
        write_lock.acquire()
        logfile = open("server" + str(self.server_id) + ".txt", "w+")
        write_lock.release()

        def send_to_acceptors(msg, server_connections):
            assert isinstance(msg, message.message)
            # send the proposal to acceptors
            for s_socket in server_connections:
                try:
                    s_socket.sendall(pickle.dumps(msg))
                except Exception, e:
                    server_connections.remove(s_socket)
                    print "{}: ERROR - {}".format(self.DEBUG_TAG, e)
                    pass
Example #18
class DataWriter:
    def __init__ (self, targetDir, fps=10.0):
        
        # Create target directory and pose log
        if not os.path.exists(targetDir) :
            os.mkdir (targetDir)
        self.poseFd = open(targetDir+"/pose.csv", 'w')
        self.path = targetDir
        
        self.currentImage = None
        self.currentPose = None
        self.fps = fps
        self.bridge = CvBridge()
        self.currentTimestamp = 0.0

        # Prepare multiprocessing
        self.dataEx = Lock()
        self.process = mp.Process(target=self.start)
        self.end = False
        self.process.start()
    
    def start (self):
        rate = rospy.Rate(self.fps)
        counter = 0
        
        while not self.end:
            if self.currentImage is not None and self.currentPose is not None :
                
                self.dataEx.acquire()
                cpImg = copy (self.currentImage)
                cpPose = copy (self.currentPose)
                self.dataEx.release()
                
                curImageName = "{0:06d}.jpg".format (counter)
                
                cv2.imwrite (self.path + "/" + curImageName, cpImg)  # write the copy taken under the lock
                poseStr = "{:.6f} {} {} {} {} {} {} {}".format(
                    rospy.Time.now().to_sec(),
                    cpPose.position.x,
                    cpPose.position.y,
                    cpPose.position.z,
                    cpPose.orientation.x,
                    cpPose.orientation.y,
                    cpPose.orientation.z,
                    cpPose.orientation.w)
                self.poseFd.write(poseStr+"\n") 
                
                counter += 1
            rate.sleep()
    
    def close (self):
        self.end = True
        self.poseFd.close()
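One caveat: start() runs in a child process, so close() setting self.end = True in the parent will not stop the loop, because plain attributes are not shared across processes. A minimal sketch of the usual fix with a shared flag:

import multiprocessing as mp

end_flag = mp.Value('b', False)     # shared boolean visible to parent and child

def loop(flag):
    while not flag.value:
        pass                        # do one unit of work per iteration

proc = mp.Process(target=loop, args=(end_flag,))
proc.start()
end_flag.value = True               # parent signals the child to exit
proc.join()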
Example #19
class KNN(object):

    def __init__(self):
        (self.train, self.valid, self.test), _ = load_data(data_path='data/mfcc_{}.npy', theano_shared=False)
        self.train_x, self.train_y = self.train
        self.test_x, self.test_y = self.test
        self.valid_x, self.valid_y = self.valid

        self.train_y = self.train_y.reshape(self.train_y.shape[0])
        self.test_y = self.test_y.reshape(self.test_y.shape[0])

        self.accurate = Value('i', 0)
        self.lck = Lock()
        # self.unique_artists()
        # self.centers = len(self.artists_map.keys())
        self.neigh = KNeighborsClassifier(weights='distance', n_jobs=-1, p=1)

    def unique_artists(self):
        self.artists_map = {}
        for artist in self.data_y:
            temp = self.artists_map.get(artist, 0)
            self.artists_map[artist] = temp + 1

    def fit_data(self):
        self.neigh.fit(self.train_x, self.train_y)

    def test_accuracy(self, st, en, thread_number, var_x, var_y):
        for indx in range(st, en):
            data_pt = var_x[indx]
            temp = self.neigh.predict(data_pt.reshape(1, -1))[0]

            if temp == var_y[indx]:
                self.lck.acquire()
                # print self.accurate.value,
                self.accurate.value += 1
                # print self.accurate.value, indx,
                # print 'Thread: {}'.format(thread_number)
                self.lck.release()

    def testing(self, data_x, data_y):
        self.accurate.value = 0
        processes = []
        ranges = range(0, data_y.shape[0], data_y.shape[0] // 10)
        for x in range(len(ranges) - 1):
            p = Process(target=self.test_accuracy, args=(ranges[x], ranges[x + 1], x, data_x, data_y))
            p.start()
            processes.append(p)

        _ = map(lambda x: x.join(), processes)

        self.accuracy_percentage = self.accurate.value * 100. / data_y.shape[0]
        print 'Accuracy: {}'.format(self.accuracy_percentage)
        print 'Accurate: {}/{}'.format(self.accurate.value, data_y.shape[0])
Example #20
class sync_queue(object):
        """Wraps a multiprocessing Queue; put/get are already process-safe,
        so the extra Lock only serializes callers of add()."""

        def __init__(self):
                self.q = Queue()
                self.lock = Lock()

        def add(self, item):
                self.lock.acquire()
                self.q.put(item)
                self.lock.release()

        def remove(self):
                return self.q.get()
Example #21
 def run(self):
     signal.signal(signal.SIGINT, signal.SIG_IGN)
     l = Lock()
     l.acquire()
     pd = pcapy.open_live(self.iface_name, ethernet.ETHERNET_MTU, 0, 100)
     iface_mac_addr = interface.get_string_mac_address(self.iface_name)
     pcap_filter = "ether dst " \
                   + iface_mac_addr \
                   + " and ether proto %s " % hex(frames.WiwoFrame.ethertype) \
                   + "and not (ether[14:1] = 0x07 or ether[14:1] = 0x08)"
     pd.setfilter(pcap_filter)
     l.release()
     pd.loop(-1, self.frame_handler)
Example #22
def main():
    global keyAlphabet, configParam, myOutputFile
    queueLock = Lock()
    outputLock = Lock()
    configParam = []
    workQueue = Queue()
    myProcesses= []
    orderArray = Array('i',[0,0])
    
    if len(sys.argv) == 4:
        for e in range(1,4):
            if sys.argv[e].isdigit():
                configParam.append(int(sys.argv[e]))
            else:
                print "parameters other than filename have to be integers"
                sys.exit()
    else:
        print "usage : <filename> <shift> <numThread> <length> "
        sys.exit()
    
    myOutputFile = 'crypted_' + str(configParam[0]) + '_' + str(configParam[1]) + '_' + str(configParam[2]) + ".txt"
    
    try:
        global myInputFile
        myInputFile = open('metin.txt','r')
        (open(myOutputFile,'w')).close()

    except:
        print "Opening failed"
        sys.exit()
    
    keyAlphabet = createKey()
    
    queueLock.acquire()
    myText = myInputFile.read(configParam[2])
    while myText != "":
        workQueue.put(myText)
        myText = myInputFile.read(configParam[2])
    queueLock.release()

    for i in range(0,configParam[1]):
        process = Process(target=readFile,args=(orderArray,workQueue,queueLock,outputLock,myOutputFile))
        process.start()
        myProcesses.append(process)
        
    
    for p in myProcesses:
        p.join()

    myInputFile.close()
Example #23
class KnowledgeBase(Daemon):

    def __init__(self, config):
        set_logging(config)
        self.config = config
        self.pidfile = os.path.abspath(config['pidfile'])
        self.time_lock = Lock()
        self.teller_queue = JoinableQueue()
        self.session_factory = get_sasession(self.config)
        session = self.session_factory()

    def run(self):
        if int(self.config['instant_duration']):
            self.clock = Ticker(self.config, self.session_factory(),
                                self.time_lock, self.teller_queue)
            self.clock.start()

        host = self.config['kb_host']
        port = int(self.config['kb_port'])
        nproc = int(self.config['teller_processes'])
        for n in range(nproc):
            teller = Teller(self.config, self.session_factory, self.teller_queue)
            teller.daemon = True
            teller.start()
        self.socket = Listener((host, port))
        while True:
            try:
                client = self.socket.accept()
            except InterruptedError:
                return
            self.time_lock.acquire()
            self.teller_queue.put(client)
            self.time_lock.release()

    def cleanup(self, signum, frame):
        """cleanup tasks"""
        nproc = int(self.config['teller_processes'])
        for n in range(nproc):
            self.teller_queue.put(None)
        self.teller_queue.close()
        try:
            self.clock.ticking = False
        except AttributeError:
            pass
        self.teller_queue.join()
        try:
            self.clock.join()
        except AttributeError:
            pass
        logger.warn('bye from {n}, received signal {p}'.format(n=mp.current_process().name, p=str(signum)))
Example #24
class Database():

    databaseName = 'data.sqlite3'

    def __init__(self):
        
        self.lock = Lock()
        self.connection = sqlite3.connect(self.databaseName)
        self.cursor = self.connection.cursor()

    def insertItem(self, item):

        # Parameterized query: string concatenation broke on values containing
        # quotes and was open to SQL injection.
        sql = 'INSERT INTO UNIT VALUES (?, ?, ?, ?, ?)'
        values = (item.fields[4][1].replace('.', ''),
                  item.fields[0][1],
                  item.fields[1][1],
                  item.fields[2][1],
                  item.fields[3][1])

        self.lock.acquire()

        try:
            logging.debug(sql + '\n')
            self.cursor.execute(sql, values)
        except sqlite3.Error as e:
            logging.warning(sql + ' ' + e.args[0])

        try:
            self.connection.commit()
        except sqlite3.Error:
            logging.error('SQLITE COMMIT ISSUE')

        self.lock.release()

    def doesNotContainItem(self, serial):

        self.cursor.execute('SELECT * FROM UNIT WHERE ID = ?', (serial,))
        return self.cursor.fetchone() is None
Example #25
    def __init__(self, test_machine_manager):
        self._test_machine_manager = test_machine_manager
        self._optix = OptiX()

        self._q1 = []  # Queue 1 - Highest priority queue for immediate test execution requests
        self._q2 = []  # Queue 2 - Queue for planned test executions
        self._executing_machines = []  # List of UUIDs of machines currently executing tests

        self._queue_lock = Lock()  # Lock which must be acquired to add to or empty queues
        self._execution_lock = Lock()  # Lock which must be acquired to modify the executing machines list

        self._hub_port, self._script_path = self.get_config_settings()

        ql = threading.Thread(target=self.queue_listener)
        ql.start()
Example #26
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)

        self._messagesList = []
        self._needUpdate = False
        self._lockedView = False
        
        self._htmlWindow = wx.html.HtmlWindow(self)
        sizer = wx.BoxSizer(wx.VERTICAL)

        tb = self.BuildToolbar()
        sizer.Add( tb, 0, wx.ALL | wx.ALIGN_LEFT | wx.EXPAND, 4 ) # add the toolbar to the sizer

        sizer.Add(self._htmlWindow, 1, wx.EXPAND)
        self.SetSizer(sizer)
        
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_IDLE, self.CheckIfNeedUpdate)
        
        #Check every 50ms if there is a need to update the view due to recent messages
        self._mainTimer = wx.Timer(self, wx.ID_ANY)
        self._mainTimer.Start(50)
        self.Bind(wx.EVT_TIMER, self.CheckIfNeedUpdate, self._mainTimer)

        self._messagesListLock = Lock()
Example #27
 def __init__(self, stops=False):
     wdb.SOCKET_SERVER = 'localhost'
     wdb.SOCKET_PORT = 18273
     wdb.WDB_NO_BROWSER_AUTO_OPEN = True
     self.stops = stops
     self.lock = Lock()
     super(FakeWdbServer, self).__init__()
Example #28
 def __init__(self, cpunum):
     self.cpus = cpunum if cpunum is not None else cpu_count()
     self._pool = Pool(self.cpus)
     self._async_results = {}
     self._callBack_lists = {}
     self._joins = {}
     self._lock = Lock()
Example #29
 def __init__(self, nousb = False):
     self.window = str(self)
     self.running = True
     self.last_frame = ('none', ())
     self.frames = []
     self.frames_captured = 0
     self.objects = []
     self.start = None
     self.nousb = nousb
     self.nxtwaiting = True
     if not nousb:
         self.usb = Usbcom()
     self.lock_match = Lock()
     self.lock_array = Lock()
     
     cv2.namedWindow(self.window)
Example #30
 def __init__(self, device, baudrate=38400, mode="serial"):
   Actuator.__init__(self)
   self.devname = device
   self.mode = mode
   self.baud = baudrate
   self.lock = Lock()
   self.last = None
Example #31
        else:
            cf.lambda_abs2 = cf.lambda_abs
        cf.alpha_abs[args.lambda_abs2] = cf.alpha2
        for m in args.abs_igm2:
            cf.alpha_abs[m] = args.metal_alpha

        data2, ndata2, zmin_pix2, zmax_pix2 = io.read_deltas(args.in_dir2, cf.nside, cf.lambda_abs2, cf.alpha2, cf.zref, cf.cosmo, nspec=args.nspec)
        cf.data2 = data2
        cf.ndata2 = ndata2
        cf.angmax = utils.compute_ang_max(cf.cosmo,cf.rt_max,zmin_pix,zmin_pix2)
        print("")
        print("done, npix = {}".format(len(data2)))


    cf.counter = Value('i',0)
    cf.lock = Lock()
    cpu_data = {}
    for i,p in enumerate(sorted(list(data.keys()))):
        ip = i%args.nproc
        if not ip in cpu_data:
            cpu_data[ip] = []
        cpu_data[ip].append(p)


    dm_all = []
    wdm_all = []
    rp_all = []
    rt_all = []
    z_all = []
    names = []
    npairs_all = []
Example #32
File: misc.py Project: EQ4/DRR
#

DIR_BACKUPS = 'backups'
DIR_STREAMS = 'streams'
DIR_SLICES = 'slices'

IS_TEST = True

manager_pid = 0
queue = Queue()

params = {'shutdown_time': None}
start_time = None
config = {}
pid_map = {}
lockMap = {'prune': Lock()}
last_official_query = None


def do_nothing(signal, frame=None):
    # Catches signals that we would rather just ignore
    return True


def base_stats():
    # Reports base-level statistical information about the health of the server.
    # This is used for the /stats and /heartbeat call.
    try:
        # for some reason this can lead to a memory error
        load = [
            float(unit) for unit in os.popen(
Example #33
# run the experiments of dynamic graph-mp (DGMP)

import subprocess
import time
from multiprocessing import Pool, Lock
from sparse_learning.proj_algo import head_proj
from sparse_learning.proj_algo import tail_proj

from post_process import *
import networkx as nx
import pickle
import EMS

logger = False
output_lock = Lock()


def print_log(log_file, string):
    if logger:
        # print(string)
        if log_file is not None:
            outfile = open(log_file, "a")
            outfile.write(string)
            outfile.close()


def normalized_gradient(x, grad):
    # rescale gradient to a feasible space [0, 1]?
    normalized_grad = np.zeros_like(grad)
    for i in range(len(grad)):
        if grad[i] > 0.0 and x[i] == 1.0:
Example #34
class RealTimeStreamer(Streamer):
    """
    The real time streamer works like the regular Streamer but skips over frames
    and return an image keeping Real-time impression (laggy but on time)
    """
    def __init__(self, name, url, img_rate, resol):
        Streamer.__init__(self, name, url, img_rate, resol)
        self.lock = Lock()
        self.loadingLock = Lock()
        self.loadingLock.acquire()

        self.currentImage = None
        self.loading = True

        Thread(target=self._imageGetterTarget, daemon=True).start()

    def _imageGetterTarget(self):
        while (True):
            img = Streamer.get_image(self)

            self.lock.acquire()
            self.currentImage = img
            try:
                self.loadingLock.release()
            except:
                pass  # already released by an earlier frame
            self.lock.release()

            if img is None:
                return

    def get_image(self):
        # get self.currentImage safely and in a consistent way regarding Streamer
        if (self.loading):
            self.loadingLock.acquire()  #wait for loading
            self.loading = False

        self.lock.acquire()

        i = self.currentImage

        self.lock.release()

        return i
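A hedged usage sketch (the Streamer base-class arguments and the stream URL are assumptions):

stream = RealTimeStreamer('cam0', 'rtsp://example.test/stream', 10, (640, 480))
frame = stream.get_image()   # blocks until the first frame has been fetched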
Example #35
class UploadEvent(MetricsEventAdapter):
    """ Image event adapter """
    _format = '.' + str(config.get('metrics.images.format', 'JPEG')).upper().lstrip('.')
    _quality = int(config.get('metrics.images.quality', 87))
    _subsampling = int(config.get('metrics.images.subsampling', 0))

    _metric_counters = {}
    _metric_counters_lock = Lock()
    _image_file_history_size = int(config.get('metrics.file_history_size', 5))

    @staticmethod
    def _replace_slash(part):
        return part.replace('\\', '/').strip('/').replace('/', '.slash.')

    def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None,
                 image_file_history_size=None, delete_after_upload=False, **kwargs):
        # param override_filename: override uploaded file name (notice extension will be added from local path
        # param override_filename_ext: override uploaded file extension
        if image_data is not None and not hasattr(image_data, 'shape'):
            raise ValueError('Image must have a shape attribute')
        self._image_data = image_data
        self._local_image_path = local_image_path
        self._url = None
        self._key = None
        self._count = self._get_metric_count(metric, variant)
        if not image_file_history_size:
            image_file_history_size = self._image_file_history_size
        self._filename = kwargs.pop('override_filename', None)
        if not self._filename:
            if image_file_history_size < 1:
                self._filename = '%s_%s_%08d' % (metric, variant, self._count)
            else:
                self._filename = '%s_%s_%08d' % (metric, variant, self._count % image_file_history_size)

        # make sure we have no '/' in the filename because it might access other folders,
        # and we don't want that to occur
        self._filename = self._replace_slash(self._filename)

        self._upload_uri = upload_uri
        self._delete_after_upload = delete_after_upload

        # get upload uri upfront, either predefined image format or local file extension
        # e.g.: image.png -> .png or image.raw.gz -> .raw.gz
        image_format = kwargs.pop('override_filename_ext', None)
        if image_format is None:
            image_format = self._format.lower() if self._image_data is not None else \
                '.' + '.'.join(pathlib2.Path(self._local_image_path).parts[-1].split('.')[1:])
        self._upload_filename = str(pathlib2.Path(self._filename).with_suffix(image_format))

        self._override_storage_key_prefix = kwargs.pop('override_storage_key_prefix', None)

        super(UploadEvent, self).__init__(metric, variant, iter=iter, **kwargs)

    @classmethod
    def _get_metric_count(cls, metric, variant, next=True):
        """ Returns the next count number for the given metric/variant (rotates every few calls) """
        counters = cls._metric_counters
        key = '%s_%s' % (metric, variant)
        try:
            cls._metric_counters_lock.acquire()
            value = counters.get(key, -1)
            if next:
                value = counters[key] = value + 1
            return value
        finally:
            cls._metric_counters_lock.release()

    # return No event (just the upload)
    def get_api_event(self):
        return None

    def update(self, url=None, key=None, **kwargs):
        super(UploadEvent, self).update(**kwargs)
        if url is not None:
            self._url = url
        if key is not None:
            self._key = key

    def get_file_entry(self):
        local_file = None
        # don't provide file in case this event is out of the history window
        last_count = self._get_metric_count(self.metric, self.variant, next=False)
        if abs(self._count - last_count) > self._image_file_history_size:
            output = None
        elif self._image_data is not None:
            image_data = self._image_data
            if not isinstance(image_data, np.ndarray):
                # try conversion, if it fails we'll leave it to the user.
                image_data = np.array(image_data, dtype=np.uint8)
            image_data = np.atleast_3d(image_data)
            if image_data.dtype != np.uint8:
                if np.issubdtype(image_data.dtype, np.floating) and image_data.max() <= 1.0:
                    image_data = (image_data*255).astype(np.uint8)
                else:
                    image_data = image_data.astype(np.uint8)
            shape = image_data.shape
            height, width, channel = shape[:3]
            if channel == 1:
                image_data = np.reshape(image_data, (height, width))

            # serialize image
            image = Image.fromarray(image_data)
            output = six.BytesIO()
            image_format = Image.registered_extensions().get(self._format.lower(), 'JPEG')
            image.save(output, format=image_format, quality=self._quality)
            output.seek(0)
        else:
            local_file = self._local_image_path
            try:
                output = open(local_file, 'rb')
            except Exception as e:
                # something happened to the file, we should skip it
                from ...debugging.log import LoggerRoot
                LoggerRoot.get_base_logger().warning(str(e))
                return None

        return self.FileEntry(
            event=self,
            name=self._upload_filename,
            stream=output,
            url_prop='url',
            key_prop='key',
            upload_uri=self._upload_uri,
            delete_local_file=local_file if self._delete_after_upload else None,
        )

    def get_target_full_upload_uri(self, storage_uri, storage_key_prefix=None):
        e_storage_uri = self._upload_uri or storage_uri
        # if we have an entry (with or without a stream), we'll generate the URL and store it in the event
        filename = self._upload_filename
        if self._override_storage_key_prefix or not storage_key_prefix:
            storage_key_prefix = self._override_storage_key_prefix
        key = '/'.join(x for x in (storage_key_prefix, self._replace_slash(self.metric),
                                   self._replace_slash(self.variant), self._replace_slash(filename)) if x)
        url = '/'.join(x.strip('/') for x in (e_storage_uri, key))
        # make sure we preserve local path root
        if e_storage_uri.startswith('/'):
            url = '/'+url
        return key, url
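The counter logic above maps every metric/variant pair onto a small ring of file names; an illustration with the default history size of 5 (metric 'loss' and variant 'train' are hypothetical):

# count 0 -> loss_train_00000000
# count 1 -> loss_train_00000001
# ...
# count 4 -> loss_train_00000004
# count 5 -> loss_train_00000000   (5 % 5 == 0: the first name is reused)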
Example #36
from multiprocessing import Process, Queue, Lock

def say_hello(name='world'):
    print "Hello, %s" % name

p = Process(target=say_hello)
p.start()
p.join() # wait for the process to complete


q = Queue()

q.put('Why hello')
q.put(['a', 1, {'b': 'c'}])
print q.get()
print q.get()

l = Lock()

l.acquire()
print 'Ha! Only I can write to stdout!'
l.release()
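On platforms that spawn rather than fork (e.g. Windows), the same demo needs a __main__ guard so the child interpreter can import the module without re-running it; a minimal sketch:

from multiprocessing import Process

def say_hello(name='world'):
    print "Hello, %s" % name

if __name__ == '__main__':
    p = Process(target=say_hello)
    p.start()
    p.join()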
Example #37
def constructDatabase(assemblyList,
                      klist,
                      sketch_size,
                      oPrefix,
                      ignoreLengthOutliers=False,
                      threads=1,
                      overwrite=False,
                      reads=False,
                      mash_exec='mash'):
    """Sketch the input assemblies at the requested k-mer lengths

    A multithread wrapper around :func:`~runSketch`. Threads are used to either run multiple sketch
    processes for each klist value, or increase the threads used by each ``mash sketch`` process
    if len(klist) > threads.

    Also calculates random match probability based on length of first genome
    in assemblyList.

    Args:
        assemblyList (str)
            File with locations of assembly files to be sketched
        klist (list)
            List of k-mer sizes to sketch
        sketch_size (int)
            Size of sketch (``-s`` option)
        oPrefix (str)
            Output prefix for resulting sketch files
        ignoreLengthOutliers (bool)
            Whether to check for outlying genome lengths (and error
            if found)

            (default = False)
        threads (int)
            Number of threads to use

            (default = 1)
        overwrite (bool)
            Whether to overwrite sketch DBs, if they already exist.

            (default = False)
        reads (bool)
            If reads are being used as input

            (default = False)
        mash_exec (str)
            Location of mash executable

            (default = 'mash')

    """
    if reads:
        raise NotImplementedError("Cannot use reads with mash backend")

    names, sequences = readRfile(assemblyList, oneSeq=True)
    genome_length, max_prob = assembly_qc(sequences, klist,
                                          ignoreLengthOutliers)

    # create kmer databases
    if threads > len(klist):
        num_processes = 1
        num_threads = threads
    else:
        num_processes = threads
        num_threads = 1

    # run database construction using multiprocessing
    l = Lock()
    with NamedTemporaryFile(mode='w',
                            prefix="references",
                            suffix=".tmp",
                            dir=".") as sequenceFile:
        for sequence in sequences:
            sequenceFile.write(sequence + '\n')
        sequenceFile.flush()

        with Pool(processes=num_processes,
                  initializer=init_lock,
                  initargs=(l, )) as pool:
            pool.map(
                partial(runSketch,
                        assemblyList=sequenceFile.name,
                        sketch=sketch_size,
                        genome_length=genome_length,
                        oPrefix=oPrefix,
                        mash_exec=mash_exec,
                        overwrite=overwrite,
                        threads=num_threads), klist)
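The Pool above hands the Lock to each worker through an initializer. A sketch of what init_lock presumably looks like, storing the lock in a module-level global so runSketch can use it:

lock = None   # module-level handle filled in by the pool initializer

def init_lock(l):
    global lock
    lock = l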
Example #38
from enum import Enum
from multiprocessing import Lock

from . import logutil
from .model import Missing
from .util import total_size

logger = logutil.getLogger(__name__)

# ============================================================================
# Brew/Koji service interaction functions
# ============================================================================

# Populated by watch_task. Each task_id will be a key in the dict and
# each value will be a TaskInfo: https://github.com/openshift/enterprise-images/pull/178#discussion_r173812940
watch_task_info = {}
# Protects threaded access to watch_task_info
watch_task_lock = Lock()


class TaskStates(Enum):
    FREE = 0
    OPEN = 1
    CLOSED = 2
    CANCELED = 3
    ASSIGNED = 4
    FAILED = 5


class BuildStates(Enum):
    BUILDING = 0
    COMPLETE = 1
    DELETED = 2
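A hedged sketch of the intended access pattern for watch_task_info (the helper name is made up):

def record_task_info(task_id, info):   # hypothetical helper
    with watch_task_lock:
        watch_task_info[task_id] = info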
Example #39
class RemoteCache(CacheBase):
    '''
    Remote cache class.
    '''
    
    CACHE_TYPE_NAME = 'remote_cache'
    
    def __init__(self, cache_name, **options):
        CacheBase.__init__(self, cache_name, **options)
        self._remote_cache_provider = RemoteCacheProvider()
        self._connection = self._remote_cache_provider.get_connection()
        self._lock = Lock()
        
    def get_len(self):
        '''
        Returns cache items count.
        
        @return: int
        '''      
        
        return self._connection.execute('cache.len', self.get_name())
    
    def remove(self, key):
        '''
        Removes specified item from cache. 
        
        @param key: item key
        '''
        
        return self._connection.execute('cache.remove', self.get_name(), key)

    def get_id(self):
        '''
        Returns cache id.
        
        @return: object
        '''
        try:
            self._lock.acquire()
            
            return self._connection.execute('cache.len', self.get_name())
        finally:
            self._lock.release()
    
    def set_size(self, size):
        '''
        Sets cache size.
        
        @param size: cache new size 
        '''       
        
        return self._connection.execute('cache.set_size', self.get_name(), size)        
        
    def get_size(self):
        '''
        Returns cache size.
        
        @return: int
        '''
        
        return self._connection.execute('cache.get_size', self.get_name())

    def reset_item(self, key):
        '''
        Resets cache item.
        '''
        
        return self._connection.execute('cache.reset_item', self.get_name(), key)


    def clear(self):
        '''
        Removes all cache items.
        '''
        
        return self._connection.execute('cache.clear', self.get_name())
    
    def reset(self):
        '''
        Resets the cache.
        '''
        
        return self._connection.execute('cache.reset', self.get_name())

        
    def set(self, key, value):
        '''
        Sets given item using specified key.
        
        @param key: item key
        @param value: item value
        '''
        
        try:
            self._lock.acquire()
            _value = cPickle.dumps(value)
            return self._connection.execute('cache.set', self.get_name(), key, _value)
        finally:
            self._lock.release()

        
    def get(self, key, default = None):
        '''
        Gets given item using specified key.
        
        @param key: item key
        @return : object
        '''
        
        try:
            self._lock.acquire()
            value = self._connection.execute('cache.get', self.get_name(), key)
            if value is not None:
                deserialized_value = cPickle.loads(value)
                if deserialized_value is None:
                    return default
                return deserialized_value
            return value
        finally:
            self._lock.release()
    
    def has_key(self, key):
        '''
        Checks if given key is exist or not.
        
        @param key: item key
        @return: bool
        '''
        try:
            self._lock.acquire()
            return self._connection.execute('cache.has_key', self.get_name(), key)
        finally:
            self._lock.release()
        
    def get_items(self):
        '''
        Returns all items in cache.
        
        @return: [object]
        '''
        
        items = []
        for item in self._connection.execute('cache.get_items', self.get_name()):
            item.value = cPickle.loads(item.value)
            items.append(item)
        return items 

    def keys(self):
        '''
        Returns all keys.
        
        @return: [object]
        '''
        
        return self._connection.execute('cache.keys', self.get_name())

    def values(self):
        '''
        Returns all values.
        
        @return: [object]
        '''
        
        values = self._connection.execute('cache.values', self.get_name())
        results = []
        for value in values:
            results.append(cPickle.loads(value))
        return results 
        
    def get_last_reset_time(self):
        '''
        Returns last reset time of the cache.
        
        @return: float
        '''
        
        
        return self._connection.execute('cache.get_last_reset_time', self.get_name())
            
    def _get_cache_item_(self, key):
        
        return self._connection.execute('cache._get_cache_item_', self.get_name(), key)
Example #40
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import datetime, logging, time, os, random, json
from RSFaceCloud.RSConcurrentInput import *
from multiprocessing import Process, Lock, Queue, Manager
from RSFaceCloud.RSfaceClientCloud import *
from BasicMethod.BasicMethod import *

ProcessLock = Lock()
TMP_PATH = './.tmp'


class RsFaceProcesses(Process):
    def __init__(self, rsInput, TestItemdict):
        super().__init__()
        self.__rsInput = rsInput
        self.__vStatDataResult = vStatisticsData()
        self.__TestItemdict = TestItemdict

    def run(self):
        try:
            logging.info('Start Processes[%d]...' %
                         self.__rsInput.getThreadId())

            self.__rsFace = RSFace(self.__rsInput.getHost(),
                                   self.__rsInput.getAppId(),
                                   self.__rsInput.getAppSecret())
            vfaceId = []
            vpersonId = []
            vgroups = []
Example #41
                    # Append the image pixels and malignancy value to their respective arrays.
                    # image_data.append( [ (numpy.transpose(numpy.reshape(im, -1))).tolist() ] )
                    image_data.append((numpy.reshape(im, -1)).tolist())
                    malignancy_rating = [0, 0, 0, 0, 0]
                    if malignancy in range(1, 6):
                        malignancy_rating[int(malignancy) - 1] = 1
                    malignancy_series.append(malignancy_rating)
                    lock.release()

            # print(malignancy_series)
            output.put([image_data, malignancy_series])
            esprint("multiProcAllFiles: returning...")
            return

        # Multiprocess the files; takes hours with single processing.
        lock = Lock()
        numCpusToUse = int(math.floor(mp.cpu_count()))
        # processes = [mp.Process(target=multiProcAllFiles, args=(image_data, malignancy_series, dicom_files, master_join4, allFilePaths, lock, output)) for x in range(numCpusToUse)]
        processes = []
        num_files = len(allFilePaths)
        for i in range(numCpusToUse):
            idxMin = int(
                math.floor((max(0, i) / float(numCpusToUse)) * num_files))
            idxMax = int(
                math.floor((max(0, i + 1) / float(numCpusToUse)) * num_files))
            print("idxMin: " + str(idxMin) + "; idxMax: " + str(idxMax) +
                  "; num_files: " + str(num_files))
            # print(allFilePaths[idxMin:idxMax])
            processes.append(
                mp.Process(target=wrappedMultiProcAllFiles,
                           args=(image_data, malignancy_series, dicom_files,
Example #42
        label_encoder_file=label_encoder_file)

    # open connection to the camera
    camera = easyN_A110(camera_ip_address=ip_addr,
                        username=username,
                        password=password,
                        camera_name=camera_name,
                        onvif_port=onvif_port,
                        onvif_wsdl_path=wsdl,
                        http_port=http_port,
                        rtsp_port=rtsp_port)
    camera.open_video()

    with Manager() as manager:
        # instantiate process lock
        lock = Lock()

        pan_amt = manager.Value("i", 0)
        tilt_amt = manager.Value("i", 0)
        is_system_on_high_alert = manager.Value("i", 0)
        auto_track_enabled = manager.Value("i", 1)

        # Initialise threaded PTZ controller
        processPTZ = start_PTZ_thread(lock, camera, pan_amt, tilt_amt,
                                      is_system_on_high_alert,
                                      type(camera).__name__)

        # start frames per second counter
        fps = FPS().start()

        try:
Example #43
    def part_test_index_3_(self, key, name_1, name_2, name_3, log_save_file,
                           result_save_file, total_para_num):
        lock = Lock()
        start_time = time.time()
        load_time_1 = time.time()
        # Load the factors, filtering the data by stock_universe.
        change_factor = self.load_change_factor(name_1)
        ratio_factor = self.load_ratio_factor(name_2)
        tech_factor = self.load_tech_factor(name_3)
        load_time_2 = time.time()
        # time spent loading the data
        load_delta = round(load_time_2 - load_time_1, 2)
        # build the set of mixing functions
        fun_mix_2_set = create_fun_set_2()
        ###############################
        # swap in the filter function #
        ###############################
        filter_name = filter_time_para_fun.__name__

        for fun in fun_mix_2_set:
            mix_factor = fun(change_factor, ratio_factor, tech_factor)
            if len(mix_factor.abs().sum(axis=1).replace(
                    0, np.nan).dropna()) / len(mix_factor) < 0.1:
                continue

            daily_pos = self.deal_mix_factor(mix_factor).shift(2)
            # return the in-sample filtering results

            result_dict = filter_time_para_fun(self.time_para_dict,
                                               daily_pos,
                                               self.return_choose,
                                               if_only_long=False)
            for time_key in result_dict.keys():
                in_condition, *filter_result = result_dict[time_key]
                # store the result
                if in_condition:
                    if self.if_save:
                        with lock:
                            f = open(result_save_file, 'a')
                            write_list = [
                                time_key, key, fun.__name__, name_1, name_2,
                                name_3, filter_name, self.sector_name,
                                in_condition
                            ] + filter_result
                            f.write('|'.join([str(x)
                                              for x in write_list]) + '\n')
                    print([
                        time_key, in_condition, fun.__name__, name_1, name_2,
                        name_3
                    ] + filter_result)
        end_time = time.time()
        # log the parameters
        if self.if_save:
            with lock:
                f = open(log_save_file, 'a')
                write_list = [
                    key, name_1, name_2, name_3, filter_name, self.sector_name,
                    round(end_time - start_time, 4), load_delta
                ]
                f.write('|'.join([str(x) for x in write_list]) + '\n')

        print('{}%, {}, {}, {}, {}, cost {} seconds, load_cost {} seconds'.
              format(round(key / total_para_num * 100, 4), key, name_1, name_2,
                     name_3, round(end_time - start_time, 2), load_delta))
Example #44
from collections import namedtuple
import sshtunnel
import pysftp
import utils
import uuid
import json
import tqdm
import multiprocessing
from multiprocessing import Queue, Lock
import logging
# init tensorflow
from keras.backend.tensorflow_backend import set_session
from keras import backend as K
import tensorflow as tf
# init global lock
mutex = Lock()
mutex1 = Queue(1)
mutex2 = Queue(1)
mutex_data = None

# end init global lock


class task():
    """
    mainthread:

        True    : need to maintain the run() in the main thread to provide service
        False   : auto create process to provide service

    handler_type:
Example #45
 def __init__(self, cache_name, **options):
     CacheBase.__init__(self, cache_name, **options)
     self._remote_cache_provider = RemoteCacheProvider()
     self._connection = self._remote_cache_provider.get_connection()
     self._lock = Lock()
Example #46
        ef_map[line[0]].append(line[1])
for line in open(fmap2):
    line = line.rstrip('\n').split('@@@')
    if len(line) != 2:
        continue
    vocab_f.append(line[0])
    if fe_map.get(line[1]) == None:
        fe_map[line[1]] = [line[0]]
    else:
        fe_map[line[1]].append(line[0])

print "Loaded dbpedia_yago yago_dbpedia mappings."

#en:...
manager = Manager()
lock1 = Lock()

past_num = Value('i', 0, lock=True)
score = manager.list()  # stores hit@k results

rank = Value('d', 0.0, lock=True)
rank_num = Value('i', 0, lock=True)

cpu_count = multiprocessing.cpu_count()
t0 = time.time()


def test(model, vocab, index, src_lan, tgt_lan, map, score, past_num):
    while index.value < len(vocab):
        # guard the read-and-increment so two workers never claim the same id
        with index.get_lock():
            id = index.value
            index.value += 1
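
The fragment above relies on Value(..., lock=True); a minimal sketch of that
shared-counter pattern, with illustrative worker logic:

from multiprocessing import Process, Value

def bump(past_num, rank):
    # .value += ... is not atomic; get_lock() makes the update safe
    with past_num.get_lock():
        past_num.value += 1
    with rank.get_lock():
        rank.value += 0.5

if __name__ == '__main__':
    past_num = Value('i', 0, lock=True)
    rank = Value('d', 0.0, lock=True)
    ps = [Process(target=bump, args=(past_num, rank)) for _ in range(4)]
    for p in ps:
        p.start()
    for p in ps:
        p.join()
    print(past_num.value, rank.value)  # 4 2.0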
Ejemplo n.º 47
0
# Function to dump the stacks of all threads
def get_stack_dump():
    id2name = dict([(th.ident, th.name) for th in threading_enumerate()])
    code = ["Stack dump:"]
    for threadId, stack in sys._current_frames().items():
        code.append("")
        code.append("# Thread: %s(%d)" % (id2name.get(threadId,""), threadId))
        for filename, lineno, name, line in traceback.extract_stack(stack):
            code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
            if line:
                code.append("  %s" % (line.strip()))
    return code

# Signal handler that dumps all stacks and terminates
# Lock l dis-interleaves the stack traces of processes
l = Lock()
def handle_sigint(signal, frame):
    with l:
        log.fatal("CTRL-C pressed!")
        for c in get_stack_dump():
            log.devinfo(c)
    # This call raises a SystemExit exception in the
    # stack frame that was interrupted by the signal
    # For the main thread, this is what we want.
    sys.exit(-1)

# Signal handler that dumps all stacks and terminates silently
# Also uses the Lock l to dis-interleave the stack traces
def handle_sigint_silent(signal, frame):
    with l:
        for c in get_stack_dump():
            log.devinfo(c)
    sys.exit(-1)
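
A short usage sketch for the handlers above; registering them with the signal
module is an assumption about how the surrounding program wires things up:

import signal

def install_sigint_handler(silent=False):
    # route CTRL-C either to the verbose or to the silent dump-and-exit handler
    handler = handle_sigint_silent if silent else handle_sigint
    signal.signal(signal.SIGINT, handler)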
Ejemplo n.º 48
0
    def __writeData_async_andCollect(self, startindex, outputDir):

        from multiprocessing import Process, Queue, cpu_count, Lock
        wo_queue = Queue()
        writelock = Lock()
        import os
        thispid = str(os.getpid())
        if not os.path.isfile(outputDir + '/snapshot.dc'):
            self.writeToFile(outputDir + '/snapshot.dc')

        tempstoragepath = '/dev/shm/' + thispid

        print('creating dir ' + tempstoragepath)
        os.system('mkdir -p ' + tempstoragepath)

        def writeData_async(index, woq, wrlck):

            import copy
            from stopwatch import stopwatch
            sw = stopwatch()
            td = copy.deepcopy(self.dataclass)
            sample = self.originRoots[index]
            ramdisksample = tempstoragepath + '/' + str(
                os.getpid()) + os.path.basename(sample)

            def removefile():
                os.system('rm -f ' + ramdisksample)

            import atexit
            atexit.register(removefile)
            success = False
            out_samplename = ''
            out_sampleentries = 0
            newname = os.path.basename(sample).rsplit('.', 1)[0]
            newname += str(index)

            if usenewformat:
                newname += '.meta'
            else:
                newname += '.z'
            newpath = os.path.abspath(outputDir + newname)

            try:
                fileTimeOut(sample, 120)  #once available copy to ram
                os.system('cp ' + sample + ' ' + ramdisksample)
                td.readFromRootFile(ramdisksample, self.means, self.weighter)
                #wrlck.acquire()
                td.writeOut(newpath)
                #wrlck.release()
                print('converted and written ' + newname + ' in ',
                      sw.getAndReset(), ' sec -', index)

                out_samplename = newname
                out_sampleentries = td.nsamples
                success = True
                td.clear()
                removefile()
                woq.put((index, [success, out_samplename, out_sampleentries]))

            except:
                print('problem in ' + newname)
                removefile()
                woq.put((index, [False, out_samplename, out_sampleentries]))
                raise

        def __collectWriteInfo(successful, samplename, sampleentries,
                               outputDir):
            if not successful:
                raise Exception("write not successful, stopping")
            import os
            self.samples.append(samplename)
            self.nsamples += sampleentries
            self.sampleentries.append(sampleentries)
            self.writeToFile(outputDir +
                             '/snapshot_tmp.dc')  #avoid to overwrite directly
            os.system('mv ' + outputDir + '/snapshot_tmp.dc ' + outputDir +
                      '/snapshot.dc')

        processes = []
        processrunning = []
        processfinished = []
        for i in range(startindex, len(self.originRoots)):
            processes.append(
                Process(target=writeData_async, args=(i, wo_queue, writelock)))
            processrunning.append(False)
            processfinished.append(False)

        nchilds = int(cpu_count() / 2) - 2 if self.nprocs <= 0 else self.nprocs
        #import os
        #if 'nvidiagtx1080' in os.getenv('HOSTNAME'):
        #    nchilds=cpu_count()-5
        if nchilds < 1:
            nchilds = 1

        #nchilds=10

        lastindex = startindex - 1
        alldone = False
        results = []
        import time
        try:
            while not alldone:
                nrunning = 0
                for runs in processrunning:
                    if runs: nrunning += 1

                for i in range(len(processes)):
                    if nrunning >= nchilds:
                        break
                    if processrunning[i]: continue
                    if processfinished[i]: continue
                    time.sleep(0.1)
                    logging.info('starting %s...' %
                                 self.originRoots[startindex + i])
                    processes[i].start()
                    processrunning[i] = True
                    nrunning += 1

                if not wo_queue.empty():
                    res = wo_queue.get()
                    results.append(res)
                    originrootindex = res[0]
                    logging.info('finished %s...' %
                                 self.originRoots[originrootindex])
                    processfinished[originrootindex - startindex] = True
                    processes[originrootindex - startindex].join(5)
                    processrunning[originrootindex - startindex] = False
                    #immediately send the next
                    continue

                for r in results:
                    thisidx = r[0]
                    if thisidx == lastindex + 1:
                        logging.info('>>>> collected result %d of %d' %
                                     (thisidx, len(self.originRoots)))
                        __collectWriteInfo(r[1][0], r[1][1], r[1][2],
                                           outputDir)
                        lastindex = thisidx

                if nrunning == 0:
                    alldone = True
                    continue
                time.sleep(0.1)

        except:
            os.system('rm -rf ' + tempstoragepath)
            raise
        os.system('rm -rf ' + tempstoragepath)
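
A hedged miniature of the scheduling pattern above: cap the number of
concurrent child processes and collect (index, result) pairs through a Queue.
The convert() payload is an illustrative stand-in, not the real conversion
work:

from multiprocessing import Process, Queue, cpu_count

def convert(index, out_queue):
    out_queue.put((index, index * index))  # stand-in for the real work

def run_all(njobs, nchilds=max(1, cpu_count() // 2)):
    out_queue = Queue()
    pending = [Process(target=convert, args=(i, out_queue))
               for i in range(njobs)]
    started, results = [], []
    while len(results) < njobs:
        # top up the pool of running children
        while pending and sum(p.is_alive() for p in started) < nchilds:
            p = pending.pop(0)
            p.start()
            started.append(p)
        results.append(out_queue.get())  # blocks until some worker reports
    for p in started:
        p.join()
    return sorted(results)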
Ejemplo n.º 49
0
 def __init__(self, initial_state=0):
     self.state = Value('i', initial_state)
     self.lock = Lock()
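
A hedged sketch of how such a Value/Lock pair is typically used; the
set_state/get_state methods are assumptions, not the original class:

from multiprocessing import Lock, Value

class SharedState:
    def __init__(self, initial_state=0):
        self.state = Value('i', initial_state)
        self.lock = Lock()

    def set_state(self, new_state):
        # the lock keeps the read-modify-write atomic across processes
        with self.lock:
            old = self.state.value
            self.state.value = new_state
            return old

    def get_state(self):
        with self.lock:
            return self.state.value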
Ejemplo n.º 50
0
"""
3、进程同步-同步锁
进程间也要共享资源!比如同时调用Pycharm的print()或者同时写一个文件,因此也存在某些操作必须上锁的必要性。

"""
from multiprocessing import Lock, Process


def f(l, i):
    if l.acquire():
        print(l)
        print(i)
    l.release()


if __name__ == '__main__':
    lock = Lock()  # create a lock
    l = []
    for i in range(5):
        p = Process(target=f, args=(lock, i))
        p.start()
        l.append(p)
    for p in l:
        p.join()
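
The acquire()/release() pair in f() can also be written with the lock's
context-manager form, which releases the lock even if printing raises; a
hedged equivalent:

def f_with(l, i):
    with l:
        print(l)
        print(i)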
Ejemplo n.º 51
0
    'cmake': {
        'cmd_prefix': [
            CMAKE_FORMAT_EXE_NAME,
            '--separate-ctrl-name-with-space',
            '--line-width', '100',
            '-'
        ],
        'filename_patterns': ('*.cmake', 'CMakeLists.txt')
    }
}

num_files_processed_so_far = 0  # Global variable
total_files_to_process = 0  # Global variable
failures = []  # Global variable

output_lock = Lock()
file_count_lock = Lock()


def output(text):
    with output_lock:
        sys.stdout.write('{}\n'.format(text))
        sys.stdout.flush()


def increment_file_count(_):
    global num_files_processed_so_far

    with file_count_lock:
        num_files_processed_so_far += 1
        if num_files_processed_so_far % 100 == 0:
Ejemplo n.º 52
0
            with open(path_result + '/' + filename, 'w') as output:
                for i in range(len(loss_list)):
                    output.write('%.4f,%.4f,%.4f\n' %
                                 (loss_list[i], hr_list[i], ndcg_list[i]))


if __name__ == '__main__':
    args = parse_args()

    dataset = None
    filename = None
    hr_recover = None
    ndcg_recover = None
    eval_queue = JoinableQueue()
    job_num = Semaphore(0)
    job_lock = Lock()

    if 'FC' in args.model:
        # pre-sized placeholders, wrapped in list() so they can be
        # assigned into by index later
        loss_list = list(range(3 * args.epochs))
        hr_list = list(range(3 * args.epochs))
        ndcg_list = list(range(3 * args.epochs))
    else:
        loss_list = list(range(args.epochs))
        hr_list = list(range(args.epochs))
        ndcg_list = list(range(args.epochs))

    # initialize logging and configuration
    print('------ %s ------' % (args.process_name))
    setproctitle.setproctitle(args.process_name)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    init_logging_and_result(args)
Ejemplo n.º 53
0
def processFunction(ownId, coordId, addr, port):
    time.sleep(1)

    # print('args: ', ownId, coordId, addr, port)

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((addr, port))
    response = s.recv(1024).decode()

    # print(response)
    r = response.split('_')[1:]
    addresses = []
    for i in range(len(r)):
        addresses.append(r[i].split('-'))
        addresses[i][0] = int(addresses[i][0])
        addresses[i][2] = int(addresses[i][2])

    s.close()
    time.sleep(1)

    # print('part 1 worked')
    time.sleep(1)

    id = [ownId, coordId]

    # print('starting requests', id)
    cond = []
    cond.append(True)

    electionStart = [time.time()]
    requestingThread = [
        threading.Thread(target=requestFunction,
                         args=(id, addresses, cond, electionStart))
    ]
    if (id[0] != id[1]):
        requestingThread[0].start()

    queue = []
    mutex = Lock()
    mutexQueue = Lock()

    myAddr = []
    for i in range(len(addresses)):
        if addresses[i][0] == ownId:
            myAddr = addresses[i]
            break

    conSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conSock.bind((myAddr[1], myAddr[2]))
    conSock.listen()

    connectionThreads = []
    stopThread = False
    i = 0

    while (True):
        conn, caddr = conSock.accept()
        # stopThread.append(False)
        t = threading.Thread(target=connectionFunction,
                             args=(lambda: stopThread, conn, mutex, queue, id,
                                   addresses, requestingThread, cond,
                                   electionStart, mutexQueue))
        t.start()
        connectionThreads.append(t)
        i += 1
Ejemplo n.º 54
0
    def __init__(self, maxsize, total_done, **kwargs):

        self.queue = Queue(maxsize=maxsize, **kwargs)
        self.lock = Lock()
        self.namespace = _get_namespace()
        self.namespace.remaining = total_done
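
A hedged sketch of how such a wrapper might hand out items while tracking how
many remain; Manager().Namespace() stands in for the snippet's
_get_namespace(), and the get() method is an assumption:

from multiprocessing import Lock, Manager, Queue

class CountedQueue:
    def __init__(self, maxsize, total_done):
        self.queue = Queue(maxsize=maxsize)
        self.lock = Lock()
        self.namespace = Manager().Namespace()
        self.namespace.remaining = total_done

    def get(self):
        item = self.queue.get()
        with self.lock:
            self.namespace.remaining -= 1  # guarded read-modify-write
        return item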
Ejemplo n.º 55
0
 def __init__(self):
     # RawValue because we don't need the synchronized wrapper; we create our own Lock:
     self.val = RawValue('d', 0)
     self.num = RawValue('i', 0)
     self.lock = Lock()
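
A hedged completion of this accumulator: RawValue carries no lock of its own,
so the explicit Lock guards both fields together. The add()/mean() methods are
assumptions layered on the original __init__:

from multiprocessing import Lock, RawValue

class RunningMean:
    def __init__(self):
        self.val = RawValue('d', 0)
        self.num = RawValue('i', 0)
        self.lock = Lock()

    def add(self, x):
        with self.lock:
            self.val.value += x
            self.num.value += 1

    def mean(self):
        with self.lock:
            return self.val.value / self.num.value if self.num.value else 0.0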
Ejemplo n.º 56
0
#
#   Source Repository: https://github.com/chrissimpkins/Crunch
# ==================================================================

import sys
import os
import shutil
import struct
import subprocess
import time
from subprocess import CalledProcessError

from multiprocessing import Lock, Pool, cpu_count

# Locks
stdstream_lock = Lock()
logging_lock = Lock()

# Application Constants
VERSION = "5.0.0"
VERSION_STRING = "crunch v" + VERSION

# Processor Constant
#  - Modify this to an integer value if you want to fix the number of
#    processes spawned during execution.  The process number is
#    automatically defined during source execution when this is defined
#    as a value of 0
PROCESSES = 0

# Dependency Path Constants for Command Line Executable
#  - Redefine these path strings to use system-installed versions of
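
A hedged sketch of how module-level locks such as stdstream_lock are used from
worker processes; the report() helper is an assumption, not part of crunch
itself. Note that locks created at import time are shared only when children
are forked; under spawn each child re-imports the module and gets its own,
unrelated lock:

def report(message):
    with stdstream_lock:
        sys.stdout.write(message + "\n")
        sys.stdout.flush()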
Ejemplo n.º 57
0
class ResourceLinkList:

    _uid = ''
    _upwd = ''
    _mutex = Lock()

    #dictionary, key is the link's addr.
    _links = {}

    def __init__(self, uid='', upwd=''):
        self._uid = uid
        self._upwd = upwd
        self._links = {}

    def __str__(self):
        # __str__ must return a string, not the underlying dict
        return '\n'.join(str(link) for link in self._links.values())

    def setUid(self, uid):
        self._uid = uid

    def setUpwd(self, pwd):
        self._upwd = pwd

    def getUid(self):
        return self._uid

    def getUpwd(self):
        return self._upwd

    # __contains__ and __getitem__ could be added here to support `in`
    # checks and list-style indexing over the link dict.

    def __iter__(self):
        return iter(self._links.values())

    def getLinks(self):
        return self._links

    def append(self, addr, level):
        self.appendAddr(addr, level)

    def appendAddrList(self, addrList, level):
        for addr in addrList:
            self.appendAddr(addr.encode('utf-8'), level)

    #directly save addr to list
    def appendAddr(self, addr, level=0):
        link = ResourceLink(addr, level)
        self.appendLink(link)

    #save a link to list
    def appendLink(self, link):
        if self._verifyLink(link):
            key = link.getKey()
            self._links[key] = link

    # get a non-visited link; drop links that were already visited
    def _visitLink(self, key):
        if self._links[key] is not None \
                and not self._links[key]._isVisited():
            return self._links[key]
        else:
            self.removeLink(key)
        return None

    # visit the next address that contains data
    def getNextLink(self):
        with self._mutex:
            # iterate over a copy of the keys: _visitLink may delete entries
            for key in list(self._links.keys()):
                link = self._visitLink(key)
                # skip empty links
                if link is None or not link.hasData():
                    continue
                print('next link:', link)
                return link
            return None

    # get the next link that still needs to be parsed
    def getNextToParseLink(self):
        with self._mutex:
            for key in list(self._links.keys()):
                link = self._visitLink(key)
                # skip empty or already-parsed links
                if link is None or not link.hasData() or link.isParsed():
                    continue
                print('next link:', link)
                self._links[key]._setIsParsed()
                return link
            return None

    def getNextEmptyLink(self):
        with self._mutex:
            for key in list(self._links.keys()):
                link = self._visitLink(key)
                # skip links that already have data or are being analysed
                if link is None or link.hasData() or link.isAnalysis():
                    continue
                self._links[key]._setAnalysis()
                return link
            return None

    def setLinkAnalysised(self, key, isAnalysis=True):
        with self._mutex:
            self._links[key]._setAnalysis(isAnalysis)

    def setLinkVisited(self, key):
        with self._mutex:
            self._links[key]._setVisited()

    def setLinkRawData(self, key, rawData):
        with self._mutex:
            self._links[key].setRawData(rawData)

    def setLinkResponseCode(self, key, code):
        with self._mutex:
            self._links[key].setResponseCode(code)

    def setLinkType(self, key, type):
        with self._mutex:
            self._links[key].setType(type)

    # check link validity
    def _verifyLink(self, link):
        if not isinstance(link, ResourceLink):
            return False
        key = link.getKey()
        # reject a key collision that points at a different address
        if key in self._links and self._links[key].getAddr() != link.getAddr():
            return False
        return True

    # remove a link
    def removeLink(self, key):
        if key in self._links:
            oldLink = self._links[key]
            del self._links[key]
            return oldLink

    #get link list size
    def size(self):
        return len(self._links)
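
A hedged usage sketch for the class above; the ResourceLink type is defined
elsewhere, and the crawl steps shown are assumptions about how the list is
driven:

links = ResourceLinkList(uid='user', upwd='secret')
links.appendAddr('http://example.com/page', level=0)
link = links.getNextEmptyLink()  # claim an unanalysed link under the mutex
if link is not None:
    links.setLinkRawData(link.getKey(), '<html>...</html>')
    links.setLinkVisited(link.getKey())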
Ejemplo n.º 58
0
'''
Created on Oct 25, 2016

@author: Jun
'''

from multiprocessing import Process, Pool, Event, Lock, Value, Queue
import os

N = 2
task_que = Queue(5)
no_more_task = Event()
global_lock = Lock()


def calculator(x, y):
    s = 0
    print('Computing:', x, y)
    for i in range(x, y, 1):
        s += 1
    print('Local sum:', s)
    return s


def post_tasks():
    for i in range(0, 10000, 1000):
        print('Posting task..')
        task_que.put((i, i + 1000))
        print('Posted task..{}'.format(i))
    print('Finished posting tasks..')
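
The consumer side is not shown; a hedged sketch of one, draining task_que
until no_more_task is set and serializing output with global_lock:

import queue

def consume():
    total = 0
    while not (no_more_task.is_set() and task_que.empty()):
        try:
            x, y = task_que.get(timeout=0.5)
        except queue.Empty:
            continue
        total += calculator(x, y)
        with global_lock:
            # the lock only keeps prints from several consumers from interleaving
            print('Running total:', total)
    return total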
Ejemplo n.º 59
0
 def register(cls, key: str):
     """Register a lock under a specific key.

     This must be called BEFORE any process is forked, so that every
     child inherits the same lock object.
     """
     cls.locks[key] = Lock()
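
A hedged sketch of the surrounding class this classmethod implies; the
LockRegistry name and the acquire-by-key usage are assumptions:

from multiprocessing import Lock

class LockRegistry:
    locks = {}

    @classmethod
    def register(cls, key: str):
        cls.locks[key] = Lock()  # must happen before any fork

if __name__ == '__main__':
    LockRegistry.register('io')
    with LockRegistry.locks['io']:
        print('exclusive section')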
Ejemplo n.º 60
0
class DomainObject:
    """The domain object with event sourcing.

    It should be able to :
    - mutate from a received event
    - store events together with their version number
    - store its version
    - be rehydrated from its events
    """
    def __init__(self):
        """Initialises this with it's first event."""

        self.object_id = "{}-{}".format(self.__class__.__name__,
                                        str(uuid.uuid4()))
        self.version_number = 0
        self.event_stream = list()
        self.lock = Lock()
        self.mutate("DomainObjectCreated", {"id": self.object_id})

    def mutate(self, event_name, event):
        """Add an event to the stream of events.

        :param event: the received event. That object must be JSON serializable.
        :param event_name: The name of the received event. Must be a not None string
        :raise ValueError: if event is not JSON serializable
        """
        assert event_name is not None
        assert isinstance(event_name, str)

        if not self.__is_json_serializable(event):
            raise ValueError("Event must be JSON serializable")

        # `with` releases the lock even if appending raises
        with self.lock:
            self.version_number += 1
            self.event_stream.append({
                "object_id": self.object_id,
                "object_type": self.__class__.__name__,
                "version": self.version_number,
                "event_id": f"{self.object_id}-{str(uuid.uuid4())}",
                "event_name": event_name,
                "event": event,
                "event_timestamp": datetime.datetime.now().timestamp(),
            })

        self.__apply_event(event_name, event)

    def rehydrate(self, event_list):
        """Rehydrate the object from its event list.

        :param event_list: the list of events used for rehydration
        """
        assert isinstance(event_list, Iterable)

        event_list.sort(key=lambda x: x["version"])

        # `with` guarantees the lock is released even if a version
        # mismatch raises mid-loop
        with self.lock:
            self.__clear_stream()
            for event in event_list:
                if event["version"] < self.version_number:
                    raise ValueError(
                        "Rehydrated version number is {} but actual version number is {}"
                        .format(event["version"], self.version_number))

                self.__apply_event(event["event_name"], event["event"])

                self.version_number += 1
                self.object_id = event["object_id"]
                self.event_stream.append({
                    "object_id": self.object_id,
                    "object_type": event["object_type"],
                    "version": event["version"],
                    "event_name": event["event_name"],
                    "event_id": event["event_id"],
                    "event": event["event"],
                    "event_timestamp": event["event_timestamp"],
                })

    @staticmethod
    def diff_event_streams(event_stream1, event_stream2):
        """Compute the difference between two event streams.

        Diff is computed using the longest prefix of event ids.
        So returned d1 and d2 contains the event streams from the first different element

        Requires:
            The two event streams are for the same object_id

        Returns:
            A tuple (d1, d2) with d1, d2, two lists.
            d1 contains all events of event_stream1 that are different from event_stream2
            d2 contains all events of event_stream2 that are different from event_stream1

        """
        assert (len(event_stream1) == 0 or len(event_stream2) == 0 or
                event_stream1[0]["object_id"] == event_stream2[0]["object_id"])
        assert (len(set(map(lambda x: x["object_id"], event_stream1))) <= 1 and
                len(set(map(lambda x: x["object_id"], event_stream2))) <= 1)
        diff_stream1 = []
        diff_stream2 = []

        sorted_event_stream1 = sorted(deepcopy(event_stream1),
                                      key=lambda x: x["version"])
        sorted_event_stream2 = sorted(deepcopy(event_stream2),
                                      key=lambda x: x["version"])
        for (idx, (event1, event2)) in enumerate(
                zip_longest(sorted_event_stream1,
                            sorted_event_stream2,
                            fillvalue=None)):
            one_stream_finished = event1 is None or event2 is None
            if one_stream_finished:
                if event1 is not None:
                    diff_stream1 = event_stream1[idx:]
                if event2 is not None:
                    diff_stream2 = event_stream2[idx:]
                break
            elif event1["event_id"] != event2["event_id"]:
                diff_stream1 = event_stream1[idx:]
                diff_stream2 = event_stream2[idx:]
                break

        return diff_stream1, diff_stream2

    def __clear_stream(self):
        self.event_stream = list()
        self.version_number = 0

    def __apply_event(self, event_name, event):
        function_name = "on_{}".format(event_name)
        if function_name in self.__dir__():
            getattr(self, function_name)(event)

    @staticmethod
    def __is_json_serializable(event):
        assert event is not None
        try:
            json.dumps(event)
            return True
        except Exception:
            return False
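
A hedged usage sketch for DomainObject: the Counter subclass and its on_*
handler are assumptions illustrating how mutate() dispatches events and how
rehydrate() replays them:

class Counter(DomainObject):
    def __init__(self):
        self.count = 0  # set before super().__init__ so handlers can run
        super().__init__()

    def on_Incremented(self, event):
        self.count += event["by"]

c = Counter()
c.mutate("Incremented", {"by": 2})
c.mutate("Incremented", {"by": 3})
assert c.count == 5

replica = Counter()
replica.rehydrate(list(c.event_stream))  # replays events in version order
assert replica.count == 5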