def resource_collection(self, keyword):
		t1 = tm()
		n_profiles = 0
		keyword = keyword.replace('/', ' ')
		file_ = open(os.path.join(os.path.dirname(self.directory), keyword+'.json'), 'a+')
		#--let's aim for collecting 1000+ profiles per skill/keyword
		for i in range(15):
			if i == 0:
				beg = i
				end = i+100
			else:
				beg = end
				end = end+100
			url_ = browse_url_profiles % (keyword, beg, end)
			data = self.get_resource(url_)

			for each in data:
				item = pq_(each)
				unique_id = item.attr('id')
				item_data = self.get_info(item('.sre-content'))
				item_data.append({'type': 'resource_id', 'data': unique_id})
				if unique_id not in self.n_distinct:
					self.n_distinct[unique_id] = 1
				file_.write(json.dumps(item_data)+'\n')
				n_profiles += 1
				# if n_profiles % 500 == 0:
				# 	print "%d profiles collected for %s - %s" % (n_profiles, self.area, keyword)

		file_.close()
		t2 = tm()
		print "done collecting %d records  for (%s - %s) ..in %d seconds.." % (n_profiles, self.area, keyword, int(t2-t1))
		print "TOTAL DISTINCT: %d " %  len(self.n_distinct)
		print "\n"
		self.n_done.append(self.area)
		return
 def modal(self, context, event):
     #print(tm()-self.time)
     if event.type=="ESC":
         return {'FINISHED'}
     if tm()-self.time >= 30:
         print('Auto Saving...')
         bpy.ops.file.save_incremental()
         self.time = tm()
         
     return {'PASS_THROUGH'}
	def resource_collection(self, keyword_index, keyword, sort, rest_keywords=False):
		start_time = tm()
		n_all = 0
		n_profiles = {}
		keyword = keyword.replace('/', ' ').strip('\n')
		init_url = self.init_url % (keyword.replace(' ', '+'), 0, 50)
		
		filtering_urls, result_count  = self.get_filter_urls(init_url, 0)
		
		if result_count >= 1000:
			counter = 10
		else:
			counter = int(max(float(result_count)/100, 1))
		
		for route in filtering_urls:
			url_ = self.url_ % pq_(route).children('a').attr('href')
			for i in range(counter):
				if i == 0:
					beg = i
					end = i+100
				else:
					beg = end
					end = end+100
				postfix = '&start=%d&limit=%d&radius=100&%s&co=%s' % (beg, end, sort, self.country_code)	
				print url_+postfix	
				data = self.get_resource(url_+postfix, 0)

				for each in data:
					item = pq_(each)
					unique_id = item.attr('id')
					city_ = item('.location').text()
					n_profiles[unique_id] = city_
					profile_data = indeed_resumes_details(unique_id).resource_collection()
					self.save_to_disk(profile_data, unique_id)
					n_all += 1

			db_success = False
			while not db_success:
				try:
					db_insert_hash(n_profiles, self.country_code)
					db_success = True
				except:
					print 'db locked..will wait for 5 secs and try again..'
					slp(5)
			print 'inserted %d records to db.. %s, %d' % (len(n_profiles), keyword, keyword_index)	
			n_profiles = {}
			slp(2) #--sleep 2 secs after every filter so calls aren't made too fast and we don't get blocked quickly
			gc.collect()
		gc.collect()
		current_time = tm()
		self.time_all.append((keyword, n_all, current_time - start_time))
		print 'current time passed..%d secs for one round of %s (%d)' % (int(current_time - start_time), keyword, keyword_index)
		return
def distance(**params):
    if params:
        color_distance_ = color_distance(params, True)
        output, status = color_distance_.distance()
        return {'output': output, 'status': status}
    else:
        start_ = tm()
        color_distance_ = color_distance(request.args)
        output, status = color_distance_.distance()
        end_ = tm()
        return json.dumps({'time_spent': float(end_-start_), 'output': output, 'status': status})
def convert(**params):
    if params:
        color_conversion_ = color_convert(params, True)
        output, status = color_conversion_.convert()
        return {'output': output, 'status': status}
    else:
        start_ = tm()
        color_conversion_ = color_convert(request.args)
        output, status = color_conversion_.convert()
        end_ = tm()
        return json.dumps({'time_spent': float(end_-start_), 'output': output, 'status': status})
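# --- Hedged usage sketch (not part of the original source) ---
# Both handlers above are dual-mode: called with keyword arguments they
# return a plain dict, and called bare (as a Flask view) they read
# `request.args` and return a JSON string with timing info. One plausible
# wiring, assuming Flask is available and `distance`/`convert` are defined
# as above:
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.add_url_rule('/distance', 'distance', distance)
#     app.add_url_rule('/convert', 'convert', convert)
#
# A direct call such as distance(color1='#ff0000', color2='#00ff00')
# bypasses HTTP entirely; the parameter names depend on what the
# `color_distance` class expects and are illustrative only.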
    def modal(self, context, event):
        #print(tm()-self.time)

        if event.type=="ESC":
            return {'FINISHED'}

        if tm()-self.time >= context.user_preferences.addons[__name__].preferences.time_btw_save:
            print('Auto Saving...')
            bpy.ops.file.save_incremental()
            self.time = tm()       
        return {'PASS_THROUGH'}
Example #7
 def get_timeseries_data(self):
     t1 = tm()
     keys = map(lambda p: p + self.year_window_selected,
                ['solarradiation', 'airtemperature'])
     client = establish_connection()
     res = client.renewable_energy_2015.test_.find(
         {'key': {'$in': keys}, 'code': {'$in': self.codes}},
     )
     data = self.map_([each for each in res], keys).values()
     client.close()
     t2 = tm()
     print t2 - t1
     return data
    def modal(self, context, event):
        #print(tm()-self.time)

        if context.user_preferences.addons[__name__].preferences.stop or event.type == 'ESC':
            print('Auto Save Disabled')
            return {'FINISHED'}

        if tm()-self.time >= context.user_preferences.addons[__name__].preferences.time_btw_save:
            print('Auto Saving...')
            bpy.ops.file.auto_save_incremental()
            self.time = tm()

        return {'PASS_THROUGH'}
def setPanTilt(pan, tilt):
    global last_time
    # constrain to 0-100 range
    pan = 0 if pan < 0 else 100 if pan > 100 else pan
    tilt = 0 if tilt < 0 else 100 if tilt > 100 else tilt

    # Write 3 bytes to bus, 'm', pan, tilt (m for manual)
    if tm() - last_time > pantilt_rate:
        try:
            bus.write_i2c_block_data(address, ord('m'), [pan, tilt])
        except IOError:
            print "Pan Tilt Write error (rate too fast)"
        last_time = tm()
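# --- Hedged setup sketch (assumption: not part of the original source) ---
# setPanTilt() relies on module-level state the snippet does not show.
# A minimal initialization consistent with the smbus API; the bus number,
# slave address and rate limit below are illustrative values:
import smbus
from time import time as tm

bus = smbus.SMBus(1)     # I2C bus 1, e.g. on a Raspberry Pi
address = 0x07           # illustrative I2C slave address
pantilt_rate = 0.05      # minimum seconds between writes
last_time = tm()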
Example #10
    def tune(self, nep=1, nbt=0, rec=0, prt=0):
        """ tune the parameters by running the trainer {nep} epoch.
        nep: number of epoches to go through
        nbt: number of extra batches to go through

        rec: frequency of recording. 0 means record after each epoch,
        which is the default, otherwise, record for each batch, which
        can be time consuming.

        prt: frequency of printing.
        """
        bt = self.bt
        b0 = bt.eval().item()  # starting batch
        ei, bi = 0, 0  # counted epochs and batches

        nep = nep + nbt // self.nbat()
        nbt = nbt % self.nbat()

        while (ei < nep or bi < nbt) and not self.__stop__():
            # send one batch for training
            t0 = tm()
            self.step()
            self.__time__ += tm() - t0

            # update history
            bi = bi + 1  # batch count increase by 1
            if rec > 0:  # record each batch
                self.__hist__.append(self.__rpt__())
            if prt > 0:  # print each batch
                self.cout()

            # see the next epoch relative to batch b0?
            if bt.get_value().item() == b0:
                ei = ei + 1  # epoch count increase by 1
                bi = 0  # reset batch count

            # after an epoch
            if bt.get_value().item() == 0:
                # record history
                if rec == 0:  # record at new epoch
                    self.__hist__.append(self.__rpt__())
                # print
                if prt == 0:  # print
                    self.cout()

                # adjustment at the end of each epoch
                if self.__onep__:
                    self.__onep__()
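    # --- Hedged usage sketch (not part of the original source) ---
    # Per the docstring above, given a trainer instance the call patterns
    # would look like this (names illustrative):
    #
    #     trainer.tune(nep=5)             # 5 epochs, record once per epoch
    #     trainer.tune(nep=1, rec=1)      # record after every batch
    #     trainer.tune(nbt=200, prt=1)    # 200 extra batches, print each one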
    def invoke(self, context, event):
        if not bpy.data.filepath:
            self.report({'WARNING'}, "Auto Save Cancelled: Please save a main file")
            return {'CANCELLED'}
        sep = os.path.sep
        
        context.user_preferences.addons[__name__].preferences.dir_path = context.user_preferences.addons[__name__].preferences.dir_path_user_defined + os.path.basename(bpy.data.filepath.split('.blend')[0]) + sep # to create a directory with base file name # change to prefs => access from all the code
        dir_path = rp_d( os.path.dirname(bpy.data.filepath) + context.user_preferences.addons[__name__].preferences.dir_path ) # base path + new directory path
        
        print()
        print('Creating directory and base file (copy of current file)...')

        print('Trying to create directory: ', dir_path)
        if os.path.isdir(dir_path):
            print('Directory already exists')
        else:
            os.makedirs(dir_path, exist_ok=True) # os.makedirs(dir_path) => it's enough (without any existence info)
            print('Directory created')
        
        bpy.context.user_preferences.addons[__name__].preferences.active_main_save = True # active_main_save avoid restarting at this state
        basefile = rp_f( dir_path + bpy.path.basename(bpy.data.filepath).split('.blend')[0] + '_000' +  '.blend')
        bpy.ops.wm.save_as_mainfile(filepath=basefile, copy=True)
        bpy.context.user_preferences.addons[__name__].preferences.active_main_save = False
        print('Base file created: ', basefile)

        context.user_preferences.addons[__name__].preferences.stop = False
        context.user_preferences.addons[__name__].preferences.active = True    
        self.time = tm()
        context.window_manager.modal_handler_add(self)

        print('Auto Incremental Saving started')
        print()
        return {'RUNNING_MODAL'}
    def modal(self, context, event):
        #print(tm()-self.time)

        if context.user_preferences.addons[__name__].preferences.stop:
            print('Auto Save Disabled')
            context.user_preferences.addons[__name__].preferences.active = False
            return {'FINISHED'}

        if tm()-self.time >= ( context.user_preferences.addons[__name__].preferences.time_btw_save_min*60 + context.user_preferences.addons[__name__].preferences.time_btw_save_second):
            print('Auto Saving...')
            bpy.ops.file.auto_save_incremental()
            if context.user_preferences.addons[__name__].preferences.report_save:
                self.report({'WARNING'}, "Auto Saving....")
            self.time = tm()

        return {'PASS_THROUGH'}
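    # --- Hedged context sketch (not part of the original source) ---
    # The invoke()/modal() pairs above are methods of Blender modal
    # operators. A minimal skeleton they would plug into, assuming the
    # pre-2.80 API used throughout (context.user_preferences,
    # bpy.ops.file.auto_save_incremental); the class and idname below are
    # illustrative:
    #
    #     import bpy
    #     from time import time as tm
    #
    #     class FILE_OT_auto_save_start(bpy.types.Operator):
    #         bl_idname = "file.auto_save_start"
    #         bl_label = "Start Auto Incremental Save"
    #         # invoke() and modal() as defined above
    #
    #     bpy.utils.register_class(FILE_OT_auto_save_start)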
    def extract_details(self, data):
        t1 = tm()

        details = {}
        if not data:
            return details

        details["name"] = data("#basic_info_row #basic_info_cell #resume-contact").text().strip("\n")
        details["title"] = data("#basic_info_row #basic_info_cell #headline").text().strip("\n")
        details["address"] = (
            data("#basic_info_row #basic_info_cell #contact_info_container .adr #headline_location").text().strip("\n")
        )
        details["skills"] = (
            data(".skills-content #skills-items .data_display .skill-container").text().strip("\n").split(",")
        )
        details["additional_info"] = (
            data(".additionalInfo-content #additionalinfo-items .data_display")
            .text()
            .strip("\n")
            .encode("ascii", "ignore")
        )

        identities = {}
        for k, v in self.profile_identities.iteritems():
            identities[k] = {"data": []}
            for item in data(v["content"]).children():
                data_ = {}
                it = pq_(item)
                if it.attr("id").startswith(k):
                    it_id = it.attr("id")
                    item = data(v["item_w_id"] % it_id)
                    children = pq_(item.children())
                    for each, splits in v["items"]:
                        if splits:
                            item_construct = children(each).text().strip("\n").split("-")
                            for sub, index in splits.iteritems():
                                data_[sub] = item_construct[index].strip("\n")
                        else:
                            data_[each] = children(each).text().encode("ascii", "ignore").strip("\n")

                identities[k]["data"].append(data_)
            details[k] = identities[k]
        t2 = tm()
        details["time_taken"] = t2 - t1
        details["timestamp"] = tm()
        return details
def scrap_profiles(load_done=False):
	done_ = {}
	done_target = 'profile_data/done_v1.json'
	t1 = tm()
	data = get_distincts()
	#folder = '/Volumes/SKILLZEQ/%s.json'
	folder = '/Users/saif/skillz_eq_samples/%s.json'
	for i, key in enumerate(data):
		if key not in done_:
			try:
				obj = indeed_resumes_details(key)
				profile = obj.resource_collection()
				profile['semantics'] = data[key]
			except:
				print 'put to sleep for 300 secs due to break..'
				slp(300)
				try:
					obj = indeed_resumes_details(key)
					profile = obj.resource_collection()
					profile['semantics'] = data[key]
				except:
					for k_ in data:
						if k_ not in done_:
							done_[k_] = 0
					df = open(done_target, 'wb')
					df.write(json.dumps(done_))
					df.close()
					print 'script terminated at %d records...data for dones in %s' % (i, done_target)
					return

			f = open(folder % key, 'wb')
			f.write(json.dumps(profile))
			f.close()
			done_[key] = 1

			if i % 1000 == 0:
				t2 = tm()
				print '%d records saved in %d seconds..' % (i, int(t2-t1))
				
				if i == 2000:
					break
	t2 = tm()
	print 'success... %d records scraped.. in %d mins..' % (i, int(float(t2-t1)/60))
	return
Example #15
def converge_droplets_dbs():
	"""
	CONVERGES ALL THE DBS TO A SINGLE MASTER DB
	"""
	t1 = tm()
	master_con = sql.connect(master_db_file, timeout=10)
	master_cur = master_con.cursor()
	for root, directories, files in os.walk(droplet_dbs_folder):
		for filename in files:
			splitted = filename.split('.')
			if len(splitted) > 1 and splitted[1] == 'db':
				if not r_master.hget('dbs_done', filename):
					r_master.hset('dbs_done', filename, True)
					print filename
					ingest_(filename, master_cur, master_con)
	master_con.close()
	t2 = tm()
	print 'total time taken for converging .. %d' % int(t2-t1)
	return
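# --- Hedged setup sketch (assumption: not part of the original source) ---
# converge_droplets_dbs() depends on module-level state the snippet omits.
# A plausible initialization, with illustrative paths; ingest_() is assumed
# to copy one droplet db's rows into the master db and is not shown here:
import sqlite3 as sql
import redis

master_db_file = 'master.db'          # illustrative path
droplet_dbs_folder = 'droplet_dbs/'   # illustrative path
r_master = redis.StrictRedis(host='localhost', port=6379, db=0)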
Example #16
def get_followers(username, type_q, stop_value=None, access_token=None):
    """Returns data of all the users following a particular user."""
    t1 = tm()
    
    print_results = 'no'
    
    followed_by = []

    try:
        if not access_token:
            access_token = '1013808034.cdfd9a8.6dc3da1cfcb64628b5c056381f372cba'

        _id = get_id(username.encode('utf-8'), access_token)
        y, data = followers_crawler(_id, [], 0, stop_value, access_token)
    
        if len(flatten(flatten(y))) > len(data):
            #del data[:]
            for a in flatten(flatten(y)):
                if type_q == 'users_only':
                    followed_by.append([b['username'] for b in a['data']])
                elif type_q == 'user_and_bio':
                    followed_by.append([{'username': b['username'], 'bio': b['bio']} for b in a['data']])
            t2 = tm()
            #del y[:]
        else:
            #del y[:]
            for a in data['data']:
                if type_q == 'users_only':
                    followed_by.append(a['username'])
                elif type_q == 'user_and_bio':
                    followed_by.append({'username': a['username'], 'bio': a['bio']})
            #del data[:]
            t2 = tm()
        
        print "total time spent: "+ str(float(t2-t1)) + " seconds"
        
        if print_results == 'yes':
            print flatten(followed_by)
        else:
            return flatten(followed_by)
    except:
        return followed_by
Example #17
 def process(self):
     """reading all the data in one walk"""
     t1 = tm()
     for root, directories, files in os.walk(self.data_dir):
         for filename in files:
             filepath = os.path.join(root, filename)
             f = open(filepath, 'rb')
             data = self.makes_(f)
             for i in data:
                 # build the nested dict: team -> opposition -> match_type -> [records]
                 team = self.data.setdefault(i.team, {})
                 opposition = team.setdefault(i.opposition, {})
                 opposition.setdefault(i.match_type, []).append(i._asdict())
             f.close()
     self.replace_locs()
     file_saved = self.saves_(self.data)
     t2 = tm()
     print "%s saved .." % file_saved
     print "total time taken to process the files .. %f secs .. " % float(t2-t1)
     return
def save_distincts():
	"""
	This method parses the unique ids from the given
	data directory of ids scraped from indeed
	"""
	t1 = tm()
	object_ = {}
	data_dir = 'data/'
	export_folder = '/Volume/SKILLZEQ/resumes_v1/%s/%s/'
	target = 'profile_data/distincts_v2.json'
	target_file = open(target, 'wb')
	for root, directories, files in os.walk(data_dir):
		for filename in files:
			file_ = filename.split('.') #--complete filename
			if len(file_) < 2: #--skip files without an extension
				continue
			file_format = file_[1] #--e.g. json
			keyword = file_[0] #--file name
			domain = root.split('/')[1] #--parent folder
			if file_format == 'json':
				filepath = os.path.join(root, filename)
				f = open(filepath, 'rb')
				for record in f:
					try:
						record = filter(lambda p: p['type'] == 'resource_id', ast.literal_eval(record))
						for i in record:
							unique_id = i['data']
							if unique_id in object_:
								object_[unique_id].append(keyword)
							else:
								object_[unique_id] = [keyword]
							#object_[unique_id] = 1
					except:
						print filepath 
						continue
				f.close()
	target_file.write(json.dumps(object_))
	target_file.close()
	t2 = tm()
	print '%d seconds taken..' % int(t2-t1)
	return
    def collect_keywords(self):
        t1 = tm()
        for keyword in self.domains:
            domain_name = keyword.upper().replace(" ", "_")
            if domain_name in self.final_data:
                container = self.final_data[domain_name]
            else:
                self.final_data[domain_name] = []
                container = self.final_data[domain_name]

            url_ = self.url_base % keyword.replace(" ", "+")
            data = self.get_resource(url_)
            for each in data:
                child = pq_(each).text()
                container.append(child)
                self.n_concepts += 1
        t2 = tm()
        f = open("keywords/skills.json", "wb")
        f.write(json.dumps(self.final_data))
        f.close()
        print "total time taken: %d seconds.." % int(t2 - t1)
        print "%d concepts saved in keywords/skills.json" % self.n_concepts
	def extract_details(self, data):
		t1 = tm()

		details = {}
		if not data:
			return details

		details['name'] = data('#basic_info_row #basic_info_cell #resume-contact').text()
		details['title'] = data('#basic_info_row #basic_info_cell #headline').text()
		details['address'] = data('#basic_info_row #basic_info_cell #contact_info_container .adr #headline_location').text()
		details['skills'] = data('.skills-content #skills-items .data_display .skill-container').text().split(',')
		details['additional_info'] = data('.additionalInfo-content #additionalinfo-items .data_display').text().encode('ascii','ignore')

		identities = {}
		for k, v in self.profile_identities.iteritems():
			identities[k] = {'data': []}
			for item in data(v['content']).children():
				data_= {}
				it = pq_(item)
				if it.attr('id').startswith(k):
					it_id = it.attr('id')
					item = data(v['item_w_id'] % it_id)
					children = pq_(item.children())
					for each, splits in v['items']:
						if splits:
							item_construct = children(each).text().split('-')
							for sub, index in splits.iteritems():
								data_[sub] = item_construct[index]
						else:
							data_[each] = children(each).text().encode('ascii','ignore')

				identities[k]['data'].append(data_)
			details[k] = identities[k]
		t2 = tm()
		details['time_taken'] = t2-t1
		details['timestamp'] = tm()
		return details
    def invoke(self, context, event):
        dir_path = os.path.dirname(bpy.data.filepath) + context.user_preferences.addons[__name__].preferences.dir_path # base path + new directory path
        print()
        print('Creating directory and base file (copy of current file)...')

        os.makedirs(dir_path, exist_ok=True)
        print('Directory created')

        bpy.ops.wm.save_as_mainfile(filepath= dir_path + bpy.path.basename(bpy.data.filepath), copy= True)
        print('Base file created')

        self.time = tm()
        context.window_manager.modal_handler_add(self)
        print('Auto Incremental Saving started')
        print()

        return {'RUNNING_MODAL'}
def run_fit(time_sets,LDH_inits,Joined_data,starts,PS_inits,index):
    smallest_error = 100000.0
    for j in range(starts):

        print 'currently on start', j + 1
        # Initialising values
        ini_val = rand_ini_val()

        # Initialising and limiting parameters
        p = parameters(ini_val)

        # Fitting data (older lmfit versions update the parameters p in place,
        # which is why error_list below is computed with p after the fit)
        result = minimize(fcn4min, p, args=(time_sets,LDH_inits,Joined_data,PS_inits,index))

        # Calculating average and integral of absolute error
        error_list = fcn4min(p, time_sets,LDH_inits, Joined_data,PS_inits,index)
        abs_error_list = map(abs, error_list)
        ave_abs_error = sum(abs_error_list)/len(error_list)
        int_abs_error = trapz(abs_error_list, dx=0.017)

        # Check error and save parameters
        smallest_error = min([smallest_error, ave_abs_error])
        if smallest_error == ave_abs_error:
            p_best = p
            smallest_int_error = int_abs_error

        print 'completed start', j + 1

    # Storing parameter and error values
    # all_ps.append(p_best)                     obsolete
    # all_errors.append(smallest_error)         obsoleted as these were per fit but we no longer fit data files separately
    # all_int_errors.append(smallest_int_error)

    print 'Completed Fit '
    print('__________________________')


    elapsed = tm() - t
    print('*******************')
    print 'elapsed time (min) =', elapsed/60.
    vals = unpack_parameters(p_best)
    nms = ['k1', 'k2', 'k3', 'k4', 'k5', 'k6', 'k7', 'k8', 'k9', 'k10', 'k11', 'k12', 'k13', 'k14', 'k15', 'UA','pas_0', 'mu_0', 'E', 'q']
    for i, name in enumerate(nms):
        print name,vals[i]
    return p_best, smallest_int_error
 def test_memoize(self):
     """
     Test memoizing data and updating it when expired.
     """
     utils.get_data()
     self.assertIn('get_data', utils.CACHE)
     self.assertIn('value', utils.CACHE['get_data'])
     self.assertIn('time', utils.CACHE['get_data'])
     utils.CACHE['get_data']['time'] = 0
     utils.get_data()
     self.assertNotEqual(utils.CACHE['get_data']['time'], 0)
     utils.get_data_xml()
     self.assertEqual(len(utils.CACHE), 2)
     self.assertIn('get_data_xml', utils.CACHE)
     future_time = tm() + 100
     utils.CACHE['get_data'] = {'time': future_time, 'value': 'test'}
     utils.get_data()
     self.assertEqual(utils.CACHE['get_data']['value'], 'test')
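 # --- Hedged sketch (assumption: not part of the original source) ---
 # The test above implies utils memoizes through a module-level CACHE
 # dict keyed by function name, each entry holding 'time' and 'value'.
 # A minimal decorator consistent with that contract (the expiry window
 # is illustrative):
 #
 #     from time import time as tm
 #     CACHE = {}
 #
 #     def memoize(expiry=60):
 #         def deco(fn):
 #             def wrapper(*args, **kwargs):
 #                 entry = CACHE.get(fn.__name__)
 #                 if entry and tm() - entry['time'] < expiry:
 #                     return entry['value']
 #                 value = fn(*args, **kwargs)
 #                 CACHE[fn.__name__] = {'time': tm(), 'value': value}
 #                 return value
 #             return wrapper
 #         return deco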
    def invoke(self, context, event):
        #context.user_preferences.addons[__name__].preferences.stop = True # kill any running instance

        context.user_preferences.addons[__name__].preferences.dir_path = context.user_preferences.addons[__name__].preferences.dir_path_user_defined + os.path.basename(bpy.data.filepath.split('.blend')[0]) + '/'   # to create a directory with base file name # change to prefs => access from all the code
        dir_path = os.path.dirname(bpy.data.filepath) + context.user_preferences.addons[__name__].preferences.dir_path # base path + new directory path

        print()
        print('Creating directory and base file (copy of current file)...')

        print('Trying to create directory: ', dir_path)
        os.makedirs(dir_path, exist_ok=True)
        print('Directory created')

        bpy.ops.wm.save_as_mainfile(filepath= dir_path + bpy.path.basename(bpy.data.filepath).split('.blend')[0] + '_000' +  '.blend', copy= True)
        print('Base file created')

        context.user_preferences.addons[__name__].preferences.stop = False
        self.time = tm() 
        context.window_manager.modal_handler_add(self)
        print('Auto Incremental Saving started')
        print()
        return {'RUNNING_MODAL'}
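# --- Hedged setup sketch (assumption: not part of the original source) ---
# The time loop below advances an advection-diffusion equation in Fourier
# space: an integrating factor exp(-i*k*u*dt) handles advection exactly and
# an RK4 step handles the -(D*k**2 + r) diffusion/reaction part. Plausible
# definitions for the helpers and grids it assumes, with illustrative
# parameter values:
import numpy as np
from time import time as tm

def FTrans(f, N):
    # forward transform on N points
    return np.fft.fft(f)

def IFTrans(fhat, N):
    # inverse transform, keeping the real part
    return np.real(np.fft.ifft(fhat))

N, Ts = 256, 500                        # grid points, time steps
x = np.linspace(-np.pi, np.pi, N, endpoint=False)
K = np.fft.fftfreq(N, d=(x[1] - x[0]) / (2 * np.pi))  # angular wavenumbers
dt, u, D, r = 0.001, 1.0, 0.1, 0.0      # step, advection, diffusion, reaction
to, a = 0.1, 0.0                        # analytic-solution offsets
t = np.arange(Ts) * dt + to
C = np.zeros((N, Ts))
A = np.zeros((N, Ts))
error = np.zeros((N, Ts))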
#C[:,0] = np.cos(2*(x-to*u))
#A[:,0] = np.cos(2*(x-to*u))
error[:, 0] = np.abs(C[:, 0] - A[:, 0])
#C[:,0] = np.cos(2*x)
#C[:,0] = x*(x-np.pi)*(x+np.pi)/10
#C[:,0] = 4*np.exp(-8*(x+np.pi/2)**2)
#C[:,0] = np.tanh(x)

Com = FTrans(C[:, 0], N)

#Integrating factor
gamma = -1j * K * u
I = np.exp(1 * gamma * dt)

#Time loop
tini = tm()
for time in range(1, Ts):
    Cm1 = -(D * K**2 + r) * Com
    Cm2 = -(D * K**2 + r) * (Com + Cm1 * (dt / 2))
    Cm3 = -(D * K**2 + r) * (Com + Cm2 * (dt / 2))
    Cm4 = -(D * K**2 + r) * (Com + Cm3 * dt)
    Cm = Com * I + np.exp(
        1 * gamma * dt) * (dt / 6) * (Cm1 + 2 * Cm2 + 2 * Cm3 + Cm4)
    C[:, time] = IFTrans(Cm, N)
    #    A[:,time] = 4*np.exp(-8*(x-(t[time]-to)*u)**2)
    #    A[:,time] = np.cos(2*(x-(t[time])*u))
    A[:, time] = 1 / np.sqrt(4 * np.pi * D *
                             (t[time])) * np.exp(-((x + a - u *
                                                    (t[time] - to))**2) /
                                                 (4 * D * (t[time])))
    #Analytical of advection-diffusion equation
from matplotlib.backends.backend_pdf import PdfPages
from time import time as tm
from Simulation import model_curves
from matplotlib import pyplot as plt
import Adjust_Kinetics
from numpy import linspace

length = Adjust_Kinetics.Specs['lengh']
Z = linspace(0, length, 1000)
t = tm()
with PdfPages('all_curves_1.pdf') as pdf:

    #Plotting to pdf

    curves = model_curves(Z)

    title = 'Molar flow rates'

    fig_P = plt.figure()
    fig_P.suptitle('P drop curve')
    plt.plot(Z, curves['P'], label='P(Kpa)')

    plt.legend()

    fig_Q = plt.figure()
    fig_Q.suptitle('Flow rates')
    plt.plot(Z, curves['Q'], label='volumetric gas flow rate(m3/s)')

    plt.legend()
    fig_species = plt.figure()
Example #27
    def getColors(self):

        verifylist = ''  # list used to check for repeated names
        namecolor = ''
        idcolor = ''
        hexcolor = ''
        text_final = '"ColorName","ColorId","ColorHex"\n'

        links = [
            'https://www.coral.com.br/pt/paletas-de-cor/h_white',
            'https://www.coral.com.br/pt/paletas-de-cor/h_red',
            'https://www.coral.com.br/pt/paletas-de-cor/h_orange',
            'https://www.coral.com.br/pt/paletas-de-cor/h_gold',
            'https://www.coral.com.br/pt/paletas-de-cor/h_yellow',
            'https://www.coral.com.br/pt/paletas-de-cor/h_lime',
            'https://www.coral.com.br/pt/paletas-de-cor/h_green',
            'https://www.coral.com.br/pt/paletas-de-cor/h_teal',
            'https://www.coral.com.br/pt/paletas-de-cor/h_blue',
            'https://www.coral.com.br/pt/paletas-de-cor/h_violet',
            'https://www.coral.com.br/pt/paletas-de-cor/h_cool%20neutral',
            'https://www.coral.com.br/pt/paletas-de-cor/h_warm%20neutral'
        ]

        index = 1  # variable used to grab the objects at each position in the html
        for link in links:  # iterate over each link, one per color tone

            self.driver.get(link + '?tab=2')
            tm(4)

            while (True):  # infinite loop since the object count is unknown; an index error breaks out of the loop
                try:
                    element = self.driver.find_element_by_xpath(
                        f'/html/body/div[3]/div/div/section/div/section/section[2]/div/div[2]/div/section/div/div[{index}]/a'
                    )

                    namecolor = element.get_attribute('data-title')
                    idcolor = element.get_attribute('data-colorid')
                    hexcolor = '#' + str(element.get_attribute('data-id'))

                    if verifylist.find(namecolor) == -1:

                        text_final = text_final + f'"{namecolor}"' + ',' + f'"{idcolor}"' + ',' + f'"{hexcolor}"' + '\n'
                        print(namecolor)

                    verifylist = verifylist + ',' + namecolor

                    index = index + 1
                except:
                    index = 1
                    verifylist = ''
                    break
            groumps = 1
            controller = True
            while (True):

                self.driver.get(link + '?tab=1')
                tm(7)
                self.driver.find_element_by_class_name(
                    'content-closed ').click()  # button to show more colors

                tm(2)
                try:
                    while (True):  # grabbing the color data

                        # if an error is raised here we exit the loop
                        element = self.driver.find_element_by_xpath(
                            f'/html/body/div[3]/div/div/section/div/section/section[2]/div/div[2]/div[2]/div[{index}]/div[2]/div[1]/a'
                        )

                        try:  # on an error, exit the loop and continue with the next color tone
                            element = self.driver.find_element_by_xpath(
                                f'/html/body/div[3]/div/div/section/div/section/section[2]/div/div[2]/div[2]/div[{index}]/div[2]/div[{groumps}]/a'
                            )

                        except:
                            index = index + 1
                            groumps = 1
                            verifylist = ''
                            break
                        namecolor = element.get_attribute('data-title')
                        idcolor = element.get_attribute('data-colorid')
                        hexcolor = '#' + str(element.get_attribute('data-id'))
                        if verifylist.find(namecolor) == -1:
                            text_final = text_final + f'"{namecolor}"' + ',' + f'"{idcolor}"' + ',' + f'"{hexcolor}"' + '\n'
                            print(namecolor)
                        verifylist = verifylist + ',' + namecolor
                        groumps = groumps + 1

                except:

                    break

        # write the csv file, then convert it to excel
        with open('data_coral.csv', 'w', -1, 'utf-8') as csv_file:
            csv_file.write(text_final)
        read_file = pd.read_csv('data_coral.csv')
        read_file.to_excel('data_Coral.xlsx', index=None, header=True)
Example #28
def start():
    global s, d, a, m, i, tt, o, im, saur
    try:
        sleep(1)
        try:
            o = open('.cookie/sc', 'r').read()
        except IOError:
            gettime()
        o = open('.cookie/sc', 'r').read()
        o = o.split(',')
        if o[0] != tm('%d'):
            gettime()
        saur = int(o[1].replace(':', ''))
        im = int(o[2].replace(':', ''))
        s = int(o[3].replace(':', ''))
        d = int(o[4].replace(':', ''))
        a = int(o[5].replace(':', ''))
        m = int(o[6].replace(':', ''))
        i = int(o[7].replace(':', ''))
        tt = int(tm('%H%M'))
        if tt > im and tt < s:
            ss = 'sholat Subuh'
        elif tt > s and tt < d:
            ss = 'sholat Dzuhur'
        elif tt > d and tt < a:
            ss = 'sholat Ashar'
        elif tt > a and tt < m:
            ss = 'sholat Maghrib'
        elif tt > m and tt < i:
            ss = 'sholat Isya'
        elif tt > i and saur < im or tt < 2400 and saur < im and tt < saur:
            ss = 'Sahur'
        else:
            ss = 'Imsak'
        os.system('clear')
        sleep(1)
        print(banner)
        print(f'''
\033[0mJadwal sholat tanggal\033[96;1;2m {tm('%d %B, %Y')}
\033[0muntuk wilayah kota\033[96;1;2m {o[8]} \033[0mdan sekitarnya.

Sahur        :       {o[1]}

Imsak        :       {o[2]}
Subuh        :       {o[3]}
Dzuhur       :       {o[4]}
Ashar        :       {o[5]}
Maghrib      :       {o[6]}
Isya         :       {o[7]}

Sedang menantikan waktu\033[96;1;2m {ss}...\033[0m\n''')
        while True:
            tt = int(tm('%H%M'))
            time = tm('%H:%M:%S')
            if tt == s:
                print('\033[0m')
                os.system('clear')
                sleep(1)
                print(banner)
                print(
                    f'\n\033[0m[\033[94m●\033[0m] \033[97;1mSAATNYA ADZAN SUBUH\033[0m\nuntuk wilayah kota\033[96;1;2m {o[8]} \033[0mdan sekitarnya\033[0m\n'
                )
                sholat()
                start()
                break
            elif tt == d:
                print('\033[0m')
                os.system('clear')
                sleep(1)
                print(banner)
                print(
                    f'\n\033[0m[\033[94m●\033[0m] \033[97;1mSAATNYA ADZAN DZUHUR\033[0m\nuntuk wilayah kota\033[96;1;2m {o[8]} \033[0mdan sekitarnya\033[0m\n'
                )
                sholat()
                start()
                break
            elif tt == a:
                print('\033[0m')
                os.system('clear')
                sleep(1)
                print(banner)
                print(
                    f'\n\033[0m[\033[94m●\033[0m] \033[97;1mSAATNYA ADZAN ASHAR\033[0m\nuntuk wilayah kota\033[96;1;2m {o[8]} \033[0mdan sekitarnya\033[0m\n'
                )
                sholat()
                start()
                break
            elif tt == m:
                print('\033[0m')
                os.system('clear')
                sleep(1)
                print(banner)
                print(
                    f'\n\033[0m[\033[94m●\033[0m] \033[97;1mSAATNYA ADZAN MAGHRIB\033[0m\nuntuk wilayah kota\033[96;1;2m {o[8]} \033[0mdan sekitarnya\033[0m\n'
                )
                sholat()
                start()
                break
            elif tt == i:
                print('\033[0m')
                os.system('clear')
                sleep(1)
                print(banner)
                print(
                    f'\n\033[0m[\033[94m●\033[0m] \033[97;1mSAATNYA ADZAN ISYA\033[0m\nuntuk wilayah kota\033[96;1;2m {o[8]} \033[0mdan sekitarnya\033[0m\n'
                )
                sholat()
                start()
                break
            elif tt == saur:
                print('\033[0m')
                os.system('clear')
                sleep(1)
                print(banner)
                print(
                    '\n\033[0m[\033[94;1m#\033[0m] \033[97;1mWAKTUNYA BANGUN SAHUR \033[96;1;2malarm waktu sahur sudah berbunyi\n\033[0;93mKredit Sumber: \033[95;3;4m(https://youtu.be/EXjt18hF6UY)\033[0m'
                )
                puasa()
                start()
                break
            elif tt == im:
                print('\033[0m')
                os.system('clear')
                sleep(1)
                print(banner)
                print(
                    f'\n\033[0m[\033[94;1m#\033[0m] \033[97;1mWAKTUNYA IMSAK \033[0muntuk wilayah kota\033[96;1;2m {o[8]} \033[0mdan sekitarnya\n\033[0;93;1mKredit Sumber: \033[95;3;4m(https://youtu.be/OODQRq9BPSI)\033[0m'
                )
                puasa()
                start()
                break
            else:
                print(
                    '\r\033[97;1mTekan CTRL + C > Sekarang pukul\033[91;1m {} '
                    .format(time),
                    end='')
                sys.stdout.flush()
                sleep(1)
    except KeyboardInterrupt:
        print()
        print()
        print('\033[0m[\033[91;1m!\033[0m] \033[97;1mKembali ke menu\033[0m')
        print()
        sleep(1)
        menu()
# print('')
# start_package_genm = tm()
# listam = manipulatedFitss(values_classes, target)
# print('Packages', len(listam))
# print('Example: ', listam[0])
# for i in range(min(10,len(listam))):
# 	print(listam[i])

# print('')
# print('TOTAL TIME: ', tm() - start_package_genm)

# NEW VERSION v2
print('')
print('###  STORING RESULT AS TREE  ###')
print('')
start_package_genm = tm()
treea = manipulatedFitssTree(values_classes, target)

print('  package generation: ', tm() - start_package_genm)

print('')
print('TOTAL TIME: ', tm() - start_package_genm)


# Verify the number of solutions in the tree
def countSolutions(tree):
    if isinstance(tree, int):
        return 1
    else:
        return sum([countSolutions(t) for t in tree])
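# --- Hedged usage example (not part of the original source) ---
# countSolutions treats any int as a leaf and anything else as an
# iterable subtree, so a nested-list tree works directly:
#
#     tree = [[1, 2], [3, [4, 5]]]
#     assert countSolutions(tree) == 5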
Example #30
# get number of cpus available to job
try:
    ncpus = int(os.environ["SLURM_JOB_CPUS_PER_NODE"])
except KeyError:
    ncpus = mp.cpu_count()
# create pool of ncpus workers
p = mp.Pool(ncpus)
print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>#cpu: ', str(ncpus)
"""
A=tm()
p.map(work, LL, )
B=tm()
print '>>>>>>>>>>>>>>>>>>>>>>>> Time: ', str(B-A)
"""

A = tm()
for i in LL:
    p.apply_async(work, args=(i, ))
p.close()
p.join()
B = tm()
print '>>>>>>>>>>>>>>>>>>>>>>>> Time: ', str(B - A)

A = tm()
for i in LL:
    work(i)
B = tm()
print '>>>>>>>>>>>>>>>>>>>>>>>> Time: ', str(B - A)

#print ll
"""
Example #31
    def stopClick(self):

        # Call the back-end exit gracefully function to kill the process currently running
        #class.exit_gracefully()

        # Get the global version of variables and calculate the run time
        global t0, rtime, finalt, errormsg, processType
        rtime = tm() - t0
        finalt = self.formtime()

        #Automated, so just call the save and display method
        if processType == "Automated":
            save = self.save_and_display()
            print "Done with automated process."
            back.motorOff()
            gpio.cleanup()

        # Manual, so take input on the number of trays completed and use that
        # to calculate the filled volume. Then call save and display.
        elif processType == "Manual":
            print "Done with Manual process."
            inputwindow = Toplevel(root)
            inputwindow.title("Input Manual Results")
            inputwindow.geometry("%dx%d%+d%+d" % (300, 200, 250, 125))

            lbl_msg = Label(inputwindow,
                            justify=RIGHT,
                            padx=30,
                            pady=20,
                            text="Input number of trays completed:",
                            font="Verdana 10")
            lbl_msg.grid(column=1, row=1, columnspan=2)

            # Create the 'Trays Completed' label
            lbl_nTrays = Label(inputwindow,
                               justify=RIGHT,
                               padx=15,
                               text="Trays Completed:",
                               font="Verdana 10")
            lbl_nTrays.grid(column=1, row=2)

            # Set the number of trays completed
            self.trayValue = Entry(inputwindow)
            self.trayValue.grid(column=2, row=2)

            # Purely aesthetic label
            lbl_pretty = Label(inputwindow,
                               justify=RIGHT,
                               padx=15,
                               text="",
                               font="Verdana 10")
            lbl_pretty.grid(column=1, row=3)

            submittbtn = Button(inputwindow,
                                text="Done",
                                command=self.save_and_display,
                                padx=10,
                                pady=5)
            submittbtn.grid(column=1, row=4, columnspan=2)

        # Testing, so just state that the process is stopped.
        elif processType == "Testing":
            print "Done with Testing process."

        # No process type entered, so throw an error
        # (errormsg is already declared global at the top of this method)
        else:
            errormsg = "No process type."

        # Change button states
        self.stop.config(state=DISABLED)
        self.select.config(state=NORMAL)
Example #32
    def play_game(self):
        input_data = {'patients': self.patients, 'hospitals': self.hospitals, 'ambulances': self.ambulances}
        print(input_data)
        print('---------')
        buff_size_needed = sys.getsizeof(json.dumps(input_data))
        buff_size_needed = 1<<(buff_size_needed-1).bit_length()  # round up to the next power of two
        buff_size_message = {'buffer_size': buff_size_needed}
        self.server.send_to(json.dumps(buff_size_message), 0)
        time.sleep(2)
        self.server.send_to(json.dumps(input_data), 0)
        start = tm()
        buff_size_message = json.loads(self.server.receive_from(0, size=2048))
        buff_size_needed = int(buff_size_message['buffer_size'])
        print(buff_size_needed)
        moves = json.loads(self.server.receive_from(0, size=buff_size_needed))
        stop = tm()

        if (stop-start) > 122:
            m = 'Player ' + str(self.player_name) + ' ran for more than 2 minutes'
            self.game_over(m, [])

        hospital_locations = moves['hospital_loc']
        ambulance_moves = moves['ambulance_moves']
        print(hospital_locations)
        print('------')
        print(ambulance_moves)

        for hos_id in range(0, self.total_hospitals):
            try:
                xloc = hospital_locations[str(hos_id)]['xloc']
                yloc = hospital_locations[str(hos_id)]['yloc']
            except Exception as e:
                m = 'Didn\'t get hospital location for hospital #' + str(hos_id)
                self.game_over(m, [])

            if xloc < 0 or yloc < 0 or xloc > 1000 or yloc > 1000:
                m = 'Invalid hospital location'
                self.game_over(m, [])

            self.hospitals[hos_id]['xloc'] = xloc
            self.hospitals[hos_id]['yloc'] = yloc

        patients_saved = []
        patients_picked_up = []

        for amb_id in range(0, self.total_ambulances):
            try:
                amb_route = ambulance_moves[str(amb_id)]
            except Exception as e:
                continue
            current_loc = (self.hospitals[self.ambulances[amb_id]['starting_hospital']]['xloc'], self.hospitals[self.ambulances[amb_id]['starting_hospital']]['yloc'])

            time_counter = 0
            p_inside_amb = []

            for amb_stop in amb_route:
                if amb_stop[0].lower() == 'p':
                    if len(p_inside_amb) >= 4:
                        m = 'Cannot pick up more than 4 patients'
                        self.game_over(m, [])
                    try:
                        p_id = int(amb_stop[1:])
                        if p_id >= self.total_patients or p_id < 0:
                            m = 'Invalid patient id'
                            print(p_id)
                            self.game_over(m, [])
                    except Exception as e:
                        m = 'Error reading patient id'
                        self.game_over(m, [])
                    if p_id in patients_picked_up:
                        print('Patient ' + str(p_id) + ' has already been picked up')
                    else:
                        p_inside_amb += [p_id]
                        patients_picked_up += [p_id]
                        time_counter += 1
                    new_loc = (self.patients[p_id]['xloc'], self.patients[p_id]['yloc'])
                    time_taken = self.route_time(current_loc, new_loc)
                    time_counter += time_taken
                    current_loc = new_loc
                    continue
                elif amb_stop[0].lower() == 'h':
                    try:
                        h_id = int(amb_stop[1:])
                        if h_id >= self.total_hospitals or h_id < 0:
                            m = 'Invalid hospital id'
                            self.game_over(m, [])
                    except Exception as e:
                        m = 'Error reading hospital id'
                        self.game_over(m, [])
                    new_loc = (self.hospitals[h_id]['xloc'], self.hospitals[h_id]['yloc'])
                    time_taken = self.route_time(current_loc, new_loc)
                    time_counter += time_taken
                    if len(p_inside_amb) > 0:
                        time_counter += 1
                    current_loc = new_loc
                    for patient in p_inside_amb:
                        if time_counter <= self.patients[patient]['rescuetime']:
                            print('Ambulance ' + str(amb_id) + ' saved patient ' + str(patient))
                            patients_saved += [patient]
                        # else:
                        #     print('Patient ' + str(patient) + ' died before reaching the hospital')

                    p_inside_amb = []
                    continue
                else:
                    m = 'Invalid route stop'
                    self.game_over(m, [])

        print('All ambulances have finished their routes')
        print('------------')
        print('Congratulations!')
        print('Patients that lived:')
        print(patients_saved)
        print('Total number of patients saved: ' + str(len(patients_saved)))
        self.game_over('Congratulations!', patients_saved, finished=True)
Example #33
def update_listenkey_timestamp():
    data = get_listenkey()
    data["timestamp"] = int(round(tm() * 1000))
    dump(data, open(filename, "wb"))
Example #34
def set_listenkey(data):
    data["timestamp"] = int(round(tm() * 1000))
    data["createdAt"] = int(round(tm() * 1000))
    dump(data, open(filename, "wb"))
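# --- Hedged context sketch (assumption: not part of the original source) ---
# Examples #33 and #34 share module-level state the snippets omit. Given
# the binary file mode, `dump` is presumably pickle.dump; get_listenkey()
# would read the stored record back. An illustrative setup:
#
#     from pickle import dump, load
#     from time import time as tm
#
#     filename = 'listenkey.pkl'   # illustrative path
#
#     def get_listenkey():
#         with open(filename, 'rb') as f:
#             return load(f)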
Example #35
    #chk2 = make_checksumer_crc32( in_iter=False)
    chk2 = checksum_crc32.from_iter
    import sys
    #f = file( len(sys.argv)>1 and sys.argv[1] or __file__, 'rb' )

    import itertools as it
    class F:
        def __iter__(me):
            return it.repeat( ' jkfldsa; fjdskl;f dskl', 1000000)
        def seek(*a): pass
    f = F()

    from time import clock as tm

    f.seek(0)
    t = tm()
    s = chk1( iter(f) )
    t = tm() - t
    print s, 'ok1', t
    f.seek(0)
    t = tm()
    s = chk2( iter(f) )
    t = tm() - t
    print s, 'ok2', t


    N = 12+0000
    chk0 = make_checksumer_crc32()
    t = tm()
    for a in xrange(N):
        s = chk0( '13u iouiouiopuiopio jko ' )
Example #36
def siev_guaz(n):
    # NOTE: the original snippet was truncated mid-generator; the function
    # header and the (4, 2) increment wheel below are reconstructed to match
    # the 6k±1 stepping (start at i=7, j=49) used by the rest of the code.
    def inkrementor():
        inkr = (4, 2)
        while True:
            yield from inkr
    if n < 100:
        return [i for i in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97) if i < n]
    res = [2, 3, 5]
    s = bytearray([0])
    i = 7
    j = 49
    s *= n
    s[9::6] = bytearray([1]) * int((n - 9) / 6 + 1)       # mark odd multiples of 3
    s[25::10] = bytearray([1]) * int((n - 25) / 10 + 1)   # mark odd multiples of 5
    it = inkrementor()
    def mo():
        s[j::i + i] = bytearray([1]) * int((n - j) / (i + i) + 1)
        res.append(i)
    while j < n:
        if not s[i]:
            mo()
        i += next(it)
        j = i * i
    return res + [g for g in range(i, n, 2) if not s[g]]

N = 10**8
tries = 1

time_start = tm()
for i in range(tries):
    test=siev_np_guaz(N)
    tim = round(tm()-time_start,5)
print ("Time to complete funkction guaz_numpy (in sec): ", tim, len(test))

time_start = tm()
for i in range(tries):
    test=siev_guaz(N)
    tim = round(tm()-time_start,5)
print ("Time to complete funkction guaz_numpy (in sec): ", tim, len(test))
Example #37
    def fill_in_dictionary(self,
                           rho,
                           theta,
                           normalize_noll=False,
                           print_option=None):
        """
        Takes the dictionary containing the Jacobi Polynomials needed to start the
        recurrence and updates the dictionary with the newly computed polynomials

        At the same time, it translates the Jacobi polynomials into Zernike polynomials
        and adds them into a Zernike series
        """

        # Transform rho to Jacobi coordinate x = 1 - 2 * rho**2
        x = 1. - 2 * rho**2

        rho_max = np.max(rho)
        extends = [-rho_max, rho_max, -rho_max, rho_max]

        zern_counter = 0
        Z_series = np.zeros_like(rho)
        self.times = []  # list to save the times required to compute each Zernike

        # Fill up the dictionary
        for n in range(self.n + 1):
            for m in np.arange(parity(n), n + 1, 2):
                n_n = (n - m) // 2
                alfa = m
                # Compute the corresponding Jacobi polynomial via Recursion
                start = tm()
                P_n_alfa = self.smart_jacobi(x=x, n=n_n, alfa=alfa, beta=0)
                self.dict_pol['P%d%d' % (n_n, alfa)] = P_n_alfa
                # Transform Jacobi to Zernike Radial polynomial R_nm
                R = (-1)**(n_n) * rho**m * P_n_alfa

                # Transform to complete Zernike Z_nm
                if m == 0:
                    norm_coeff = np.sqrt(n + 1) if normalize_noll else 1.
                    Z = norm_coeff * R
                    end = tm()
                    self.times.append((end - start))
                    Z_series += self.coef[zern_counter] * Z
                    zern_counter += 1
                    if print_option == 'All':
                        print('n=%d, m=%d' % (n, m))
                        plt.figure()
                        plt.imshow(invert_mask(Z, self.mask),
                                   extent=extends,
                                   cmap='jet')
                        plt.title("Zernike(%d, %d)" % (n, m))
                        plt.colorbar()

                else:  # m > 0
                    norm_coeff = np.sqrt(2) * np.sqrt(
                        n + 1) if normalize_noll else 1.
                    # Compute the m+ Zernike
                    Zpos = norm_coeff * R * np.cos(np.abs(m) * theta)
                    end1 = tm()
                    Z_series += self.coef[zern_counter] * Zpos
                    zern_counter += 1
                    # Compute the m- Zernike
                    Zneg = norm_coeff * R * np.sin(np.abs(m) * theta)
                    end2 = tm()
                    self.times.append((end1 - start))
                    self.times.append((end2 - end1))
                    Z_series += self.coef[zern_counter] * Zneg
                    zern_counter += 1

                    if print_option == 'All':  # Show only m > 0 to save Figures
                        print('n=%d, m=%d' % (n, m))
                        plt.figure()
                        plt.imshow(invert_mask(Zpos, self.mask),
                                   extent=extends,
                                   cmap='jet')
                        plt.title("Zernike(%d, %d)" % (n, m))
                        plt.colorbar()
                        # plt.figure()
                        # plt.imshow(invert_mask(Zneg, self.mask), cmap='jet')
                        # plt.title("Zernike(%d, %d)" %(n,-m))
                        # plt.colorbar()
        return Z_series
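    # --- Hedged helper sketch (assumption: not part of the original source) ---
    # parity(n) above is assumed to return n % 2, so the inner loop walks m
    # over values with the same parity as n, as the Zernike radial
    # polynomials R_nm require:
    #
    #     def parity(n):
    #         return n % 2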
Example #38
def scipyOdeSolverForMultiProcess(X):
    """
        Solve the ODE integration multiple times using odeint from scipy.
    :param X: tuple containing speciesArray,time,df,functionArgs,outputArgs
            speciesArray: 2d-array with each row as the initialization for one run of the integration
            time: time step for output
            df: function used to compute the derivative
            functionArgs: additional args to give to the function
                    KarrayA: 3d-matrix with the reaction constant and stoichio coefficient
                    maskA: mask
                    maskComplementary: 1-mask
                    coLeak: the small leak used for stability, added at the end of each computation of the derivative for every species
            outputDic: dictionary, args used for the different output modes.
                        Should contain the following mandatory field:
                            "mode": a list indicating the different modes

                        Possible modes:
                            "display": print a starting message
                                        outputDic should then contain the field:
                                            "idx": idx of the subprocess
                            "verbose": print progress for each run
                            "time":
                                    saving of the average integration time.
                            "ouputEqui":
                                    save of the last value reached by the integrator
                                        outputdDic should then contain the field:
                                            "outputDic": name of species to record
                                            "nameDic": link name to position
                                            "output": array to store the results
                            "outputPlot":
                                    save all value reached on time steps.
                                        outputdDic should then contain the field:
                                            "outputDic": name of species to record
                                            "nameDic": link name to position
                                            "outputPlot": array to store the results
    :return: Depending on which modes are present in outputDic["mode"]:
            output: for each run (column), for each species in outputList (row), the final value reached.
            outputPlot: for each run (column), for each species in outputList (row), all the values reached.
            avgTime: average integration time per run.
            The position in the returned tuple matches the position of the mode in outputDic["mode"]
    """

    speciesArray,time,df,functionArgs,outputDic = X

    if "display" in outputDic["mode"]:
        print("starting "+str(outputDic["idx"]))
    if "outputEqui" in outputDic["mode"]:
        output = outputDic["output"]
        nameDic = outputDic["nameDic"]
    if "outputPlot" in outputDic["mode"]:
        outputPlot = outputDic["outputPlot"]
        nameDic = outputDic["nameDic"]
    avgTime = 0
    for idx,species in enumerate(speciesArray):
        t0=tm()
        X2,_=odeint(df,species,time,args=functionArgs,full_output=True,rtol=1e-6,atol=1e-12)
        timeTook = tm()-t0
        avgTime += timeTook
        if "verbose" in outputDic["mode"]:
            print(str(idx)+" on "+str(len(speciesArray))+" for "+str(outputDic["idx"])+" in "+str(timeTook))
        if "outputEqui" in outputDic["mode"] or "outputPlot" in outputDic["mode"]:
            for idxOut,k in enumerate(outputDic["outputList"]):
                if "outputEqui" in outputDic["mode"]:
                    output[idxOut,idx]=X2[-1,nameDic[k]]
                if "outputPlot" in outputDic["mode"]:
                    outputPlot[idxOut,idx,:]=X2[:,nameDic[k]]
    results=[0 for _ in range(len(outputDic["mode"]))]
    if("outputEqui" in outputDic["mode"]):
        results[outputDic["mode"].index("outputEqui")] = output
    if("outputPlot" in outputDic["mode"]):
        results[outputDic["mode"].index("outputPlot")] = outputPlot
    if "time" in outputDic["mode"]:
        results[outputDic["mode"].index("time")] = avgTime/len(speciesArray)
    return tuple(results)
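# --- Hedged usage sketch (not part of the original source) ---
# The X tuple packs everything one worker needs, so the function drops
# straight into multiprocessing.Pool.map. Illustrative wiring, assuming
# df, speciesArray, time, functionArgs, nameDic and a preallocated output
# array already exist:
#
#     import multiprocessing
#
#     outputDic = {"mode": ["verbose", "time", "outputEqui"],
#                  "idx": 0,
#                  "nameDic": nameDic,
#                  "outputList": list(nameDic.keys()),
#                  "output": output}
#     X = (speciesArray, time, df, functionArgs, outputDic)
#     with multiprocessing.Pool(4) as pool:
#         results = pool.map(scipyOdeSolverForMultiProcess, [X])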
Example #39
def executeFixPointSimulation(directory_for_network, inputsArray, masks,initializationDic=None, outputList=None,
                              sparse=False, modes=["verbose","time","outputEqui"],
                              initValue=10**(-13), rescaleFactor=None):
    """
        Execute the simulation of the system saved under the directory_for_network directory.
        InputsArray contain the values for the input species.
    :param directory_for_network: directory path, where the files equations.txt and constants.txt may be found.
    :param inputsArray: The test concentrations, a t * n array where t is the number of tests and n the number of nodes in the first layer.
    :param initializationDic: can contain initialization values for some species. If None, or if a species does not appear in its keys, then its value is set to initValue (default 10**(-13)).
    :param masks: network masks
    :param outputList: list or string, species we would like to see as outputs; if default (None), we will find the species of the last layer.
                                      if string and the value is "nameDic" or "all", we will give all species taking part in the reaction (useful for debugging)
    :param sparse: if sparse; useful for large systems
    :param modes: modes for outputs; doesn't accept outputPlot as it only provides the value at equilibrium now.
    :param initValue: initial concentration value to give to all species
    :param rescaleFactor: if None, computed as the number of nodes; else used to divide the value of the inputs
    :return:
            A result tuple depending on the modes.
    """

    assert "outputPlot" not in modes

    parsedEquation,constants,nameDic=read_file(directory_for_network + "/equations.txt", directory_for_network + "/constants.txt")
    if sparse:
        KarrayA,stochio,maskA,maskComplementary = sparseParser(parsedEquation,constants)
    else:
        KarrayA,stochio,maskA,maskComplementary = parse(parsedEquation,constants)
    KarrayA,T0,C0,constants=setToUnits(constants,KarrayA,stochio)
    print("Initialisation constant: time:"+str(T0)+" concentration:"+str(C0))

    speciesArray = obtainSpeciesArray(inputsArray,nameDic,initValue,initializationDic,C0)
    speciesArray,rescaleFactor = rescaleInputConcentration(speciesArray,nameDic=nameDic,rescaleFactor=rescaleFactor)

    ##SAVE EXPERIMENT PARAMETERS:
    attributesDic = {}
    attributesDic["rescaleFactor"] = rescaleFactor
    attributesDic["T0"] = T0
    attributesDic["C0"] = C0
    for k in initializationDic.keys():
        attributesDic[k] = speciesArray[0,nameDic[k]]
    for idx,cste in enumerate(constants):
        attributesDic["k"+str(idx)] = cste
    attributesDic["Numbers_of_Constants"] = len(constants)
    experiment_path=saveAttribute(directory_for_network, attributesDic)

    shapeP=speciesArray.shape[0]

    # let us assign the right number of tasks to each process
    num_workers = multiprocessing.cpu_count()-1
    idxList = findRightNumberProcessus(shapeP,num_workers)

    # let us find the species of the last layer in case outputList was not provided:
    if outputList is None:
        outputList = obtainOutputArray(nameDic)
    elif type(outputList)==str:
        if outputList=="nameDic" or outputList=="all":
            outputList=list(nameDic.keys())
        else:
            raise Exception("asked outputList is not taken into account.")

    nbrConstant = int(readAttribute(experiment_path,["Numbers_of_Constants"])["Numbers_of_Constants"])
    if nbrConstant == 12: # only one neuron: the constant values are easy to extract
        k1,k1n,k2,k3,k3n,k4,_,k5,k5n,k6,kd,_=[readAttribute(experiment_path,["k"+str(i)])["k"+str(i)] for i in range(0,nbrConstant)]
    else:
        k1,k1n,k2,k3,k3n,k4,_,k5,k5n,k6,kd,_= [0.9999999999999998,0.1764705882352941,1.0,0.9999999999999998,0.1764705882352941,1.0,
                                               0.018823529411764708,0.9999999999999998,0.1764705882352941,1.0,0.018823529411764708,0.018823529411764708]

    inhibTemplateNames = obtainTemplateArray(masks=masks,activ=False)
    activTemplateNames= obtainTemplateArray(masks=masks,activ=True)
    TA = initializationDic[activTemplateNames[0]]/C0
    TI = initializationDic[inhibTemplateNames[0]]/C0
    E0 = initializationDic["E"]/C0
    kdI = kd
    kdT = kd

    myconstants = [k1,k1n,k2,k3,k3n,k4,k5,k5n,k6,kdI,kdT,TA,TI,E0]

    t=tm()
    print("=======================Starting Fixed Point simulation===================")
    copyArgs = obtainCopyArgsFixedPoint(idxList,modes,speciesArray,nameDic,outputList,masks,myconstants,chemicalModel="templateModel")
    with multiprocessing.get_context("spawn").Pool(processes= len(idxList[:-1])) as pool:
        myoutputs = pool.map(fixPointSolverForMultiProcess, copyArgs)
    pool.close()
    pool.join()
    print("Finished computing, closing pool")
    timeResults={}
    timeResults[directory_for_network + "_wholeRun"]= tm() - t

    if("outputEqui" in modes):
        outputArray=np.zeros((len(outputList), shapeP))
    times = []
    for idx,m in enumerate(myoutputs):
        if("outputEqui" in modes):
            try:
                outputArray[:,idxList[idx]:idxList[idx+1]] = m[modes.index("outputEqui")]
            except Exception as e:
                raise Exception("failed to gather outputEqui results from worker "+str(idx)) from e
        if("time" in modes):
            times += [m[modes.index("time")]]
    if("time" in modes):
        timeResults[directory_for_network + "_singleRunAvg"] = np.sum(times) / len(times)
    # Let us save our result:
    savedFiles = ["false_result.csv","output_equilibrium.csv","output_full.csv"]
    for k in nameDic.keys():
        savedFiles += [k+".csv"]
    for p in savedFiles:
        if(os.path.exists(os.path.join(experiment_path, p))):
            print("Already exists: renaming older")
            os.rename(os.path.join(experiment_path,p),os.path.join(experiment_path,p.split(".")[0]+"Old."+p.split(".")[1]))
    if("outputEqui" in modes):
        df=pandas.DataFrame(outputArray)
        df.to_csv(os.path.join(experiment_path, "output_equilibrium.csv"))
    results=[0 for _ in range(len(modes))]
    if("outputEqui" in modes):
        results[modes.index("outputEqui")]= outputArray
    if "time" in modes:
        results[modes.index("time")]=timeResults
    return tuple(results)
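# --- Added usage sketch: a hypothetical call, assuming ./mynetwork holds
# equations.txt and constants.txt and that initializationDic covers "E" and the
# template species required above. Names and values are illustrative only.
# import numpy as np
# inputsArray = np.array([[1e-6, 2e-6],
#                         [5e-7, 1e-6]])      # 2 tests, 2 input nodes
# outputArray, timeResults = executeFixPointSimulation(
#     "mynetwork", inputsArray, masks,
#     initializationDic=initializationDic,
#     modes=["outputEqui", "time"])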
Example #40
print(time.time())

timex = time.localtime()

print("year: ", timex.tm_year)

print("month: ", timex.tm_mon)

print("date: ", timex.tm_mday)
print('-' * 50)
input("press enter to start")

wait_time = random.randint(1, 6)
time.sleep(wait_time)
start_time = tm()

input("press enter to stop")
end_time = tm()

print("Started at " + time.strftime("%x", time.localtime(start_time)))
print("Ended at " + time.strftime("%x", time.localtime(end_time)))
print("Reaction time is: {} ".format(end_time - start_time))
print('-' * 50)

print("time:   ", time.get_clock_info('time'))
print("monotonic:   ", time.get_clock_info('monotonic'))
print("perf_counter:   ", time.get_clock_info('perf_counter'))
print("process_time:   ", time.get_clock_info('process_time'))

print('-' * 50)
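# --- Added note: time.time() follows the wall clock, which can jump if the
# system clock is adjusted; for intervals such as the reaction time above, the
# monotonic time.perf_counter() is the safer choice. A minimal sketch:
from time import perf_counter

start = perf_counter()
input("press enter to stop")
print("Reaction time is: {:.3f} s".format(perf_counter() - start))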
Example #41
 def invoke(self, context, event):
     self.time = tm()
     context.window_manager.modal_handler_add(self)
     print('Auto Incremental Saving started successfully')
     return {'RUNNING_MODAL'}
Example #42
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
from time import time as tm

start_time = tm()

# import numpy as np
# import pandas as pd
#from collections import OrderedDict

WORKFILE_0 = "D:\\code\\dna\\E_coli.txt"
WORKFILE_1 = "D:\\code\\dna\\Vibrio_cholerae_short.txt"
WORKFILE_2 = "D:\\code\\dna\\dataset_3_5.txt"
WORKFILE_3 = "D:\\code\\dna\\dataset_example_H.txt"
WORKFILE_5 = "D:\\code\\dna\\Sample_Dataset.txt"
# WORKFILE_1 = "D:\\code\\dna\\Vibrio_cholerae.txt"
# WORKFILE_1 = "D:\\code\\dna\\dataset_5_5.txt"
# WORKFILE_1 = "D:\\code\\dna\\dataset_4_5_example.txt"
# WORKFILE_1 = "D:\\code\\dna\\ex135.txt"
# WORKFILE_1 = "D:\\code\\dna\\dataset_2994_5.txt"

OUTPUTFILE = "D:\\code\\dna\\computing_frequencies_output.txt"
# OUTPUTFILE = "D:\\code\\dna\\complementary_nucleotide_output.txt"


def read_dna(workfile):
    """Read a DNA string from a text file. Parameters: workfile : str (path)."""
    with open(workfile) as f_in:
        return f_in.read().strip()
Example #43
def update_particles_flpf(graph: MultiDiGraph,
                          particles: MMParticles,
                          new_observation: np.ndarray,
                          time_interval: float,
                          mm_model: MapMatchingModel,
                          proposal_func: Callable,
                          lag: int = 3,
                          max_rejections: int = 50,
                          **kwargs) -> MMParticles:
    """
    Joint fixed-lag update in light of a newly received observation, uses particle filter trajectories for stitching
    Propose + reweight then fixed-lag stitching.
    :param graph: encodes road network, simplified and projected to UTM
    :param particles: unweighted particle approximation up to the previous observation time
    :param new_observation: cartesian coordinate in UTM
    :param time_interval: time between last observation and newly received observation
    :param mm_model: MapMatchingModel
    :param proposal_func: function to propagate and weight particles
    :param lag: fixed lag for resampling/stitching
    :param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
        0 will do full fixed-lag stitching and track ess_stitch
    :param kwargs:
        any additional arguments to be passed to proposal
        i.e. d_refine or d_max for optimal proposal
    :return: MMParticles object
    """
    start = tm()

    # Propose and weight for each particle
    out_particles, weights, new_norm_constants = propose_particles(
        proposal_func,
        None,
        graph,
        particles,
        new_observation,
        time_interval,
        mm_model,
        full_smoothing=True,
        store_norm_quants=False,
        **kwargs)

    # Normalise weights
    weights /= sum(weights)

    if np.any(np.isnan(weights)):
        raise ZeroDivisionError('Map-matching failed (all weights zero)')

    # Store norm constants
    if hasattr(out_particles, 'prior_norm'):
        out_particles.prior_norm = np.vstack(
            [out_particles.prior_norm, new_norm_constants])
    else:
        out_particles.prior_norm = new_norm_constants[None]

    # Store ESS
    out_particles.ess_pf = np.append(out_particles.ess_pf,
                                     1 / np.sum(weights**2))

    # Update time intervals
    out_particles.time_intervals = np.append(out_particles.time_intervals,
                                             time_interval)

    # Resample
    out_particles = fixed_lag_stitching(graph, mm_model, out_particles,
                                        weights, lag, max_rejections)

    end = tm()
    out_particles.time += end - start

    return out_particles
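# --- Added usage sketch: a hypothetical online loop, one call per new GPS
# observation; `graph`, `polyline`, `time_intervals`, `initiate_particles` and
# `get_proposal` are taken from the surrounding examples, the values are made up.
# particles = initiate_particles(graph, polyline[0], n_samps=100, mm_model=mm_model)
# for obs, dt in zip(polyline[1:], time_intervals):
#     particles = update_particles_flpf(graph, particles, obs, dt, mm_model,
#                                       proposal_func=get_proposal('optimal'),
#                                       lag=3, max_rejections=50)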
Example #44
def offline_map_match(
        graph: MultiDiGraph,
        polyline: np.ndarray,
        n_samps: int,
        timestamps: Union[float, np.ndarray],
        mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
        proposal: str = 'optimal',
        d_refine: int = 1,
        initial_d_truncate: float = None,
        max_rejections: int = 20,
        ess_threshold: float = 1,
        store_norm_quants: bool = False,
        **kwargs) -> MMParticles:
    """
    Runs offline map-matching. I.e. receives a full polyline and returns an equal probability collection
    of trajectories.
    Forward-filtering backward-simulation implementation - no fixed-lag approximation needed for offline inference.
    :param graph: encodes road network, simplified and projected to UTM
    :param polyline: series of cartesian coordinates in UTM
    :param n_samps: int
        number of particles
    :param timestamps: seconds
        either float if all times between observations are the same, or a series of timestamps in seconds/UNIX timestamp
    :param mm_model: MapMatchingModel
    :param proposal: either 'optimal' or 'aux_dist'
        defaults to optimal (discretised) proposal
    :param d_refine: metres, resolution of distance discretisation
    :param initial_d_truncate: distance beyond which to assume zero likelihood probability at time zero
        defaults to 5 * mm_model.gps_sd
    :param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
        0 will do full fixed-lag stitching and track ess_stitch
    :param ess_threshold: in [0,1], particle filter resamples if ess < ess_threshold * n_samps
    :param store_norm_quants: if True, normalisation quantities (including gradient evals) are returned in out_particles
    :param kwargs: optional parameters to pass to proposal
        i.e. d_max, d_refine or var
        as well as ess_threshold for backward simulation update
    :return: MMParticles object
    """
    proposal_func = get_proposal(proposal)

    num_obs = len(polyline)

    ess_all = max_rejections == 0

    start = tm()

    filter_particles = [None] * num_obs
    filter_weights = np.zeros((num_obs, n_samps))

    # Initiate filter_particles
    filter_particles[0] = initiate_particles(graph,
                                             polyline[0],
                                             n_samps,
                                             mm_model=mm_model,
                                             d_refine=d_refine,
                                             d_truncate=initial_d_truncate,
                                             ess_all=ess_all)
    filter_weights[0] = 1 / n_samps
    live_weights = filter_weights[0].copy()

    ess_pf = np.zeros(num_obs)
    ess_pf[0] = n_samps

    print("0 PF ESS: " + str(ess_pf[0]))

    if 'd_refine' in inspect.getfullargspec(proposal_func)[0]:
        kwargs['d_refine'] = d_refine

    time_interval_arr = get_time_interval_array(timestamps, num_obs)

    # Forward filtering, storing x_t-1, x_t ~ p(x_t-1:t|y_t)
    for i in range(num_obs - 1):
        resample = ess_pf[i] < ess_threshold * n_samps
        filter_particles[i +
                         1], temp_weights, temp_prior_norm = propose_particles(
                             proposal_func,
                             live_weights if resample else None,
                             graph,
                             filter_particles[i],
                             polyline[i + 1],
                             time_interval_arr[i],
                             mm_model,
                             full_smoothing=False,
                             store_norm_quants=store_norm_quants,
                             **kwargs)

        filter_particles[i].prior_norm = temp_prior_norm

        if not resample:
            temp_weights *= live_weights

        temp_weights /= np.sum(temp_weights)
        filter_weights[i + 1] = temp_weights.copy()
        live_weights = temp_weights.copy()
        ess_pf[i + 1] = 1 / np.sum(temp_weights**2)

        print(
            str(filter_particles[i + 1].latest_observation_time) +
            " PF ESS: " + str(ess_pf[i + 1]))

    # Backward simulation
    out_particles = backward_simulate(graph,
                                      filter_particles,
                                      filter_weights,
                                      time_interval_arr,
                                      mm_model,
                                      max_rejections,
                                      verbose=True,
                                      store_norm_quants=store_norm_quants)
    out_particles.ess_pf = ess_pf

    end = tm()
    out_particles.time = end - start
    return out_particles
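# --- Added usage sketch: a hypothetical offline call, assuming `graph` is a
# simplified road network projected to UTM and `polyline` is an m x 2 array of
# UTM coordinates observed every 15 seconds.
# matched = offline_map_match(graph, polyline, n_samps=100, timestamps=15.0)
# matched.ess_pf then holds the particle-filter ESS at each observation time.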
Example #45
def start():
    global s, d, a, m, i, tt, o, im, saur
    try:
        banner()
        try:
            o = open('.cookie/sc', 'r').read()
        except IOError:
            gettime()
        o = open('.cookie/sc', 'r').read()
        o = o.split(',')
        if o[0] != tm('%d'):  # in this snippet tm appears to alias time.strftime (assumption)
            gettime()

        im = int(o[1].replace(':', ''))
        s = int(o[2].replace(':', ''))
        d = int(o[3].replace(':', ''))
        a = int(o[4].replace(':', ''))
        m = int(o[5].replace(':', ''))
        i = int(o[6].replace(':', ''))
        tt = int(tm('%H%M'))
        saur = im - 100

        if tt > s and tt < d:
            ss = 'the Dzuhur prayer'
        elif tt > d and tt < a:
            ss = 'the Ashar prayer'
        elif tt > a and tt < m:
            ss = 'the Maghrib prayer'
        elif tt > m and tt < i:
            ss = 'the Isya prayer'
        elif tt > i and im < s or tt < 2400 and im < s and tt < im:
            ss = 'Imsak'
        else:
            ss = 'the Subuh prayer'

        banner()
        print(f'''
{lg}Prayer times schedule {lw}{tm('%d %B, %Y')}
{lg}for the city of{lw} {o[7]}{lg} and its surroundings.

{lg}Imsak        :       {lw}{o[1]}
{lg}Subuh        :       {lw}{o[2]}
{lg}Dzuhur       :       {lw}{o[3]}
{lg}Ashar        :       {lw}{o[4]}
{lg}Maghrib      :       {lw}{o[5]}
{lg}Isya         :       {lw}{o[6]}

{lg}Now waiting for {ss}..
ctrl + c to stop''')
        while True:

            tt = int(tm('%H%M'))
            time = tm(f'{lw}%H{lg}:{lw}%M{lg}:{lw}%S{lg}')
            if tt == s:
                banner()
                print(
                    lw +
                    f'                     {lg}TIME FOR THE SUBUH ADHAN{lw}\n                        for the area of\n               {o[7]} city and surroundings'
                )
                print(lg + '_' * 63)

                trdsholat()
                start()

                break
            elif tt == d:
                banner()
                print(
                    lw +
                    f'                     {lg}TIME FOR THE DZUHUR ADHAN{lw}\n                        for the area of\n               {o[7]} city and surroundings'
                )
                print(lg + '_' * 63)

                trdsholat()
                start()
                break
            elif tt == a:
                banner()
                print(
                    lw +
                    f'                     {lg}TIME FOR THE ASHAR ADHAN{lw}\n                        for the area of\n               {o[7]} city and surroundings'
                )
                print(lg + '_' * 63)

                trdsholat()
                start()

                break

            elif tt == m:
                banner()
                print(
                    lw +
                    f'                     {lg}TIME FOR THE MAGHRIB ADHAN{lw}\n                        for the area of\n               {o[7]} city and surroundings'
                )
                print(lg + '_' * 63)

                trdsholat()
                start()
                break

            elif tt == i:
                banner()
                print(
                    lw +
                    f'                     {lg}TIME FOR THE ISYA ADHAN{lw}\n                        for the area of\n               {o[7]} city and surroundings'
                )
                print(lg + '_' * 63)

                trdsholat()
                start()

                break
            elif tt == im:
                banner()
                print(
                    lw +
                    f'                        {lg}IMSAK TIME{lw}\n                        for the area of\n               {o[7]} city and surroundings'
                )
                print(lg + '_' * 63)

                trdpuasa()
                start()

                break
            elif tt == saur:
                banner()

                print(
                    lw +
                    f'                 {lg}TIME TO WAKE UP FOR SUHOOR !!!{lw}\n                        for the area of\n               {o[7]} city and surroundings\n\n{lg}Credit:{x} https://youtu.be/EXjt18hF6UY'
                )
                print(lg + '_' * 63)

                trdpuasa()
                start()

                break

            else:
                print('\rThe time is now {} '.format(time), end=''),
                sys.stdout.flush()
                sleep(1)

    except KeyboardInterrupt:
        menu()
Example #46
def runner(method, model, target, values, tout):
    num_obj = len(values)
    measurements = {
        'optimum': 'None',
        'generation': 'None',
        'model': 'None',
        'pack_gen': 'None',
        '#_eq_classes': 'None',
        '#_packages': 'None'
    }
    # here I generate the instance
    if model == 'std':
        start_mod_gen = tm()
        if method == 'covering':
            standard = covering_standard(num_obj, target, values, tout)
        elif method == 'packing':
            standard = packing_standard(num_obj, target, values, tout)
        else:
            raise ValueError('WRONG METHOD!!!')
        end_mod_gen = tm() - start_mod_gen
        start_model = tm()
        standard.optimize()
        end_model = tm() - start_model
        optimum = standard.objVal
        # print('optimum: ', optimum)
        # print('solving time: ', end_model)
    elif model == 'eq':
        # I am now generating the equivalence classes out of the values
        values_classes = Partition(values)
        measurements.update({'#_eq_classes': len(values_classes)})
        # generating the list of packages and rearranging the data structure
        start_package_gen = tm()
        if method == 'covering':
            lista = skinnies(
                sorted(values_classes, key=lambda x: x[0],
                       reverse=True).copy(), [], target)
        elif method == 'packing':
            lista = fitss(
                sorted(values_classes, key=lambda x: x[0],
                       reverse=True).copy(), [], target)
        else:
            raise ValueError('WRONG METHOD!!!')
        lista = [[x for x in classe if x[1] > 0] for classe in lista]
        lista = [{str(sub[0]): sub[1] for sub in element} for element in lista]
        # for i in lista:
        #     print(i)
        values_classes = {str(sub[0]): sub[1] for sub in values_classes}
        print('generated_solutions ', len(lista))
        end_package_gen = tm() - start_package_gen
        measurements.update({'pack_gen': end_package_gen})
        measurements.update({'#_packages': len(lista)})
        # now I am running the equivalence class model
        start_mod_gen = tm()
        if method == 'covering':
            equivalence = covering_eq_class(lista, values_classes, tout)
#             uncomment the next line to print the lp model to a separate file (useful to check the single constraints)
#             equivalence.write('model.lp')
        elif method == 'packing':
            equivalence = packing_eq_class(lista, values_classes, tout)
        else:
            raise ValueError('WRONG METHOD!!!')
        end_mod_gen = tm() - start_mod_gen
        start_model = tm()
        equivalence.optimize()
        # variables = equivalence.getVars()
        # for i in variables:
        #     print(i)
        end_model = tm() - start_model
        optimum = equivalence.objVal
        print('package generation time: ', end_package_gen)
    else:
        raise ValueError('WRONG MODEL!!!')
    print('optimum: ', optimum)
    print('model generation: ', end_mod_gen)
    print('solving time: ', end_model)
    measurements.update({'model': end_model})
    measurements.update({'optimum': optimum})
    measurements.update({'generation': end_mod_gen})
    return measurements
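# --- Added usage sketch (hypothetical values): solve a packing instance with
# the equivalence-class model and a 600 s solver timeout.
# stats = runner('packing', 'eq', target, values, tout=600)
# print(stats['optimum'], stats['model'], stats['#_packages'])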
Example #47
import cv2
import numpy as np
from time import time as tm
filename = 'images/gate2.png'
img = cv2.imread(filename)

start = tm()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

gray = np.float32(gray)
dst = cv2.cornerHarris(gray, 2, 3, 0.04)

#result is dilated for marking the corners, not important
#dst = cv2.dilate(dst,None)

# Threshold for an optimal value, it may vary depending on the image.
img[dst > 0.01 * dst.max()] = [0, 255, 0]
print("Corner detection time: {} s".format(tm() - start))
cv2.imshow('dst', img)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
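# --- Added sketch: the same timing pattern around OpenCV's Shi-Tomasi detector,
# cv2.goodFeaturesToTrack; the parameter values are illustrative, not tuned.
start = tm()
# args: image, maxCorners, qualityLevel, minDistance
corners = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)
print("Shi-Tomasi detection time: {} s".format(tm() - start))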
Example #48
def Compositional(problem):

    print('COMPOSITIONAL ALGORITHM')
    print('instance', problem)

    starting_time = tm()

    # first of all, let's parse the json file with the plant layout and the jobs info
    jobs, nodes, edges, Autonomy, ATRs, charging_coefficient = json_parser(
        'test_cases/%s.json' % problem)

    # now let's build the graph out of nodes and edges
    graph = nx.DiGraph()
    graph.add_nodes_from(nodes)
    graph.add_weighted_edges_from([(i[0], i[1], edges[i][0]) for i in edges])

    # I am flattening the jobs and their tasks to calculate distances between any two interest points
    tasks = {
        j + '_' + i: jobs[j]['tasks'][i]['location']
        for j in jobs for i in jobs[j]['tasks']
    }
    combination = {
        i: (tasks[i[0]], tasks[i[1]])
        for i in combinations(tasks, 2)
    }

    # here I compute the shortest paths between any two customers
    shortest_paths = {(i[0], i[1]): nx.shortest_path(graph,
                                                     combination[i][0],
                                                     combination[i][1],
                                                     weight='weight')
                      for i in combination}
    shortest_paths.update({
        (i[1], i[0]): nx.shortest_path(graph,
                                       combination[i][1],
                                       combination[i][0],
                                       weight='weight')
        for i in combination
        # if k_shortest_paths(graph,combination[i][0],combination[i][1],K,weight='weight') != []
    })

    ############# LET'S INITIALIZE A BUNCH OF STUFF #############

    # decision upon the whole problem
    instance = unknown

    # initialize status of the routing problem
    routing_feasibility = unknown

    # initialize the list of used sets of routes
    previous_routes = []

    # let's set a limit on the number of routes
    routes_bound = 200

    while routing_feasibility != unsat and instance == unknown and len(
            previous_routes) < routes_bound:

        #$$$$$$$$$$$ PRINTING $$$$$$$$$$$$$#
        print('length of previous routes', len(previous_routes))

        # let's solve the routing problem
        routing_feasibility, current_routes, routes_solution = routing(
            edges, jobs, tasks, Autonomy, shortest_paths, previous_routes)

        # for i in current_routes:
        #     print(i)

        previous_routes.append(routes_solution)

        ########### TEST #############
        # routing_feasibility = unsat

        #$$$$$$$$$$$ PRINTING $$$$$$$$$$$$$#
        print('routing', routing_feasibility)

        if routing_feasibility == unsat:
            break

        # let's set the assignment problem feasibility to unknown before running the problem
        assignment_feasibility = unknown

        # initialize the list of used assignments
        previous_assignments = []

        while assignment_feasibility != unsat and instance == unknown:

            #$$$$$$$$$$$ PRINTING $$$$$$$$$$$$$#
            # print('length of previous ass', len(previous_assignments))

            ##### THIS WILL BE REMOVED AFTER I AM DONE FIXING STUFF ######
            current_paths = shortest_paths

            # shortest_paths_solution,paths_combo = paths_formatter(current_routes,current_paths,tasks)

            assignment_feasibility, locations, current_assignment = assignment(
                ATRs, current_routes, charging_coefficient,
                previous_assignments)

            # for i in locations:
            #     print(i)

            previous_assignments.append(current_assignment)

            ########### TEST #############
            # assignment_feasibility = unsat

            #$$$$$$$$$$$ PRINTING $$$$$$$$$$$$$#
            print(' assignment', assignment_feasibility)

            if assignment_feasibility == unsat:
                break

            schedule_feasibility, node_sequence, edge_sequence = schedule(
                locations, edges)

            ########### TEST #############
            # schedule_feasibility = unsat

            #$$$$$$$$$$$ PRINTING $$$$$$$$$$$$$#
            print('     schedule', schedule_feasibility)

            if schedule_feasibility == sat:
                instance = schedule_feasibility

            # let's format the current paths so that I can use them as a previous solutions for the
            # path changing problem
            shortest_paths_solution, paths_combo = paths_formatter(
                current_routes, current_paths, tasks)

            # initialize status of used paths for the current set of routes
            previous_paths = [shortest_paths_solution]

            # initialize status of the assignment problem
            paths_changing_feasibility = unknown

            # let's set a limit on the number of paths to try otherwise we'll get stuck in this loop
            bound = 15
            counter = 0
            while paths_changing_feasibility != unsat and instance == unknown and counter < bound:

                #$$$$$$$$$$$ PRINTING $$$$$$$$$$$$$#
                print('iteration', counter)

                paths_changing_feasibility, paths_changing_solution = changer(
                    graph, paths_combo, previous_paths)

                #$$$$$$$$$$$ PRINTING $$$$$$$$$$$$$#
                print('         paths_changer', paths_changing_feasibility)

                previous_paths.append(paths_changing_solution)

                # just an intermediate step to convert the paths_changing solution into a format readable by the route_checker
                buffer = {
                    route: {
                        pair: [
                            sol[2] for sol in paths_changing_solution
                            if sol[0] == route and sol[1] == pair
                        ]
                        for pair in paths_combo[route]
                    }
                    for route in paths_combo
                }

                # here I get the new paths in a format I can use
                # to check feasibility against time windows and operating range
                new_paths = {}
                for route in buffer:
                    new_paths.update({route: {}})
                    for pair in buffer[route]:
                        sequence = list(buffer[route][pair])
                        path = [pair[0]]
                        for _ in range(len(sequence)):
                            for i in sequence:
                                if i[0] == path[-1]:
                                    path.append(i[1])
                        # print(route,pair,path)
                        new_paths[route].update({pair: path})
                current_paths = new_paths

                routes_checking_feasibility, buffer_routes = routes_checking(
                    edges, jobs, tasks, Autonomy, current_paths,
                    current_routes)

                ########### TEST #############
                # routes_checking_feasibility = unsat

                #$$$$$$$$$$$ PRINTING $$$$$$$$$$$$$#
                print('         routes check', routes_checking_feasibility)

                if routes_checking_feasibility == sat:

                    current_routes = buffer_routes

                    assignment_feasibility_2, locations_2, _ = assignment(
                        ATRs, current_routes, charging_coefficient)

                    ########### TEST #############
                    # assignment_feasibility_2 = unsat

                    #$$$$$$$$$$$ PRINTING $$$$$$$$$$$$$#
                    print('             assignment check',
                          assignment_feasibility_2)

                    if assignment_feasibility_2 == sat:
                        schedule_feasibility, node_sequence, edge_sequence = schedule(
                            locations_2, edges)

                        #$$$$$$$$$$$ PRINTING $$$$$$$$$$$$$#
                        print('                 schedule check',
                              schedule_feasibility)

                        if schedule_feasibility == sat:
                            instance = schedule_feasibility

                if bound < 666:
                    counter += 1
    #### the following lines must be toggled on if you have set up a limit on
    # the number of iterations of the routing problem, and off if you relax it
    #########################################
    # and len(previous_routes) < routes_bound:
    # elif routing_feasibility == unsat and len(previous_routes) == routes_bound:
    #     instance = unknown
    #########################################
    if routing_feasibility == unsat and len(previous_routes) < routes_bound:
        instance = routing_feasibility
    elif routing_feasibility == unsat and len(previous_routes) == routes_bound:
        instance = unknown

    running_time = round(tm() - starting_time, 2)

    optimum = 'None'
    # just some output to check while running the instances in batches

    print('  feasibility', instance)
    print('  running time', running_time)
    if instance == sat:
        optimum = sum([i[1] for i in current_routes])
        print('     travelling distance: ', optimum)

        # print('##########################################')
        # for i in current_routes:
        #     print(i)
        # print('##########################################')
        # for i in locations:
        #     print(i)
        # print('##########################################')
        # for i in node_sequence:
        #     print(i)

    return instance, optimum, running_time, len(previous_routes)
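# --- Added usage sketch: a hypothetical driver, assuming
# test_cases/<problem>.json exists for the given instance name.
# feasibility, optimum, running_time, n_routes = Compositional('some_instance')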
Example #49
instance = 'benchmark/Falkenauer_t/Falkenauer_t60_00.txt'
# instance = 'benchmark/Hard28/Hard28_BPP13.txt'
#instance = 'benchmark/Wascher/Waescher_TEST0005.txt'

# parsing the instance file
with open(instance, mode='r') as in_file:
    reader = csv.reader(in_file, delimiter='\n')
    values = []
    num_items = next(reader)
    target = int(next(reader)[0])
    for i in (reader):
        values.append(int(i[0]))
# partitioning the items into equivalence classes
values_classes = Partition(values)
start_package_gen = tm()

# generating the fit packages
lista = fitss(
    sorted(values_classes, key=lambda x: x[0], reverse=True).copy(), [],
    target)
print('package generation: ', tm() - start_package_gen)

start_package_manipulation = tm()
# manipulating the list to make it suitable for the next steps
lista = [[x for x in classe if x[1] > 0] for classe in lista]
lista = [{str(sub[0]): sub[1] for sub in element} for element in lista]
print('package manipulation: ', tm() - start_package_manipulation)

# turning the value_classes list into a dict (for convenience)
values_classes = {str(sub[0]): sub[1] for sub in values_classes}
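# --- Added sketch: Partition is defined elsewhere; from its use above (pairs
# sorted by x[0], dicts built as {str(value): count}) it appears to group equal
# item values into (value, multiplicity) classes. A minimal stand-in under that
# assumption:
from collections import Counter

def partition_sketch(values):
    # one (value, multiplicity) pair per distinct item value
    return sorted(Counter(values).items(), key=lambda x: x[0], reverse=True)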
Example #50
def executeODESimulation(funcForSolver, directory_for_network, inputsArray, initializationDic=None, outputList=None,
                         leak=10 ** (-13), endTime=1000, sparse=False, modes=["verbose","time", "outputPlot", "outputEqui"],
                         timeStep=0.1, initValue=10**(-13), rescaleFactor=None):
    """
        Execute the simulation of the system saved under the directory_for_network directory.
        inputsArray contains the values for the input species.
    :param funcForSolver: function used by the solver. Should provide the derivative of concentration with respect to time for all species.
                          May instead be a string, in which case the Lassie method is used.
    :param directory_for_network: directory path, where the files equations.txt and constants.txt may be found.
    :param inputsArray: the test concentrations, a t * n array where t is the number of tests and n the number of nodes in the first layer.
    :param initializationDic: can contain initialization values for some species. If None, or if a species does not appear among its keys, its value is set to initValue (default 10**(-13)).
    :param outputList: list or string, species we would like to see as outputs. If None (default), the species of the last layer are used;
                                      if a string with value "nameDic" or "all", all species taking part in the reaction are returned (useful for debugging).
    :param leak: float, small leak term added to each species' derivative at every time step
    :param endTime: final time
    :param sparse: use the sparse parser; useful for large systems
    :param modes: modes for outputs
    :param timeStep: float, value of time steps to use in integration
    :param initValue: initial concentration value to give to all species
    :param rescaleFactor: if None, computed as the number of nodes; otherwise used to divide the input values.
    :return:
            A result tuple depending on the modes.
    """

    parsedEquation,constants,nameDic=read_file(directory_for_network + "/equations.txt", directory_for_network + "/constants.txt")
    if sparse:
        KarrayA,stochio,maskA,maskComplementary = sparseParser(parsedEquation,constants)
    else:
        KarrayA,stochio,maskA,maskComplementary = parse(parsedEquation,constants)
    KarrayA,T0,C0,constants=setToUnits(constants,KarrayA,stochio)
    print("Initialisation constant: time:"+str(T0)+" concentration:"+str(C0))

    speciesArray = obtainSpeciesArray(inputsArray,nameDic,initValue,initializationDic,C0)
    speciesArray,rescaleFactor = rescaleInputConcentration(speciesArray,nameDic=nameDic,rescaleFactor=rescaleFactor)

    time=np.arange(0,endTime,timeStep)
    derivativeLeak = leak

    ##SAVE EXPERIMENT PARAMETERS:
    attributesDic = {}
    attributesDic["rescaleFactor"] = rescaleFactor
    attributesDic["leak"] = leak
    attributesDic["T0"] = T0
    attributesDic["C0"] = C0
    attributesDic["endTime"] = endTime
    attributesDic["time_step"] = timeStep
    for k in initializationDic.keys():
        attributesDic[k] = speciesArray[0,nameDic[k]]
    for idx,cste in enumerate(constants):
        attributesDic["k"+str(idx)] = cste
    attributesDic["Numbers_of_Constants"] = len(constants)
    experiment_path=saveAttribute(directory_for_network, attributesDic)

    shapeP=speciesArray.shape[0]

    # let us assign the right number of tasks to each process
    num_workers = multiprocessing.cpu_count()-1
    idxList = findRightNumberProcessus(shapeP,num_workers)

    # let us find the species of the last layer in case outputList was not provided:
    if outputList is None:
        outputList = obtainOutputArray(nameDic)
    elif type(outputList)==str:
        if outputList=="nameDic" or outputList=="all":
            outputList=list(nameDic.keys())
        else:
            raise Exception("asked outputList is not taken into account.")
    t=tm()
    print("=======================Starting simulation===================")
    if(hasattr(funcForSolver,"__call__")):
        copyArgs = obtainCopyArgs(modes,idxList,outputList,time,funcForSolver,speciesArray,KarrayA,stochio,maskA,maskComplementary,derivativeLeak,nameDic)
        with multiprocessing.get_context("spawn").Pool(processes= len(idxList[:-1])) as pool:
            myoutputs = pool.map(scipyOdeSolverForMultiProcess, copyArgs)
        pool.close()
        pool.join()
    else:
        assert type(funcForSolver)==str
        copyArgs = obtainCopyArgsLassie(modes,idxList,outputList,time,directory_for_network,parsedEquation,constants,derivativeLeak,nameDic,speciesArray,funcForSolver)
        with multiprocessing.get_context("spawn").Pool(processes= len(idxList[:-1])) as pool:
            myoutputs = pool.map(lassieGPUsolverMultiProcess, copyArgs)
        pool.close()
        pool.join()
    print("Finished computing, closing pool")
    timeResults={}
    timeResults[directory_for_network + "_wholeRun"]= tm() - t

    if("outputEqui" in modes):
        outputArray=np.zeros((len(outputList), shapeP))
    if("outputPlot" in modes):
        outputArrayPlot=np.zeros((len(outputList), shapeP, time.shape[0]))
    times = []
    for idx,m in enumerate(myoutputs):
        if("outputEqui" in modes):
            try:
                outputArray[:,idxList[idx]:idxList[idx+1]] = m[modes.index("outputEqui")]
            except Exception as e:
                raise Exception("failed to gather outputEqui results from worker "+str(idx)) from e
        if("outputPlot" in modes):
            outputArrayPlot[:,idxList[idx]:idxList[idx+1]] = m[modes.index("outputPlot")]
        if("time" in modes):
            times += [m[modes.index("time")]]
    if("time" in modes):
        timeResults[directory_for_network + "_singleRunAvg"] = np.sum(times) / len(times)

    # Let us save our result:
    savedFiles = ["false_result.csv","output_equilibrium.csv","output_full.csv"]
    for k in nameDic.keys():
        savedFiles += [k+".csv"]
    for p in savedFiles:
        if(os.path.exists(os.path.join(experiment_path, p))):
            print("Already exists: renaming older")
            os.rename(os.path.join(experiment_path,p),os.path.join(experiment_path,p.split(".")[0]+"Old."+p.split(".")[1]))
    if("outputEqui" in modes):
        df=pandas.DataFrame(outputArray)
        df.to_csv(os.path.join(experiment_path, "output_equilibrium.csv"))
    elif("outputPlot" in modes):
        assert len(outputArrayPlot) == len(outputList)
        for idx,species in enumerate(outputList):
            df=pandas.DataFrame(outputArrayPlot[idx])
            df.to_csv(os.path.join(experiment_path, "output_full_"+str(species)+".csv"))

    results=[0 for _ in range(len(modes))]
    if("outputEqui" in modes):
        results[modes.index("outputEqui")]= outputArray
    if("outputPlot" in modes):
        results[modes.index("outputPlot")]= outputArrayPlot
    if "time" in modes:
        results[modes.index("time")]=timeResults

    if("outputPlot" in modes): #sometimes we need the nameDic
        results+=[nameDic]
    return tuple(results)
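# --- Added note: the returned tuple follows the order of `modes`, and when
# "outputPlot" is requested, nameDic is appended as the final element. A
# hypothetical unpacking (df here stands for a derivative function):
# modes = ["verbose", "time", "outputPlot", "outputEqui"]
# results = executeODESimulation(df, "mynetwork", inputsArray, modes=modes)
# timeResults = results[modes.index("time")]
# outputArrayPlot = results[modes.index("outputPlot")]
# nameDic = results[-1]        # appended because "outputPlot" is in modes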
Example #51
parameters_to_csv = False

# #### Loading all files

# In[6]:

files = alldatafiles()
no_files = len(files)


# #### Looping over files
# Saving pdf of all torque, temp and species curves

# In[7]:

t = tm()


# Read parameters from file
fitted_parameters = lmfit.Parameters()
fitted_parameters.load(open(parfilename))

with PdfPages('all_curves_7.pdf') as pdf:
    for i, f in enumerate(files):
        time_data, temp_data, torque_data = DataFile(f).simple_data()
        
        # Trimming data
        c = cuts(torque_data)
        time_data = trim(time_data, c)
        temp_data = trim(temp_data, c)
        torque_data = trim(torque_data, c)
Example #52
    sql = MSSQLHelper()

    p1 = '20150101'
    p2 = '20151231'
    p3 = '1048608'
    FORCE_LOAD = False

    strqry = """
            exec 
                [uspGetPartnerAmountsByPeriod_CHURNED] 
                '""" + p1 + """',
                '""" + p2 + """',
                """ + str(p3) + """
            """
    t0 = tm()
    if FORCE_LOAD or (not ("dfinit" in locals())):
        dfinit = sql.Select(strqry)
    t1 = tm()

    print("Downloaded in {:.2f}min".format((t1 - t0) / 60))

    #df = pd.read_csv('saved_data/20170202_1622_rfm_data_clusters.csv')
    #clRep  = ClusterRepository()
    #clConfig = clRep._config_by_id('1')
    #clRep.UploadCluster(df,clConfig, sql)

    final_fields = [
        'PartnerId', 'TotalAmount', 'TranCount', 'NetMargin', 'MaxDateIndex'
    ]
    scale_fields = ["TotalAmount", "TranCount", "MaxDateIndex", "NetMargin"]
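# --- Added sketch: the t0/t1 timing pattern above recurs throughout these
# examples; a small reusable context manager (not part of MSSQLHelper) can
# replace the manual bookkeeping.
from contextlib import contextmanager
from time import time as tm

@contextmanager
def timed(label):
    t0 = tm()
    yield
    print("{} in {:.2f}min".format(label, (tm() - t0) / 60))

# with timed("Downloaded"):
#     dfinit = sql.Select(strqry)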
Example #53
        s = time.time()
        C = self.C if self.C is not None else 1
        fac = int(np.ceil(self.wts.shape[1]*C))
        grid_output = self.grid_net.activity()
        
        # **This line is where a majority of the program's time is spent
        tot_act = np.tensordot(self.wts, grid_output,
                               axes=[[1],[0]])*(2.0/3)/fac + (1.0/3)

        logging.info('Calculated network activity: \t%.4f',time.time()-s)

        if len(tot_act) == 1:
            tot_act = tot_act[0]
        return tot_act

    
if __name__ == '__main__':
    # Check speed of PlaceCells
    logging.basicConfig(level=logging.INFO)
    from time import time as tm
    from GridCells import GridNetwork
    N = 1000
    min_grid_size = 1
    W=H=10
    k = GridNetwork(N, min_grid_size,W,H)
    s = tm()
    for _ in range(10):
        j = PlaceNetwork(500,k,'Monaco updated',.4)
        j.activity()
    print 'Time for 1:%.3f'%(tm()-s,)
    
Example #54
from fcm import *
from language import bit_estimation_for_guess
import os, re
from time import time as tm

confusion_matrix = dict()
order = 3
alpha = 0.01
tamanhos = [
    700000, 600000, 500000, 400000, 300000, 200000, 100000, 50000, 25000
]

total = tm()

for tamanho in tamanhos:
    print("Para tamanho = ", str(tamanho))
    lang_length, lang_learn = [], []
    for idioms in os.listdir(os.path.join(os.getcwd(), "..", "Idioms")):
        text = read_file(os.path.join(os.getcwd(), "..", "Idioms", idioms))
        text = text[0:tamanho]
        lang_length.append((len(text), re.sub(".txt", "",
                                              idioms).capitalize()))

        cnl = combination_next_letter(order, text)
        lp = letter_probability(cnl, alpha, text)

        lang_learn.append((re.sub(".txt", "", idioms).capitalize(), lp, cnl))

    print(lang_length)

    for idiom in os.listdir(os.path.join(os.getcwd(), "..", "Classifier")):
Example #55
import pyowm
import serial
import sys
import time
import os.path
import logging
from time import time as tm
from pyawe import constants

__all__ = ["degree_to_rhumb", "rhumb_to_direction", "degree", "send_data"]


checker = True                      # update display, if first run
close_app = False
poll_interval = 3600                  # polling interval in seconds (3600 s = 1 hour)
init_time = int(tm())

device_file_name = "/dev/ttyUSB{0}"   # device file name in linux

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    filename='system.log')

town = 'Norilsk'
area = 'ru'

########################
serial_port = '/dev/ttyUSB'
serial_baudrate = '9600'
serial_parity = 'N'
########################
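# --- Added sketch of how these constants might drive the polling loop; the
# fetch/update steps are placeholders (only send_data is named in __all__ above,
# the rest is assumed).
# last_update = init_time
# while not close_app:
#     now = int(tm())
#     if checker or now - last_update >= poll_interval:
#         # poll OWM for `town`/`area` and push readings to the display,
#         # e.g. via send_data(...)
#         last_update = now
#         checker = False
#     time.sleep(1)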
Example #56
 def invoke(self, context, event):
     self.time = tm()
     context.window_manager.modal_handler_add(self)
     return {'RUNNING_MODAL'}
Example #57
def update_particles_flbs(graph: MultiDiGraph,
                          particles: MMParticles,
                          new_observation: np.ndarray,
                          time_interval: float,
                          mm_model: MapMatchingModel,
                          proposal_func: Callable,
                          lag: int = 3,
                          max_rejections: int = 20,
                          ess_threshold: float = 1.,
                          **kwargs) -> MMParticles:
    """
    Joint fixed-lag update in light of a newly received observation, uses partial backward simulation runs for stitching
    Propose + reweight then backward simulation + fixed-lag stitching.
    :param graph: encodes road network, simplified and projected to UTM
    :param particles: unweighted particle approximation up to the previous observation time
    :param new_observation: cartesian coordinate in UTM
    :param time_interval: time between last observation and newly received observation
    :param mm_model: MapMatchingModel
    :param proposal_func: function to propagate and weight particles
    :param lag: fixed lag for resampling/stitching
    :param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
        0 will do full fixed-lag stitching and track ess_stitch
    :param ess_threshold: in [0,1], particle filter resamples if ess < ess_threshold * n_samps
    :param kwargs:
        any additional arguments to be passed to proposal
        i.e. d_refine or d_max for optimal proposal
    :return: MMParticles object
    """
    start = tm()

    filter_particles = particles.filter_particles

    # Extract basic quantities
    n = particles.n
    observation_times = np.append(
        particles.observation_times,
        particles.observation_times[-1] + time_interval)
    m = len(observation_times) - 1
    stitching_required = m > lag

    # Initiate particle output
    out_particles = particles.copy()

    # Which particles to propose from (out_particles have been resampled, filter_particles haven't)
    previous_resample = particles.ess_pf[-1] < ess_threshold * n
    base_particles = out_particles if previous_resample else particles.filter_particles[
        -1].copy()

    latest_filter_particles, weights, temp_prior_norm = propose_particles(
        proposal_func,
        None,
        graph,
        base_particles,
        new_observation,
        time_interval,
        mm_model,
        full_smoothing=False,
        store_norm_quants=False,
        **kwargs)

    filter_particles[-1].prior_norm = temp_prior_norm

    # Update weights if not resampled
    if not previous_resample:
        weights *= particles.filter_weights[-1]

    # Normalise weights
    weights /= sum(weights)

    # Append new filter particles and weights, discard old ones
    start_point = 1 if stitching_required else 0
    filter_particles = particles.filter_particles[start_point:] + [
        latest_filter_particles
    ]
    out_particles.filter_weights = np.append(
        out_particles.filter_weights[start_point:],
        weights[np.newaxis],
        axis=0)

    # Store ESS
    out_particles.ess_pf = np.append(out_particles.ess_pf,
                                     1 / np.sum(weights**2))

    # Update time intervals
    out_particles.time_intervals = np.append(out_particles.time_intervals,
                                             time_interval)

    # Run backward simulation
    backward_particles = backward_simulate(
        graph,
        filter_particles,
        out_particles.filter_weights,
        out_particles.time_intervals[-lag:] if lag != 0 else [],
        mm_model,
        max_rejections,
        store_ess_back=False,
        store_norm_quants=True)
    backward_particles.prior_norm = backward_particles.dev_norm_quants[0]
    del backward_particles.dev_norm_quants

    if stitching_required:
        # Largest time not to be resampled
        max_fixed_time = observation_times[m - lag - 1]

        # Extract fixed particles
        fixed_particles = out_particles.copy()
        for j in range(n):
            if out_particles[j] is None:
                continue
            max_fixed_time_index = np.where(
                out_particles[j][:, 0] == max_fixed_time)[0][0]
            fixed_particles[j] = out_particles[j][:(max_fixed_time_index + 1)]

        # Stitch
        out_particles = fixed_lag_stitch_post_split(graph, fixed_particles,
                                                    backward_particles,
                                                    np.ones(n) / n, mm_model,
                                                    max_rejections)

    else:
        out_particles.particles = backward_particles.particles

    out_particles.filter_particles = filter_particles

    end = tm()
    out_particles.time += end - start

    return out_particles
Example #58
    def coletar_informacoes(self):

        self.driver.get('https://www.instagram.com/')
        for cookies in self.cookies:

            self.driver.add_cookie(cookies)

        self.driver.get('https://www.instagram.com/')

        tm(3)  # in this snippet tm appears to alias time.sleep (assumption), pausing 3 s

        self.driver.get(self.link)

        tm(3)
        pai = self.driver.execute_script(
            'return document.getElementsByClassName("v1Nh3 kIKUG  _bz0w")[0]')
        self.link_atual = pai.find_element_by_xpath('.//*').get_attribute(
            'href')
        print(self.link_atual)

        confirm = open('link_atual.pkl', 'r').read()

        if confirm.find(self.link_atual) != -1:

            print('This post already exists')

        else:

            open('link_atual.pkl', 'w').write(self.link_atual)

            self.driver.get(self.link_atual)
            tm(2)
            try:
                link_image = self.driver.find_element_by_class_name(
                    'FFVAD').get_attribute('src')

                print(
                    '==============================================================================================================================================================================='
                )
                print('Starting download')
                print(
                    '==============================================================================================================================================================================='
                )

                open('link_atual.pkl', 'w').write(self.link_atual + ',png')
                self.DownloadImage(link_image)

                print(
                    '==============================================================================================================================================================================='
                )
                print('Download finished')
                print(
                    '==============================================================================================================================================================================='
                )
                legenda = self.driver.find_element_by_xpath(
                    '/html/body/div[1]/section/main/div/div/article/div[3]/div[1]/div/div[1]/div/span/span[1]'
                ).text

                return legenda

            except:

                print('Video detected!!')

                return
Example #59
    else:
        ch, fl = read_file[len(file_leader):].split('_')[:2]
        ch_ = ch[2:]
        fl_ = fl[4:]
        fileId = ', '.join([ch_, fl_])
    reads = process_hdf5(readsFolder + '/' + read_file)

    res = []
    for label, (read, ref_F_R_pair) in \
            zip(['t', 'c'], zip(reads, ref_arrays)):

        qry = read[offset:offset + winSz]
        if len(qry) > 0 and norm == 'y':
            qry = scale(qry)

        tic = tm()
        dr, pos = dtw(qry, ref_F_R_pair)  # Do DTW ...
        toc = tm()
        dt = round(toc - tic, 3)

        rangeTol = 100  # Given leader can affect results ...
        is_ok = frm - rangeTol <= pos <= to + rangeTol

        outBy = abs(trg - pos)

        hdr = [readsFolder, norm, fileId, amplicon] \
            + map(str, [winSz, len(read), len(qry), offset])
		
Example #60
 def invoke(self, context, event):
     self.time = tm()
     context.window_manager.modal_handler_add(self)
     print('Auto Incremental Saving started successfully')
     return {'RUNNING_MODAL'}