Example #1
def call(func_list):
    for index, func_obj in enumerate(func_list):
        func_name = func_obj.keys()[0]
        print yellow("%s\n步骤%d:%s 开始执行" % (dt.strftime(dt.now(), "%Y-%m-%d %H:%M:%S"), index + 1, func_name))
        # Single-step execution (confirmation prompt, commented out below)
        """
		go_on = raw_input("是否执行步骤%s?[y/n]" %func_name)
		if go_on in ['n', 'N', 'no', 'No', 'NO']:
			print yellow("程序已被手动终止")
			exit(1)
		"""
        func = eval(func_name)
        argv = func_obj[func_name]
        argv_str = ""
        success = False
        if argv != "":
            if type(argv) is list:
                for argv_s in argv:
                    argv_str = argv_str + ", %s" % argv_s
                func_full_str = func_name + "(" + argv_str[2:] + ")"
                success = eval(func_full_str)
            else:
                argv = eval(argv)
                success = func(argv)
        else:
            success = func()
        if not success:
            print red("执行步骤:%s 时出错! Bye!" % func_name)
            exit(1)
        print yellow("%s\n步骤%d:%s 执行完成\n" % (dt.strftime(dt.now(), "%Y-%m-%d %H:%M:%S"), index + 1, func_name))
        print ""
 def get_model_data_as_json(self, model_data):
     for data in model_data:
         if 'write_date' in data and data['write_date']:
             data['write_date'] = datetime.strftime(
                 datetime.strptime(data['write_date'], dtf),
                 self.wkhtmltopdf_format
             )
         if 'create_date' in data and data['create_date']:
             data['create_date'] = datetime.strftime(
                 datetime.strptime(data['create_date'], dtf),
                 self.wkhtmltopdf_format
             )
         if 'date_started' in data and data['date_started']:
             data['date_started'] = datetime.strftime(
                 datetime.strptime(
                     data['date_started'],
                     self.pretty_date_format
                 ),
                 self.wkhtmltopdf_format
             )
         if 'date_terminated' in data and data['date_terminated']:
             data['date_terminated'] = datetime.strftime(
                 datetime.strptime(
                     data['date_terminated'],
                     self.pretty_date_format
                 ),
                 self.wkhtmltopdf_format
             )
     return json.dumps(model_data)
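Every branch above applies the same strptime/strftime round trip to a date string. A standalone illustration with assumed format strings (the real method takes dtf and self.wkhtmltopdf_format from the report configuration):

from datetime import datetime

dtf = "%Y-%m-%d %H:%M:%S"              # assumed server datetime format
wkhtmltopdf_format = "%d/%m/%Y %H:%M"  # assumed target format for the report

value = "2015-06-01 14:30:00"
converted = datetime.strftime(datetime.strptime(value, dtf), wkhtmltopdf_format)
print(converted)  # 01/06/2015 14:30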
Example #3
 def download_data():
     from datetime import timedelta, datetime
     # find date range for the split train, val, test (0.8, 0.1, 0.1 of total days)
     print('Downloading data for dates {} - {}'.format(
         datetime.strftime(c.start, "%Y-%m-%d"),
         datetime.strftime(c.end, "%Y-%m-%d")))
     split = [0.8, 0.1, 0.1]
     cumusplit = [np.sum(split[:i]) for i,s in enumerate(split)]
     segment_start_dates = [c.start + timedelta(
         days = int((c.end - c.start).days * interv)) for interv in cumusplit][::-1]
     stocks_list = map(lambda l: l.strip(), open(c.names_file, 'r').readlines())
     by_stock = dict((s, pdr_data.DataReader(s, 'yahoo', c.start, c.end))
             for s in stocks_list)
     seq = [[],[],[]]
     for stock in by_stock:
         lastAc = -1
         daily_returns = deque(maxlen=c.normalize_std_len)
         for rec_date in (c.start + timedelta(days=n) for n in xrange((c.end-c.start).days)):
             idx = next(i for i,d in enumerate(segment_start_dates) if rec_date >= d)
             try:
                 d = rec_date.strftime("%Y-%m-%d")
                 ac = by_stock[stock].ix[d]['Adj Close']
                 daily_return = (ac - lastAc)/lastAc
                 if len(daily_returns) == daily_returns.maxlen:
                     seq[idx].append(daily_return/np.std(daily_returns))
                 daily_returns.append(daily_return)
                 lastAc = ac
             except KeyError:
                 pass
     return [np.asarray(dat, dtype=np.float32) for dat in seq][::-1]
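The inner loop normalises each daily return by the standard deviation of the previous c.normalize_std_len returns. A self-contained toy version of that rolling normalisation on synthetic prices:

from collections import deque
import numpy as np

prices = [10.0, 10.2, 10.1, 10.4, 10.3, 10.6]  # synthetic adjusted closes
daily_returns = deque(maxlen=3)                # stands in for c.normalize_std_len
lastAc, normalized = prices[0], []
for ac in prices[1:]:
    daily_return = (ac - lastAc) / lastAc
    if len(daily_returns) == daily_returns.maxlen:
        normalized.append(daily_return / np.std(daily_returns))
    daily_returns.append(daily_return)
    lastAc = ac
print(normalized)  # the last two returns, each scaled by the std of the three before it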
 def action_ship_create(self):
     res = super(SaleOrder, self).action_ship_create()
     user_tz = self.env['res.users'].browse(self._uid).tz
     from_zone = tz.gettz('UTC')
     to_zone = tz.gettz(user_tz)
     for order in self:
         for picking in order.picking_ids:
             if order.requested_date:
                 datetime_requested = \
                     datetime.strptime(order.requested_date,
                                       '%Y-%m-%d %H:%M:%S').\
                     replace(tzinfo=from_zone).astimezone(to_zone)
                 date_requested = datetime.strftime(datetime_requested,
                                                    '%Y-%m-%d')
                 date_effective = date_requested
             else:
                 date_requested = False
                 datetime_effective = \
                     datetime.strptime(order.commitment_date,
                                       '%Y-%m-%d %H:%M:%S').\
                     replace(tzinfo=from_zone).astimezone(to_zone)
                 date_effective = datetime.strftime(datetime_effective,
                                                    '%Y-%m-%d')
             vals = {'note': order.note,
                     'requested_date': date_requested,
                     'effective_date': date_effective,
                     }
             if order.supplier_id and picking.state != 'cancel' \
                     and not picking.supplier_id:
                 vals.update({'supplier_id': order.supplier_id.id})
             picking.write(vals)
     return res
 def __getData(self, post, is_question, unique_key):
     """ This will return the page dictionry
     """
     page = {'entity':'question' if is_question else 'answer'}
     try:
         page['uri'] = self.currenturi + '#' + str(post.find('span', 'wintiny').a['href'])
     except:
         log.info(self.log_msg('uri not found in %s'% self.currenturi))
         page['uri'] = self.currenturi
     try:
         page['data'] = stripHtml(post.find('div', 'fMsg').renderContents())
     except:
         log.info(self.log_msg('Empty data found in %s'%page['uri']))
         return 
     try:
         date_str = [x.strip() for x in stripHtml(post.find('div', 'msgHead').findAll('div','floatl')[-1].renderContents()).splitlines() if x.strip()][-2]
         date_object = datetime.strptime(date_str, 'Posted: %b-%d %I:%M %p')
         current_year = datetime.utcnow().year
         if date_object.month > datetime.utcnow().month:
             current_year -=  1
         my_object = datetime(year=current_year, month=date_object.month, day=date_object.day, hour=date_object.hour, minute=date_object.minute)
         page['posted_date'] = datetime.strftime(my_object, "%Y-%m-%dT%H:%M:%SZ")
     except:
         page['posted_date'] = datetime.strftime(datetime.utcnow(), "%Y-%m-%dT%H:%M:%SZ")
         log.exception(self.log_msg('Posted date not found'))
     try:
         page['et_author_name'] = stripHtml(post.find('div', id=re.compile('from\-')).renderContents()).replace('From: ','').strip()
     except:
         log.info(self.log_msg('Author name not found in %s'% self.currenturi))
     if len(self.__hierarchy) >= 2:
         page['title'] = page['et_thread_topic'] = self.__hierarchy[-1]
         page['et_thread_forum'] = self.__hierarchy[-2]
         if not is_question:
             page['title'] = 'Re: ' + page['title']
     return page
 def __getData(self, post, is_question):
     """ This will return the page dictionry
     """
     page = {'entity':'question' if is_question else 'answer'}
     try:
         post_info =  dict([(x.strip().lower(), y.strip()) for x,y in  [x.strip().split(':', 1) for x in stripHtml(post.find('b', text='Subject:').findParent('td').renderContents()).splitlines() if x.strip()]])
         page['uri'] = post_info['url']
         page['title'] = post_info['subject']
     except:
         log.info(self.log_msg('Cannot find the title, uri details'))
         return True
     try:
         page['data'] = stripHtml(post.findAll('table')[1].find('p').renderContents())
     except:
         log.info(self.log_msg('Data not found for the url %s'%self.currenturi))
         return True
     try:
         page['posted_date'] = datetime.strftime(datetime.strptime( post_info['date'].split(' ( ')[0].strip(), '%m/%d/%Y %I:%M:%S %p'),"%Y-%m-%dT%H:%M:%SZ")
     except:
         page['posted_date'] = datetime.strftime(datetime.utcnow(), "%Y-%m-%dT%H:%M:%SZ")
         log.info(self.log_msg('Date could not be found in %s'% self.currenturi))
     try:
         page['et_author_name'] = post_info['username'].split(' contact ')[1].strip()
     except:
         log.info(self.log_msg('Ratings count not found'))
     try:
         page['ei_data_views_count'] = int(post_info['hits'])
     except:
         log.exception(self.log_msg('uri not found'))            
     try:
         if is_question:
             page['ei_data_replies_count'] = int(post_info['replies'])
     except:
         log.info(self.log_msg('data replies not found'))
     return page
def start_end_date_for_period(period, default_start_date=False, default_end_date=False):
    """Return the start and end date for a goal period based on today

    :param str default_start_date: string date in DEFAULT_SERVER_DATE_FORMAT format
    :param str default_end_date: string date in DEFAULT_SERVER_DATE_FORMAT format

    :return: (start_date, end_date), dates in string format, False if the period is
    not defined or unknown"""
    today = date.today()
    if period == 'daily':
        start_date = today
        end_date = start_date
    elif period == 'weekly':
        delta = timedelta(days=today.weekday())
        start_date = today - delta
        end_date = start_date + timedelta(days=7)
    elif period == 'monthly':
        month_range = calendar.monthrange(today.year, today.month)
        start_date = today.replace(day=1)
        end_date = today.replace(day=month_range[1])
    elif period == 'yearly':
        start_date = today.replace(month=1, day=1)
        end_date = today.replace(month=12, day=31)
    else:  # period == 'once':
        start_date = default_start_date  # for manual goal, start each time
        end_date = default_end_date

        return (start_date, end_date)

    return (datetime.strftime(start_date, DF), datetime.strftime(end_date, DF))
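A hedged usage sketch, assuming DF stands in for DEFAULT_SERVER_DATE_FORMAT ("%Y-%m-%d") and that date, datetime, timedelta and calendar are imported as the function requires:

print(start_end_date_for_period('monthly'))
# run during May 2024 this returns ('2024-05-01', '2024-05-31')
print(start_end_date_for_period('once', '2024-01-01', '2024-06-30'))
# ('2024-01-01', '2024-06-30') -- for 'once' the defaults are returned as-is, unformatted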
Example #8
def main():
    argv = docopt(USAGE)
    if argv["--start"]:
        start = pacific_to_utc(argv["--start"])
    elif argv["--last"]:
        start = datetime.strftime(
                datetime.utcnow() - timedelta(minutes=int(argv["--last"])), "%Y-%m-%dT%H:%M:%SZ")
    else:
        start = datetime.strftime(datetime.utcnow() - timedelta(days=7), "%Y-%m-%dT%H:%M:%SZ")

    if argv["--end"]:
        end = pacific_to_utc(argv["--end"])
    else:
        end = datetime.strftime(datetime.utcnow(),"%Y-%m-%dT%H:%M:%SZ")

    pager = PagerDuty(argv['<subdomain>'], argv['<api-token>'], argv['<policy>'])
    for command in ['all','wakeups','flakes']:
        if argv[command]:
            incidents = pager.do_list(command, argv['--no-thurs'], since=start, until=end)
            incident_list = list(incidents)
            (incident_list_prod, incident_list_staging)=segregation(incident_list)
            if incidents:
                if argv['--email']:
                    email_output(incident_list_prod, incident_list_staging, argv['--top'])
                elif argv['--top']:
                    pprint_rankings(top(incident_list_prod, argv['--top']))
                    pprint_rankings(top(incident_list_staging, argv['--top']))
                else:
                    pprint_incidents(incident_list_prod)
                    pprint_incidents(incident_list_staging)


    if argv['mtr']:
        print pager.get_mtr(since=start, until=end)
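pacific_to_utc() is not shown above; a hedged sketch of what such a helper might do, producing the same "%Y-%m-%dT%H:%M:%SZ" strings the rest of main() uses (the input format here is an assumption):

from datetime import datetime
from dateutil import tz

def pacific_to_utc(s):
    # assumed input like "2015-06-01 09:00", interpreted as US/Pacific local time
    local = datetime.strptime(s, "%Y-%m-%d %H:%M").replace(tzinfo=tz.gettz("US/Pacific"))
    return datetime.strftime(local.astimezone(tz.gettz("UTC")), "%Y-%m-%dT%H:%M:%SZ")

print(pacific_to_utc("2015-06-01 09:00"))  # 2015-06-01T16:00:00Z (PDT is UTC-7)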
Example #9
def plot_basic_stats(start, end, path_list, image_name_list, html_string):
    new_path_list = []
    new_image_name_list = []
    duration = datetime.strftime(start, '%Y%m%d' ) + '_to_' + datetime.strftime(end, '%Y%m%d' ) + '.png'
    image_name = 'Vol_euro_from_' + duration
    new_path_list.append(FOLDER + image_name)
    new_image_name_list.append(image_name)
    
    image_name = 'Occ_euro_from_' + duration
    new_path_list.append(FOLDER + image_name)
    new_image_name_list.append(image_name)
    
    image_name = 'Place_from_' + duration
    new_path_list.append(FOLDER + image_name)
    new_image_name_list.append(image_name)
    
    image_name = 'Intraday_algo_vol_from_' + duration
    new_path_list.append(FOLDER + image_name)
    new_image_name_list.append(image_name)
        
    period = PlotEngine(start_date  = start, end_date = end)
    period.plot_basic_stats(path    = new_path_list)
    period.plot_intraday_exec_curve(duration = 'From ' + datetime.strftime(start, '%Y/%m/%d' ) + ' to ' + datetime.strftime(end, '%Y/%m/%d' )).savefig(FOLDER + image_name)
    
    for image in new_image_name_list:
        html_string += '<img src="cid:%s">\n' %image
    
    path_list.extend(new_path_list)
    image_name_list.extend(new_image_name_list)
    
    return html_string
Example #10
 def savePoint(self, lat=None, lon=None):
     if (lat == None or lon == None):
         return "Erro - Latitude ou Longitude não foram especificadas"
     writer = csv.writer(open('points.csv', 'ab')) #, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL
     #writer.writerow(['Horario', 'Latitude', 'Longitude'])
     writer.writerow([datetime.strftime(datetime.now(), "%d/%m/%Y %H:%M:%S"), str(lat), str(lon)])
     return "Point (" + str(lat) + ',' + str(lon) + ") saved at " + datetime.strftime(datetime.now(), "%d/%m/%Y %H:%M:%S")
 def lookup_tweets(self, date_time, margin):
     """
     Retrieve potentially relevant tweets around a date of a info trafic event
     :param date_time: date of info trafic event
     :param margin: index date margin
     :return: list of tweets
     """
     data = None
     try:
         dt_min, dt_max = self.get_bounds_dates(date_time, 1)
         index_name = self.get_tweet_index_name(date_time, margin)
         dt_min = datetime.strftime(dt_min, '%Y%m%d%H%M%S')
         dt_max = datetime.strftime(dt_max, '%Y%m%d%H%M%S')
         q = json.loads(pystache.render(self.query_sel_tweets, {'dt_min': dt_min, 'dt_max': dt_max}))
         res = self.es.select_page(index_name, q, 0)
         i = 1
         data = []
         long_data = []
         while len(res) != 0:
             short_res = map(lambda x: [x['_source']['created_at'], x['_source']['text']], res)
             data.extend(short_res)
             long_data.extend(res)
             res = self.es.select_page(index_name, q, i)
             i += 1
     except:
         traceback.print_exc()
         pass
     return data, long_data
 def fetch(self):
     """Fetch of gamasutra.com
     """
     try:
         self.__genre = 'review'
         self.__task_elements_dict = {
             'priority':self.task.priority,
             'level': self.task.level,
             'last_updated_time':datetime.strftime(datetime.utcnow(),"%Y-%m-%dT%H:%M:%SZ"),
             'pickup_date':datetime.strftime(datetime.utcnow(),"%Y-%m-%dT%H:%M:%SZ"),
             'connector_instance_log_id': self.task.connector_instance_log_id,
             'connector_instance_id':self.task.connector_instance_id,
             'workspace_id':self.task.workspace_id,
             'client_id':self.task.client_id,
             'client_name':self.task.client_name,
             'versioned':False,
             'category':self.task.instance_data.get('category',''),
             'task_log_id':self.task.id }
         self.__setSoupForCurrentUri()
         self.__setParentPage()
         posts = self.__iteratePosts()
         for post in posts:
             if not self.__addPost(post):
                 log.debug(self.log_msg('Post not added to self.pages for url\
                                                         %s'%self.currenturi))
                 #return False
                 break
         return True
     except:
         log.exception(self.log_msg('Exception in fetch for the url %s'%self.currenturi))
     return True
Example #13
def ng_scrape_blammed(soup):

	# Scrape web page
	#url = ('http://www.newgrounds.com/portal/view/381115')
	
	
	# PARSE CONTENT
	# Page title
	title = soup.title.string.split(':')[1].strip()
	
	# Author
	author = soup.find_all("em")[5].text.split(' ')[2]

	# Author Comments
	try:
		author_comments = soup.find("h2", text="Author Comments").parent.findNextSibling().text
		author_comments = author_comments.replace('\n',' ').replace(',','').strip()
	except:
		author_comments = ''
	
	# Page Star Rating
	stars = soup.find(id='score_number').text
	
	# Date Uploaded
	date_uploaded = soup.find(id='eulogy').span.text.split(' ')[0]
	date_blammed = soup.find(id='eulogy').span.text.split(' ')[2]
	# Standard date formats
	date_uploaded = datetime.strftime(datetime.strptime(date_uploaded, '%m-%d-%y').date(), '%m/%d/%Y')
	date_blammed = datetime.strftime(datetime.strptime(date_blammed, '%m-%d-%y').date(), '%m/%d/%Y')
	
	return[title, author, date_uploaded, date_blammed, stars, author_comments]
def multi_manpower_add(request):
	if request.method=="POST":
		print "multi_project_code",request.POST['multi_project_code']
		print request.POST['multi_date_from']
		print request.POST['multi_date_to']
		if request.POST['multi_project_code'] and request.POST['multi_date_from'] and request.POST['multi_date_to']:
			project_id=request.POST['multi_project_code']
			multi_date_from=request.POST['multi_date_from']
			multi_date_to=request.POST['multi_date_to']
			try:
				project=Project.objects.get(id=project_id)
				print "project",project
				manpower=ManPower.objects.filter(project=project)
				print "manpower",manpower
				for m in manpower:
					multi_datetime_in=datetime.combine(datetime.strptime(multi_date_to,"%Y-%m-%d"),time(hour=m.time_in.hour,minute=m.time_in.minute,second=m.time_in.second,microsecond=0))
					if datetime.strftime(m.time_in,"%Y-%m-%d")==datetime.strftime(m.time_out,"%Y-%m-%d"):
						multi_datetime_out=datetime.combine(datetime.strptime(multi_date_to,"%Y-%m-%d"),m.time_out.time())
					else:
						# shift crosses midnight: keep the original day span between time_in and time_out
						multi_datetime_out=datetime.combine(datetime.strptime(multi_date_to,"%Y-%m-%d")+(m.time_out.date()-m.time_in.date()),m.time_out.time())
						print "multi_datetime_out",multi_datetime_out
					new_manpower=ManPower.objects.create(employee=m.employee,project=m.project,time_in=multi_datetime_in,time_out=multi_datetime_out,lunch=m.lunch,shift=m.shift,working_time=m.working_time,remark=m.remark,create_by=request.user,is_checked=False,checked_by="",is_payed=False,payed_period="")
					new_manpower.save()
				return render(request,"manpower/manpower_multi_add.html",{"new_manpower":new_manpower})
			except Exception as e:
				return single_manpower_add(request)
		else:
			return HttpResponse("Please fill complete information,click <a href='/manpower/add/'>here</a> go back.")
	else:
		return single_manpower_add(request)
Example #15
 def analize(self, value, field_type, id):
     if field_type=='DateTimeField':
         try:
             value = datetime.strftime(value,self.datetime_format)
         except:
             value = Grid.UNDEFINED
     elif field_type=='DateField':
         try:
             value = datetime.strftime(value, self.date_format)
         except:
             value = Grid.UNDEFINED
     elif field_type=='TimeField':
         try:
             value = datetime.strftime(value, self.time_format)
         except:
             value = Grid.UNDEFINED
     elif field_type=='BooleanField':
         if value==True:
             value = 1
         else:
             value = 0
     elif field_type in ['ForeignKey','OneToOneField']:
         try:
             value = '<a href="%s" title="%s">%s</a>' % (value.get_absolute_url(), smart_unicode(value), smart_unicode(value))
         except:
             value = smart_unicode(value)
     elif field_type in ['FileField','ImageField']:
         try:
             value = value.name
         except:
             value = Grid.UNDEFINED          
     return value
Example #16
    def serialize(self, fields = []):
        from sys2do.util.common import _info
        result = {}
        m = self.__mapper__.columns

        for cname in fields:
            if cname not in m.keys(): continue
            colClz = m[cname]
            if isinstance(colClz, Column):
                v = getattr(self, cname)

                if v is None: v = u'' #the value is None
                elif m[cname].foreign_keys : #it's a foreign key
                    tmp_name = cname.replace("_id", '')
                    try:
                        v = unicode(getattr(self, tmp_name))
                    except:
                        _info(traceback.print_exc())
                        v = u''
                elif isinstance(v, dt) : v = v.strftime(SYSTEM_DATETIME_FORMAT)
                elif isinstance(v, date) : v = v.strftime(SYSTEM_DATE_FORMAT)
                elif isinstance(v, (int, float)) : v = u'%s' % v
                else: v = unicode(v)

                result[cname] = (v, colClz.doc or cname)
#        _info(result)
        return result
 def fetch(self):
     """Fetch of games.yahoo.com
     """
     try:
         self.__genre = 'review'
         self.__task_elements_dict = {
             'priority':self.task.priority,
             'level': self.task.level,
             'last_updated_time':datetime.strftime(datetime.utcnow(),"%Y-%m-%dT%H:%M:%SZ"),
             'pickup_date':datetime.strftime(datetime.utcnow(),"%Y-%m-%dT%H:%M:%SZ"),
             'connector_instance_log_id': self.task.connector_instance_log_id,
             'connector_instance_id':self.task.connector_instance_id,
             'workspace_id':self.task.workspace_id,
             'client_id':self.task.client_id,
             'client_name':self.task.client_name,
             'versioned':False,
             'category':self.task.instance_data.get('category',''),
             'task_log_id':self.task.id }
         self.__setSoupForCurrentUri()
         while True:
             try:
                 if not self.__iteratePosts():
                     log.info(self.log_msg('No more links found'))
                     break
                 self.currenturi = self.soup.find('li','next').find('a')['href']
                 self.__setSoupForCurrentUri() 
             except:
                 log.exception(self.log_msg('next page not found')) 
                 break   
     except:
         log.exception(self.log_msg('Exception in fetch for the url %s'%self.currenturi))
     return True
 def _get_date(self, cr, uid, id, start_date, delay, resource=False, context=None):
     """This method gives the first date after a delay from the start date
         considering the working time attached to the company calendar.
         Start_date should be a date not an openerp date
     """
     if not id:
         company_id = self.pool['res.users'].read(cr, uid, uid,
                                                  ['company_id'],
                                                  context=context)['company_id'][0]
         company = self.pool['res.company'].read(cr, uid, company_id,
                                                 ['calendar_id'],
                                                 context=context)
         if not company['calendar_id']:
             raise orm.except_orm(_('Error !'),
                                  _('You need to define a calendar for the company !'))
         id = company['calendar_id'][0]
     dt_leave = self._get_leaves(cr, uid, id, resource)
     calendar_info = self.browse(cr, SUPERUSER_ID, id, context=context)
     worked_days = [day['dayofweek'] for day in calendar_info.attendance_ids]
     if delay < 0:
         delta = -1
     else:
         delta = 1
     while datetime.strftime(start_date, DEFAULT_SERVER_DATE_FORMAT) in dt_leave or str(start_date.weekday()) not in worked_days:
         start_date = start_date + timedelta(days=delta)
     date = start_date
     while delay:
         date = date + timedelta(days=delta)
         if datetime.strftime(date, DEFAULT_SERVER_DATE_FORMAT) not in dt_leave and str(date.weekday()) in worked_days:
             delay = delay - delta
     return date
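The same walk as a standalone sketch with hard-coded leaves and worked weekdays (the real method reads both from the company calendar via _get_leaves and attendance_ids):

from datetime import datetime, timedelta

DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
dt_leave = {"2024-05-01"}                # leave days as server-format strings
worked_days = {"0", "1", "2", "3", "4"}  # Monday..Friday as str(weekday())

def add_working_days(start_date, delay):
    delta = 1 if delay >= 0 else -1
    # move off any leave day or non-working weekday first
    while (datetime.strftime(start_date, DEFAULT_SERVER_DATE_FORMAT) in dt_leave
           or str(start_date.weekday()) not in worked_days):
        start_date += timedelta(days=delta)
    date = start_date
    # then walk day by day, counting only working days toward the delay
    while delay:
        date += timedelta(days=delta)
        if (datetime.strftime(date, DEFAULT_SERVER_DATE_FORMAT) not in dt_leave
                and str(date.weekday()) in worked_days):
            delay -= delta
    return date

print(add_working_days(datetime(2024, 4, 30), 3))  # 2024-05-06: skips May 1st and the weekend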
 def __getData(self, post):
     page = {}
     try:
         post_tag = BeautifulSoup(post.__str__().replace('/>>','/>'))
         table_tag = post_tag.find('table')
         if table_tag:
             table_tag.extract()
         try:    
             page['data'] = stripHtml(post_tag.renderContents())
             page['title']= ''
         except:
             log.exception(self.log_msg('Data not found for the url %s'%self.currenturi))
             return        
     
         try:
             date_str = stripHtml(table_tag.findAll('strong')[-1].renderContents())
             page['posted_date'] = datetime.strftime(datetime.\
                                     strptime(re.sub("(\d+)(st|nd|rd|th)",r"\1",date_str).\
                                     strip(),"%d %B %Y"),"%Y-%m-%dT%H:%M:%SZ")             
         except:
             log.exception(self.log_msg('Posted date not found'))
             page['posted_date'] = datetime.strftime(datetime.utcnow(), "%Y-%m-%dT%H:%M:%SZ")
         try:
             page['et_author_name'] = stripHtml(table_tag.findAll('strong')[0].renderContents())
         except:
             log.exception(self.log_msg('author name not found'))
     except:
         log.exception(self.log_msg('post tag not found'))        
      
     return page
Example #20
def load_from_toggl():
    """Load live data from toggl
    """
    current_month = datetime.strftime(datetime.today(), '%m')

    # find out what month to report. default to the current month
    month = raw_input('Enter the month you want to report [%s]: ' % (current_month))
    if len(month) == 0:
        month = current_month

    # make sure we have a two character month
    while len(month) < 2:
        month = '0' + month

    start_date = datetime.today()
    monthrange = calendar.monthrange(start_date.year, start_date.month)
    data = {'start_date': '%s-01' % (datetime.strftime(start_date, '%Y-%m')),
            'end_date': '%s-%s' % (datetime.strftime(start_date, '%Y-%m'), monthrange[1])}
    print data

    # hit toggl for the time entries
    req = urllib2.Request('https://www.toggl.com/api/v6/time_entries.json?' + urllib.urlencode(data))
    authn_header = base64.encodestring('%s:api_token' % API_KEY)
    req.add_header('Authorization', 'Basic %s' % authn_header)
    try:
        resp = urllib2.urlopen(req)
    except urllib2.HTTPError, msg:
        print "Error loading time entries from toggl: %s" % (msg)
        exit(1)
 def fetch(self):
     """
     http://www.mrrebates.com/store_ratings/view_store_ratings.asp?merchant_id=1465
     """
     try:
         self.__setSoupForCurrentUri()
         self.__genre = "Review"
         self.__task_elements_dict = {
                         'priority':self.task.priority,
                         'level': self.task.level,
                         'last_updated_time':datetime.strftime(datetime.utcnow(), "%Y-%m-%dT%H:%M:%SZ"),
                         'pickup_date':datetime.strftime(datetime.utcnow(), "%Y-%m-%dT%H:%M:%SZ"),
                         'posted_date':datetime.strftime(datetime.utcnow(), "%Y-%m-%dT%H:%M:%SZ"),
                         'connector_instance_log_id': self.task.connector_instance_log_id,
                         'connector_instance_id':self.task.connector_instance_id,
                         'workspace_id':self.task.workspace_id,
                         'client_id':self.task.client_id,
                         'client_name':self.task.client_name,
                         'versioned':False,
                         'category':self.task.instance_data.get('category',''),
                         'task_log_id':self.task.id }
         while self.__iteratePosts():
             try:
                 self.currenturi = 'http://www.mrrebates.com' + self.soup.find('b', text='Next').parent.parent['href']
                 self.__setSoupForCurrentUri()
             except:
                 log.info(self.log_msg('No Previous URL found for url \
                                                     %s'%self.currenturi))
                 break
     except:
         log.exception(self.log_msg('Exception while fetching posts'))
     return True
Example #22
    def __init__(self, jid, password, room, nick):
        sleekxmpp.ClientXMPP.__init__(self, jid, password)

        self.urlhandler = URLHandler(self)

        self.room = room
        self.nick = nick

        # The session_start event will be triggered when
        # the bot establishes its connection with the server
        # and the XML streams are ready for use. We want to
        # listen for this event so that we can initialize
        # our roster.
        self.add_event_handler("session_start", self.start)

        # The groupchat_message event is triggered whenever a message
        # stanza is received from any chat room. If you also
        # register a handler for the 'message' event, MUC messages
        # will be processed by both handlers.
        self.add_event_handler("groupchat_message", self.muc_message)

        # The groupchat_presence event is triggered whenever a
        # presence stanza is received from any chat room, including
        # any presences you send yourself. To limit event handling
        # to a single room, use the events muc::room@server::presence,
        # muc::room@server::got_online, or muc::room@server::got_offline.
        self.add_event_handler("muc::%s::got_online" % self.room,
                               self.muc_online)

        self.logfile = datetime.strftime(datetime.now(), "logs/%Y%m%d_%H%M.txt")
        self.urlfile = datetime.strftime(datetime.now(), "logs/urls.txt")
        self.logfile_handle = open(self.logfile, "w")
        self.urlfile_handle = open(self.urlfile, "a")

        self.urls = [] #todo: read file
Example #23
    def copy(self, cr, uid, id, default=None, context=None):
        if not default:
            default = {}
            
        record = self.browse(cr, uid, id, context=context)

        date_start = datetime.strptime(record.date_start,tools.DEFAULT_SERVER_DATETIME_FORMAT) 
        date_start = date_start + timedelta(days=1)
        date_start = datetime.strftime(date_start,tools.DEFAULT_SERVER_DATETIME_FORMAT)

        date_end = datetime.strptime(record.date_end,tools.DEFAULT_SERVER_DATETIME_FORMAT)
        date_end = date_end + timedelta(days=1)
        date_end = datetime.strftime(date_end,tools.DEFAULT_SERVER_DATETIME_FORMAT)
        
        date = datetime.strptime(record.date,tools.DEFAULT_SERVER_DATE_FORMAT) 
        date = date + timedelta(days=1)
        date = datetime.strftime(date,tools.DEFAULT_SERVER_DATE_FORMAT)    
        #TODO: also add positions for routes with the incremented date
        default.update({
            'log_fuel_ids':[],
            'name': self.pool.get('ir.sequence').next_by_code(cr, uid, 'fleet.map.sheet'),
            'date': date,
            'date_start': date_start,
            'date_end': date_end,
        })
        new_id = super(fleet_map_sheet, self).copy(cr, uid, id, default, context=context)
        return new_id
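The copy() override above shifts each stored date string forward one day with the same strptime -> timedelta -> strftime round trip; a standalone version with Odoo's default server format assumed:

from datetime import datetime, timedelta

DATETIME_FMT = "%Y-%m-%d %H:%M:%S"  # assumed DEFAULT_SERVER_DATETIME_FORMAT
old = "2015-03-31 08:00:00"
new = datetime.strftime(datetime.strptime(old, DATETIME_FMT) + timedelta(days=1), DATETIME_FMT)
print(new)  # 2015-04-01 08:00:00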
def download():
    form = AdvancedSearchForm()
    form.business_type.default = 'All Entities'
    if form.validate_on_submit():
        q_object = {
            'query': form.query.data,
            'query_limit': form.query_limit.data,
            'index_field': form.index_field.data,
            'active': form.active.data,
            'sort_by': form.sort_by.data,
            'sort_order': form.sort_order.data
        }
        try:
            q_object['start_date'] = datetime.strftime(form.start_date.data, '%Y-%m-%d')
            q_object['end_date'] = datetime.strftime(form.end_date.data, '%Y-%m-%d')
        except TypeError:
            q_object['start_date'] = date(year=1990, month=1, day=1)
            q_object['end_date'] = datetime.now()
        q_object['business_type'] = form.business_type.data
        results = query(q_object)
        file = StringIO()

        writer = csv.DictWriter(file, fieldnames=['name', 'id', 'origin date', 'status', 'type', 'street', 'city', 'state', 'zip'])
        writer.writeheader()
        for biz in results.all():
            row = {'name': biz.nm_name, 'id': biz.id_bus, 'origin date': biz.dt_origin, 'status': biz.status,
                   'type': biz.type, 'street': biz.street, 'city': biz.city, 'state': biz.state, 'zip': biz.zip}
            writer.writerow(row)
        file.seek(0)
        response = Response(file, content_type='text/csv')
        response.headers['Content-Disposition'] = 'attachment; filename=sots_search_results.csv'
        return response
Example #25
    def _getBookingsBetweenTimestamps(self, fromDate, toDate,
                                      tz = 'UTC', conferenceId = None, categoryId = None, dateFormat = None):
        bookings = []
        nBookings = 0

        date = None
        bookingsForDate = None

        for timestamp, s in self._tree.iteritems(fromDate, toDate):
            currentDate = unixTimeToDatetime(timestamp, tz).date()

            if date != currentDate:
                if date is not None and bookingsForDate:
                    bookings.append((datetime.strftime(date, dateFormat), bookingsForDate))
                    nBookings += len(bookingsForDate)
                date = currentDate
                bookingsForDate = []

            if conferenceId:
                for booking in s:
                    if booking.getConference().getId() == conferenceId:
                        bookingsForDate.append(booking)
            elif categoryId:
                cc = CategoryChecker(categoryId)
                for booking in s:
                    if cc.check(booking.getConference()):
                        bookingsForDate.append(booking)
            else:
                bookingsForDate.extend(s)

        if date is not None and bookingsForDate:
            bookings.append((datetime.strftime(date, dateFormat), bookingsForDate))
            nBookings += len(bookingsForDate)

        return bookings, nBookings
 def gamecheck(self, gameURL):
     while True:
         try:
             response = urllib2.urlopen(gameURL)
             break
         except:
             check = datetime.today()
             print datetime.strftime(check, "%d %I:%M %p")
             print "gamecheck couldn't find file, trying again..."
             time.sleep(20)
     jsonResponse = json.load(response)
     timeData = jsonResponse["gameData"]["datetime"]
     if "timeDate" in timeData:
         timestring = timeData["timeDate"] + " " + timeData["ampm"]
         date_object = datetime.strptime(timestring, "%Y/%m/%d %I:%M %p")
     else:
         timestring = timeData["originalDate"] + " " + timeData["time"] + " " + timeData["ampm"]
         date_object = datetime.strptime(timestring, "%Y-%m-%d %I:%M %p")
     while True:
         check = datetime.today()
         if date_object >= check:
             if (date_object - check).seconds <= self.time_before:
                 return
             else:
                 print "Last game check: " + datetime.strftime(check, "%d %I:%M %p")
                 time.sleep(600)
         else:
             return
Example #27
def statd(request, q_type = 'today'):  
	
	day_format = "%Y-%m-%d"
	today = datetime.combine(datetime.today(), time(9, 0));
	yesterday = today - timedelta(hours=24);
	domain = request.POST.get('domain', '')
	domain = '' if domain == "ALL" else domain;
	
	if not q_type:
		q_type = 'yestoday'

	datas = []
	if(q_type == 'today'):
                # today = datetime.combine(datetime.today(),time(9, 0)) + timedelta(hours=24)
		datas = get_statd(start = datetime.strftime(today, day_format), domain = domain);
	elif(q_type == 'yestoday'):
		datas = get_statd(start = datetime.strftime(yesterday, day_format), domain = domain);
	        # today = datetime.combine(datetime.today(), time(9, 0));
		# datas = get_statd(start = datetime.strftime(today, day_format), domain = domain);
	elif(q_type == 'week'):
		week_ago = date.today() - timedelta(days=7);
		datas = get_statd(start = datetime.strftime(week_ago, day_format), end = datetime.strftime(yesterday, day_format), domain = domain);
	elif(q_type == "custom"):
		start = request.POST["start"];
		end = request.POST["end"];
		message_id = request.POST["msg"];
		datas = get_statd(start = start, end = end, message_id = message_id, domain = domain);
		
	domains = set([data["domain_name"] for data in datas]);
	return locals();
Example #28
def compute_attack(full=False):
    attackers = []
    if full:
        for n in graph_fwd.nodes():
            attackers.append(n)
    else:
        
        append_node_by_ip(entry_node,attackers)
        #Append compromised nodes
        for ip in compromised:
            append_node_by_ip(ip,attackers)
 
 
    #attackers.append('1884')

    # Build attack graph for attackers     
    print "Generating attack graph..."
    print datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
    attack_graph = generateAttackGraph(graph_fwd, attackers, easiest)
    print datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
    print "Final attack graph generated for the list of attackers: ", attackers
    pprint(attack_graph)
    t = datetime.strftime(datetime.now(), '%Y-%m-%d-%H-%M-%S')
    fName = projectPathName+"\\attack_graph\\attack_graph-"+t+".xml"
    # Save attack graph to XML
    print "Attack graph exported to XML format and saved in file attack_graph.xml"
    saveXML(attack_graph, fName)
Example #29
    def getPlotTitle(self, prof_coll):
        modified = prof_coll.isModified() or prof_coll.isInterpolated()
        modified_str = "; Modified" if modified else ""

        loc = prof_coll.getMeta('loc')
        date = prof_coll.getCurrentDate()
        run = prof_coll.getMeta('run').strftime("%HZ")
        model = prof_coll.getMeta('model')
        observed = prof_coll.getMeta('observed')

        plot_title = loc + '   ' + datetime.strftime(date, "%Y%m%d/%H%M")
        if model == "Archive":
            fhour_str = ""
            if not prof_coll.getMeta('observed'):
                fhour = int(total_seconds(date - prof_coll.getMeta('base_time')) / 3600)
                fhour_str = " F%03d" % fhour
            plot_title += "  (User Selected" + fhour_str + modified_str + ")"
        elif model == "Analog":
            date = prof_coll.getAnalogDate()
            plot_title = loc + '   ' + datetime.strftime(date, "%Y%m%d/%H%M")
            plot_title += "  (Analog" + modified_str + ")"
        elif observed:
            plot_title += "  (Observed" + modified_str + ")"
        else:
            fhour = int(total_seconds(date - prof_coll.getMeta('base_time')) / 3600)
            plot_title += "  (" + run + "  " + model + "  " + ("F%03d" % fhour) + modified_str + ")"
        return plot_title
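The forecast hour is just the whole number of hours between the profile time and the model base time; a minimal sketch using timedelta.total_seconds() in place of the module's total_seconds() helper:

from datetime import datetime

base_time = datetime(2024, 5, 1, 0, 0)  # assumed 00Z model run
date = datetime(2024, 5, 1, 18, 0)
fhour = int((date - base_time).total_seconds() / 3600)
print("F%03d" % fhour)  # F018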
Example #30
def meta_ingeststatus(request):
    """
    Returns the current information on the ingestion process
    """
    ingestions = {}

    for ingest in Ingestor.objects.using("default").all():
        if ingest.service_name not in ingestions:
            ingestions[ingest.service_name] = {
                'total': 0,
                'remaining': 0,
                'current': '',
                'completed': 0,
                'last_ingest_date': None,
                'current_start': None
            }
        if ingest.completed == 1 and ingest.completed_date:
            ingestions[ingest.service_name]['completed'] += 1
            if ingestions[ingest.service_name]['last_ingest_date'] is None or ingest.completed_date > ingestions[ingest.service_name]['last_ingest_date'].completed_date:
                ingestions[ingest.service_name]['last_ingest_date'] = ingest
        else:
            ingestions[ingest.service_name]['remaining'] += 1
        if ingest.completed == 0 and ingest.started == 1:
            ingestions[ingest.service_name]['current'] = ingest.meta
            if ingest.started_date is not None:
                ingestions[ingest.service_name]['current_start'] = datetime.strftime(ingest.started_date, "%Y-%m-%d %H:%M:%S")
        ingestions[ingest.service_name]['total'] += 1

    for ingest in ingestions:
        if ingestions[ingest]['last_ingest_date'] is not None:
            ingestions[ingest]['last_ingest_date'] = datetime.strftime(ingestions[ingest]['last_ingest_date'].completed_date, "%Y-%m-%d %H:%M:%S")

    return api.views.api_render(request, ingestions, status.HTTP_200_OK)
def main():
    # Import the model definition
    network = importlib.import_module(Config.model_def)                 # equivalent to importing the .py file

    # Name the run directory with a timestamp
    subdir = datetime.strftime(datetime.now(),'%Y%m%d-%H%M%S')
    model_dir = os.path.join(os.path.expanduser(Config.models_base_dir),subdir)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)

    # Load the dataset
    train_set = data_process.get_data_set(Config.data_dir)

    # Total number of classes
    nrof_classes = len(train_set)

    pretrained_model = None
    if Config.pretrained_model:
        pretrained_model = os.path.expanduser(Config.pretrained_model)
        print('Pre-trained model: %s'%pretrained_model)

    with tf.Graph().as_default():
        global_step = tf.Variable(0,trainable=False)

        image_list, label_list = data_process.get_image_paths_and_labels(train_set)
        assert len(image_list)>0,'The dataset should not be empty'

        labels = ops.convert_to_tensor(label_list,dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]

        index_queue = tf.train.range_input_producer(range_size,num_epochs=None,shuffle=True,seed = None,capacity=32)

        index_dequeue_op = index_queue.dequeue_many(Config.batch_size*Config.epoch_size,'index_dequeue')

        learning_rate_placeholder = tf.placeholder(tf.float32,name='learning_rate')
        batch_size_placeholder = tf.placeholder(tf.int32,name='batch_size')
        train_flag = tf.placeholder(tf.bool,name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string,shape=(None,1),name='image_paths')
        labels_placeholder = tf.placeholder(tf.int64,shape=(None,1),name='labels')

        input_queue = data_flow_ops.FIFOQueue(capacity=500000,
                                              dtypes=[tf.string,tf.int64],
                                              shapes=[(1,),(1,)],
                                              shared_name=None,name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder,labels_placeholder],name='enqueue_op')

        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents, channels=3)
                if Config.random_rotate:
                    image = tf.py_func(data_process.random_rotate_image, [image], tf.uint8)
                if Config.random_crop:
                    image = tf.random_crop(image, [Config.image_size, Config.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(image, Config.image_size, Config.image_size)
                if Config.random_flip:
                    image = tf.image.random_flip_left_right(image)


                # pylint: disable=no-member
                image.set_shape((Config.image_size, Config.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])

        image_batch,label_batch = tf.train.batch_join(
            images_and_labels,batch_size=batch_size_placeholder,
            shapes=[(Config.image_size,Config.image_size,3),()],enqueue_many=True,
            capacity=4*nrof_preprocess_threads*Config.batch_size,
            allow_smaller_final_batch=True)
        image_batch = tf.identity(image_batch,'image_batch')
        image_batch = tf.identity(image_batch,'input')
        label_batch = tf.identity(label_batch,'label_batch')

        print('Total number of classes: %d'%nrof_classes)
        print('Total number of examples: %d'%len(image_list))

        print('Building training graph')

        prelogits = network.inference(image_batch,Config.keep_prob,
                                        phase_train = train_flag,bottleneck_layer_size = Config.embedding_size,
                                        weight_decay = Config.weight_decay)
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None,
                                      weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                      weights_regularizer=slim.l2_regularizer(Config.weight_decay),
                                      scope='Logits', reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Add the center loss
        if Config.center_loss_weight >0.0:
            prelogits_center_loss,_ = utils.center_loss(prelogits,label_batch,Config.center_loss_alfa,nrof_classes)
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,prelogits_center_loss*Config.center_loss_weight)
        learning_rate = tf.train.exponential_decay(learning_rate_placeholder,global_step,
                                                   Config.learning_rate_decay_epochs*Config.epoch_size,
                                                   Config.learning_rate_decay_factor,staircase=True)


        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_batch,logits=logits,
                                                                       name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,name='cross_entropy')
        tf.add_to_collection('losses',cross_entropy_mean)

        # Add the center loss to the softmax cross-entropy loss
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean]+regularization_losses,name='total_loss')

        # Run one training batch and update the model parameters
        train_op = train_batch(total_loss,global_step,Config.optimizer,learning_rate,
                               Config.moving_average_decay,tf.global_variables())

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(),max_to_keep=3)


        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = Config.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,log_device_placement=False))
        sess.run(tf.global_variables_initializer())

        # Get a thread coordinator and start the queue-filling threads
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord,sess=sess)

        with sess.as_default():
            sess.run(tf.local_variables_initializer())
            if pretrained_model:
                print('Restoring pretrained model: %s'%pretrained_model)
                meta_file, ckpt_file = utils.get_model_filenames(Config.pretrained_model)
                saver = tf.train.import_meta_graph(os.path.join(Config.pretrained_model, meta_file))
                saver.restore(sess, os.path.join(Config.pretrained_model, ckpt_file))
            print('Running training')
            epoch = 0
            while epoch < Config.max_nrof_epochs:
                step = sess.run(global_step,feed_dict=None)
                utils.save_variables_and_metagraph(sess, saver, model_dir, subdir, step)

                print('++++++++++save done++++++++++')
                epoch = step // Config.epoch_size
                # Train for one epoch
                train(sess,epoch,image_list,label_list,index_dequeue_op,enqueue_op,image_paths_placeholder,labels_placeholder,
                      learning_rate_placeholder,train_flag,batch_size_placeholder,global_step,
                      total_loss,train_op,regularization_losses)
                utils.save_variables_and_metagraph(sess,saver,model_dir,subdir,step)

    return model_dir
def gather_data(
        access_token,
        access_token_secret,
        api_key,
        api_key_secret,
        chunked_user_list
):
    """
    Gather user info based on the chunked list of usernames with the provided
    bearer_token.
    """
    print("Gathering Data...")

    oauth1a = osometweet.OAuth1a(
        api_key=api_key,
        api_key_secret=api_key_secret,
        access_token=access_token,
        access_token_secret=access_token_secret
        )
    ot = osometweet.OsomeTweet(oauth1a)

    # Add all user_fields
    all_user_fields = osometweet.UserFields(everything=True)

    # Get today's date
    today = dt.strftime(dt.today(), "%Y-%m-%d_%H-%M")

    # Open two files. One for good data, the other for account errors.
    with open(f"account_data--{today}.json", 'w') as data_file,\
         open(f"account_errors--{today}.json", 'w') as error_file:

        # Iterate through the list of lists
        for one_hundred_users in chunked_user_list:
            response = ot.user_lookup_usernames(
                usernames=one_hundred_users,
                fields=all_user_fields
            )

            # Whereas the user_ids endpoint always returns both "data" and
            # "errors", the username endpoint does the opposite - only
            # including these keys if data is present.
            if "data" in response:
                data = response["data"]
            else:
                data = None

            if "errors" in response:
                errors = response["errors"]
            else:
                errors = None

            try:
                data_file.writelines(f"{json.dumps(line)}\n" for line in data)
            except TypeError:
                print(
                    "No USER data found in this set of users, "
                    "skipping to the next set."
                )

            try:
                error_file.writelines(
                    f"{json.dumps(line)}\n" for line in errors
                )
            except TypeError:
                print(
                    "No problematic users found in this set of user, "
                    "skipping to the next set."
                )
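gather_data() expects chunked_user_list to already be split into batches of at most 100 usernames (the per-request limit of the user lookup endpoint, as the one_hundred_users name suggests). A small helper sketch for producing it:

def chunk_usernames(usernames, size=100):
    """Split a flat list of usernames into lists of at most `size` items."""
    return [usernames[i:i + size] for i in range(0, len(usernames), size)]

chunked_user_list = chunk_usernames(["user{}".format(i) for i in range(250)])
print([len(chunk) for chunk in chunked_user_list])  # [100, 100, 50]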
Example #33
def plot_prophet_forecast(energy_forecast_idf, df_idf_plot, mape, figname):
    plt.figure(figsize=(20, 5), linewidth=2)

    forecast = energy_forecast_idf[
        energy_forecast_idf["ds"] > md.END_TRAIN_DATE]
    model_past_fitting = energy_forecast_idf[
        energy_forecast_idf["ds"] <= md.END_TRAIN_DATE]

    plt.plot(df_idf_plot.index,
             df_idf_plot[c.EnergyConso.CONSUMPTION],
             color="steelblue",
             label="observations",
             linewidth=2)
    plt.plot(forecast["ds"],
             forecast["yhat"],
             color="g",
             label="forecasting",
             linewidth=2)

    if len(model_past_fitting) > 0:
        plt.plot(model_past_fitting["ds"],
                 model_past_fitting["yhat"],
                 color="g",
                 label="fitting of past values",
                 linestyle="dashed",
                 linewidth=2)

    plt.fill_between(forecast["ds"],
                     forecast["yhat_lower"],
                     forecast["yhat_upper"],
                     color="green",
                     alpha=0.2,
                     label="90% confidence interval")
    plt.title("Prophet: Prediction for Ile de France with" +
              " MAPE: {}%".format(str(round(100 * mape, 1))),
              fontsize=TITLE_FONTSIZE)
    plt.grid(which="both")
    plt.ylabel("Consumption (MW)", fontsize=LABEL_FONTSIZE)

    ax = plt.gca()
    ax.set_xlim([
        md.END_TRAIN_DATE - timedelta(days=md.NB_HOURS_PRED / 24),
        md.END_TRAIN_DATE + timedelta(days=md.NB_HOURS_PRED / 24)
    ])
    ax.set_ylim([12000, 28000])
    xticks = [
        md.END_TRAIN_DATE + timedelta(days=x)
        for x in [-11, -7, -3, 0, 4, 8, 12]
    ]
    ax.set_xticks(xticks, minor=False)
    ax.set_xticklabels(
        [datetime.strftime(date, "%Y-%m-%d") for date in xticks],
        minor=False,
        fontsize=LABEL_FONTSIZE)
    yticks = np.arange(14000, 28000, step=2000)
    ax.set_yticks(yticks)
    ax.set_yticklabels([str(x) for x in yticks], fontsize=LABEL_FONTSIZE)

    plt.legend()

    plt.savefig(
        os.path.join(PROPHET_PLOTS,
                     "{}.png".format(figname.replace(".pkl", ""))))

    plt.close()
Example #34
def main(args):
  
    # The network is defined in inception_resnet_v1 / _v2 and selected via --model_def, e.g. models.inception_resnet_v1
    network = importlib.import_module(args.model_def)
    image_size = (args.image_size, args.image_size)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    stat_file_name = os.path.join(log_dir, 'stat.h5')

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
        
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    dataset = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        dataset = filter_dataset(dataset, os.path.expanduser(args.filter_filename), 
            args.filter_percentile, args.filter_min_nrof_images_per_class)
        
    if args.validation_set_split_ratio>0.0:
        train_set, val_set = facenet.split_dataset(dataset, args.validation_set_split_ratio, args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
    else:
        train_set, val_set = dataset, []
        
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        # Get a list of image paths and their labels
        # Training data
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list)>0, 'The training set should not be empty'
        
        # Validation (test) data
        val_image_list, val_label_list = facenet.get_image_paths_and_labels(val_set)

        # Create a queue that produces indices into the image_list and label_list 
        # tf.convert_to_tensor converts different kinds of data (e.g. arrays or lists) into tensors.
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        # Multi-threaded index producer; shuffle=True means indices come back in random order and cycle indefinitely.
        # https://blog.csdn.net/lyg5623/article/details/69387917
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                             shuffle=True, seed=None, capacity=32)
        
        # epoch_size is the number of batches per pass over a large dataset; for a small dataset it should be roughly len(dataset) / batch_size
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
        control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
        
        nrof_preprocess_threads = 4
        input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                    dtypes=[tf.string, tf.int32, tf.int32],
                                    shapes=[(1,), (1,), (1,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='enqueue_op')
        image_batch, label_batch = facenet.create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)

        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')
        
        print('Number of classes in training set: %d' % nrof_classes)
        print('Number of examples in training set: %d' % len(image_list))

        print('Number of classes in validation set: %d' % len(val_set))
        print('Number of examples in validation set: %d' % len(val_image_list))
        
        print('Building training graph')
        
        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
            weight_decay=args.weight_decay)
        # The network output (bottleneck_layer_size) does not include the final layer mapping to the image classes, so that layer is added here.
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None, 
                weights_initializer=slim.initializers.xavier_initializer(), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                scope='Logits', reuse=False)

        # L2-normalize each row: square the elements, sum per row, take the square root and
        # divide every element of the row by that value. The inference network's last layer
        # outputs bottleneck_layer_size nodes (128 here) via
        # slim.fully_connected(net, bottleneck_layer_size, activation_fn=None).
        # https://blog.csdn.net/abiggg/article/details/79368982
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # The loss is assembled here; other trainable parameters can also contribute, since
        # weights added during training via tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, weight)
        # are folded into the optimized loss at the end.
        # L = L_softmax + lambda * L_center = Softmax(W_i * x_i + b_yj) + lambda * 1/2 * ||f(x_i) - c_yj||_2^2
        # Norm for the prelogits
        eps = 1e-4
        prelogits_norm = tf.reduce_mean(tf.norm(tf.abs(prelogits)+eps, ord=args.prelogits_norm_p, axis=1))
        # The mean norm of the final output (bottleneck_layer_size values per example) is added to the regularization loss, although prelogits_norm_loss_factor appears to be 0 by default.
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_norm * args.prelogits_norm_loss_factor)

        # Compute the center loss and add it to the regularization losses
        # Add center loss
        prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)
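        # (Illustrative note, not from the original script.) With staircase=True the decayed
        # rate is learning_rate_placeholder * decay_factor ** floor(global_step / (decay_epochs * epoch_size)).
        # For example, assuming lr=0.1, decay_factor=0.9 and decay_epochs*epoch_size=1000:
        #   step  999 -> 0.1 * 0.9**0 = 0.1
        #   step 1000 -> 0.1 * 0.9**1 = 0.09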

        
        # Calculate the average cross entropy loss across the batch
        # Compute the prediction loss, i.e. the Softmax(W_i * x_i + b_yj) term in the formula above
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        # Add the mean prediction loss to the 'losses' collection
        tf.add_to_collection('losses', cross_entropy_mean)
        
        correct_prediction = tf.cast(tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)), tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
        
        # Compute the total loss: cross_entropy_mean plus the regularization losses added earlier (including those from the model), retrieved via tf.GraphKeys.REGULARIZATION_LOSSES
        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
        
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            nrof_steps = args.max_nrof_epochs*args.epoch_size
            nrof_val_samples = int(math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs))   # Validate every validate_every_n_epochs as well as in the last epoch
            stat = {
                'loss': np.zeros((nrof_steps,), np.float32),
                'center_loss': np.zeros((nrof_steps,), np.float32),
                'reg_loss': np.zeros((nrof_steps,), np.float32),
                'xent_loss': np.zeros((nrof_steps,), np.float32),
                'prelogits_norm': np.zeros((nrof_steps,), np.float32),
                'accuracy': np.zeros((nrof_steps,), np.float32),
                'val_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_xent_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_accuracy': np.zeros((nrof_val_samples,), np.float32),
                'lfw_accuracy': np.zeros((args.max_nrof_epochs,), np.float32),
                'lfw_valrate': np.zeros((args.max_nrof_epochs,), np.float32),
                'learning_rate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_train': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_validate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_evaluate': np.zeros((args.max_nrof_epochs,), np.float32),
                'prelogits_hist': np.zeros((args.max_nrof_epochs, 1000), np.float32),
              }
            for epoch in range(1,args.max_nrof_epochs+1):
                step = sess.run(global_step, feed_dict=None)
                # Train for one epoch
                t = time.time()
                # Train the model
                cont = train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file,
                    stat, cross_entropy_mean, accuracy, learning_rate,
                    prelogits, prelogits_center_loss, args.random_rotate, args.random_crop, args.random_flip, prelogits_norm, args.prelogits_hist_max, args.use_fixed_image_standardization)
                stat['time_train'][epoch-1] = time.time() - t
                
                if not cont:
                    break
                # Compute accuracy on the validation data
                t = time.time()
                if len(val_image_list)>0 and ((epoch-1) % args.validate_every_n_epochs == args.validate_every_n_epochs-1 or epoch==args.max_nrof_epochs):
                    validate(args, sess, epoch, val_image_list, val_label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
                        phase_train_placeholder, batch_size_placeholder, 
                        stat, total_loss, regularization_losses, cross_entropy_mean, accuracy, args.validate_every_n_epochs, args.use_fixed_image_standardization)
                stat['time_validate'][epoch-1] = time.time() - t

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, epoch)

                # Evaluate on LFW
                t = time.time()
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, 
                        embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer, stat, epoch, 
                        args.lfw_distance_metric, args.lfw_subtract_mean, args.lfw_use_flipped_images, args.use_fixed_image_standardization)
                stat['time_evaluate'][epoch-1] = time.time() - t

                print('Saving statistics')
                with h5py.File(stat_file_name, 'w') as f:
                    for key, value in stat.items():
                        f.create_dataset(key, data=value)
    
    return model_dir
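The comments in this example describe the objective as L = L_softmax + lambda * L_center. As a rough illustration only (not part of the facenet script), the NumPy sketch below computes that combined value for a toy batch; the shapes, the center table and the center_loss_factor value are made-up stand-ins for the script's arguments.

import numpy as np

def softmax_xent(logits, labels):
    # numerically stable softmax cross-entropy, averaged over the batch
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    return -log_probs[np.arange(len(labels)), labels].mean()

def center_loss(embeddings, labels, centers):
    # half the mean squared distance between each embedding and its class center
    diffs = embeddings - centers[labels]
    return 0.5 * np.mean(np.sum(diffs ** 2, axis=1))

rng = np.random.default_rng(0)
logits = rng.normal(size=(4, 3))        # batch of 4 examples, 3 classes
embeddings = rng.normal(size=(4, 128))  # toy 128-d prelogits
labels = np.array([0, 2, 1, 2])
centers = rng.normal(size=(3, 128))     # one center per class

center_loss_factor = 0.5                # stand-in for args.center_loss_factor
total = softmax_xent(logits, labels) + center_loss_factor * center_loss(embeddings, labels, centers)
print('total loss: %.4f' % total)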
Beispiel #35
0
from matplotlib.finance import quotes_historical_yahoo_ochl  # Note: matplotlib no longer provides quotes_historical_yahoo; use quotes_historical_yahoo_ochl instead
from datetime import date
from datetime import datetime
import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt  # or, with pylab: import pylab as pl

today = date.today()
start = (today.year - 1, today.month, today.day)
quotes = quotes_historical_yahoo_ochl('AXP', start, today)  # American Express (AXP) quotes for the past year
fields = ['date', 'open', 'close', 'high', 'low', 'volume']
list1 = []
for i in range(0, len(quotes)):
    x = date.fromordinal(int(quotes[i][0]))
    y = datetime.strftime(x, "%Y-%m-%d")
    list1.append(y)

qutoesdf = pd.DataFrame(quotes, index=list1,
                        columns=fields)  # The index argument replaces the default index; the Gregorian ordinal dates were converted above
qutoesdf = qutoesdf.drop(['date'], axis=1)
# print qutoesdf

# Count the number of trading days in each month over the past year

listtemp = []
for i in range(0, len(qutoesdf)):
    temp = time.strptime(qutoesdf.index[i], "%Y-%m-%d")
    listtemp.append(temp.tm_mon)
# print listtemp
tempdf = qutoesdf.copy()
Beispiel #36
0
def current_date():
    return move_busdays(datetime.strftime(datetime.now(), '%Y-%m-%d'), 0)
Beispiel #37
0
def today_date():
    return datetime.strftime(datetime.now(), '%Y-%m-%d')
Beispiel #38
0
def optimize_diameters_execs(path, inpfile, execs, **kwargs):
    """ Optimize diameters

    Optimize pipe diameters of a hydraulic network using Genetic Algorithms.
    
    Warning! This assumes single-objective optimization, so only the first
    element of the best solutions tuple is going to be used.

    :param str path: path to the input file.
    :param str inpfile: EPANET's input file (INP) with network data.
    :param int pop: population size or number of individuals.
    :param int gen: number of generations.
    :param float cxpb: crossover (mating) probability.
    :param float mutpb: mutation probability.
    :param float indpb: independent probability of mutating each attribute (gene).
    :param str dir: directory to save the results.
    :return best: a list of diameters (best solution)
    """

    start = datetime.now()

    _unit_price = kwargs.get('prices', {})
    _popsize = kwargs.get('pop', 200)
    _cxpb = kwargs.get('cxpb', 0.9)
    _mutpb = kwargs.get('mutpb', 0.02)
    _indpb = kwargs.get('indpb', 0.05)
    _generations = kwargs.get('gen', 500)
    _dir = kwargs.get('dir', 'results/')

    f = '%Y_%m_%d-%H_%M_%S'
    _stats = _dir + 'ga_dimen_' + datetime.strftime(start, f) + '.csv'
    _sol = _stats[:-4] + '.txt'

    best = None
    stats = pd.DataFrame(
        columns=['gen', 'nevals', 'avg', 'std', 'min', 'max', 'bestFit'])

    for i in range(execs):
        print('Execution {0} of {1} ...'.format(i + 1, execs))

        b, p, r = optimize_diameters(path,
                                     inpfile,
                                     prices=_unit_price,
                                     pop=_popsize,
                                     cxpb=_cxpb,
                                     mutpb=_mutpb,
                                     indpb=_indpb,
                                     gen=_generations)
        if best is None:
            best = b.items[0]
        elif b.items[0].fitness.values[0] < best.fitness.values[0]:
            best = b.items[0]
        print("Best iter: ", best.fitness.values[0])
        # print("\nMy Dataframe\n")
        df = pd.DataFrame(r)
        df['bestFit'] = best.fitness.values[0]
        stats = stats.append(df)

    # Runtime
    runtime = datetime.now() - start
    print('Run time: {0} executions in {1}'.format(execs, runtime))

    # Save statistics to a text file
    # stats.to_csv(_stats)
    stats.to_csv(_stats, index=False)

    # Plot the convergence chart
    plot_convergence(execs, _generations, _stats)

    # Save best solution to a text file
    with open(_sol, 'a') as f:
        f.write('Best solution:\t{0}\n'.format(best))
        f.write('Best fitness:\t{0}\n'.format(best.fitness.values[0]))
        f.write('Run time:\t{0}'.format(runtime))

    return best
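A hypothetical call to the function above might look like the sketch below; the path, input file, price table and GA settings are illustrative only, and optimize_diameters / plot_convergence must be available in the same module.

# Illustrative usage only; paths and prices are made up.
prices = {100: 11.5, 150: 16.2, 200: 23.9}   # diameter -> unit price
best = optimize_diameters_execs('networks/', 'twoloop.inp', execs=3,
                                prices=prices, pop=100, gen=250,
                                cxpb=0.9, mutpb=0.02, indpb=0.05,
                                dir='results/')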
Beispiel #39
0
import time
from datetime import date, datetime

import click
from sqlalchemy import create_engine

sys.path.append('.')
sys.path.append('../ajna_docs/commons')
from ajna_commons.flask.conf import SQL_URI
from virasana.db import mongodb as db
from virasana.integracao.mercante import mercante_fsfiles
from virasana.integracao.mercante import processa_xml_mercante
from virasana.integracao.mercante import resume_mercante

today = date.today()
str_today = datetime.strftime(today, '%d/%m/%Y')


@click.command()
@click.option('--dias',
              default=10,
              help='Quantidade de dias a processar para trás - padrão 10')
@click.option('--fim',
              default=str_today,
              help='Dia de fim (dia/mês/ano) - padrão hoje')
@click.option('--download',
              is_flag=True,
              help='Baixar arquivos das datas da API do Aniita')
def update(dias, fim, download):
    """Script de linha de comando para integração do arquivo XML."""
    end = datetime.strptime(fim, '%d/%m/%Y')
Beispiel #40
0
    def image_color_histogram(self,
                              w: int = 12,
                              h: int = 8,
                              num_channels=3,
                              save_path: str = None):
        """
        Plot image color channel's histogram
        :param w: plot width
        :param h: plot height
        :param num_channels: image depth (number of channels)
        :param save_path: plot image save directory path, if None - do not save
        :return:
        """
        if save_path and not os.path.isdir(save_path):
            raise ValueError("Save path is not a directory or doesn't exists.")

        channels = [np.zeros((256, 1)) for i in range(num_channels)]
        ch_labels = ["red", "green", "blue"] if num_channels == 3 else ["gray"]
        num_images = 0
        for ann in tqdm(self._annotations):
            if not os.path.exists(ann.img_path):
                continue
            image = cv2.imread(ann.img_path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            num_channels = image.shape[2]
            if num_channels != 3:
                continue

            for j in range(num_channels):
                channels[j] += cv2.calcHist(image, [j], None, [256], [0, 256])

            num_images += 1

        if num_images != 0:
            for j in range(num_channels):
                channels[j] /= num_images
        else:
            raise ValueError("Image paths are not correct.")

        # create plot
        figure = plt.figure(figsize=(w, h))
        plt.title("Pixel channel distribution", fontsize=20)
        plt.axis("off")
        cols, rows = 2, 2
        for i in range(1, cols * rows + 1):
            figure.add_subplot(rows, cols, i)
            if i > num_channels + 1:
                break

            if i == num_channels + 1:
                for j in range(num_channels):
                    plt.plot(channels[j], color=ch_labels[j], alpha=0.8)
                plt.legend(ch_labels)
            else:
                plt.plot(channels[i - 1], color=ch_labels[i - 1], alpha=0.8)
                plt.legend([ch_labels[i - 1]])

            plt.ylabel("frequency",
                       color="gray",
                       fontsize=14,
                       fontweight="bold")
            plt.xlim([0, 256])
            plt.ylim(ymin=0)

        if save_path:
            plot_save_name = "image_color_histogram_{}.png".format(
                datetime.strftime(datetime.today(), "%y%m%d%H%M"))
            plt.savefig(os.path.join(save_path, plot_save_name))
        else:
            plt.show()
Beispiel #41
0
    def image_size_histogram(self,
                             w: int = 10,
                             h: int = 7,
                             save_path: str = None):
        """
        Plot image size frequency distribution
        :param w: plot width
        :param h: plot height
        :param save_path: plot image save directory path, if None - do not save
        :return:
        """
        if save_path and not os.path.isdir(save_path):
            raise ValueError("Save path is not a directory or doesn't exists.")

        info_df = self.info_df[["img_width", "img_height"]]
        size_images = np.array(info_df)
        unique_size_images = np.unique(size_images, axis=0)

        if len(unique_size_images.shape) == 1:
            unique_size_images = [unique_size_images]

        freq_unique_size_images = [
            len(self.info_df[size_images == unique_size_image]
                ["filename"].unique())
            for unique_size_image in unique_size_images
        ]

        # histogram of how many images have each unique size
        fig, ax = plt.subplots(figsize=(w, h))
        plt.gcf().subplots_adjust(bottom=0.15)
        plt.title("Image size distribution", fontsize=20)
        plt.ylabel("frequency", color="gray", fontsize=14, fontweight="bold")

        rects = plt.bar(np.arange(len(unique_size_images)),
                        freq_unique_size_images,
                        color="lightskyblue")
        plt.xticks(np.arange(len(unique_size_images)))
        ax.set_xticklabels(unique_size_images,
                           rotation=45,
                           fontdict={
                               "horizontalalignment": "center",
                               "size": 12
                           })

        for rect in rects:
            height = rect.get_height()
            ax.annotate(
                "{}".format(height),
                xy=(rect.get_x() + rect.get_width() / 2, height),
                xytext=(0, 3),  # 3 points vertical offset
                textcoords="offset points",
                ha="center",
                va="bottom",
            )

        if save_path:
            plot_save_name = "image_size_histogram_{}.png".format(
                datetime.strftime(datetime.today(), "%y%m%d%H%M"))
            plt.savefig(os.path.join(save_path, plot_save_name))
        else:
            plt.show()
from datetime import datetime

birthday = datetime(1991, 2, 4, 4)
print(birthday)

print("Year:  " + str(birthday.year))
print("Month: " + str(birthday.month))
print("Day:   " + str(birthday.day))
print("Hour:  " + str(birthday.hour))

print(datetime.now() - birthday)

parsed_date = datetime.strptime('Jan 15, 2018', '%b %d, %Y')
print(parsed_date.month)

date_string = datetime.strftime(datetime.now(), '%b %d, %Y')
print(date_string)
async def test_case_pool_upgrade(docker_setup_and_teardown, pool_handler,
                                 wallet_handler, get_default_trustee,
                                 adder_role, adder_role_num, editor_role,
                                 editor_role_num):
    trustee_did, _ = get_default_trustee
    # add adder to start pool upgrade
    adder_did, adder_vk = await did.create_and_store_my_did(
        wallet_handler, '{}')
    res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did,
                         adder_vk, None, adder_role)
    assert res['op'] == 'REPLY'
    # add editor to cancel pool upgrade
    editor_did, editor_vk = await did.create_and_store_my_did(
        wallet_handler, '{}')
    res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did,
                         editor_vk, None, editor_role)
    assert res['op'] == 'REPLY'
    # set rule for adding
    req = await ledger.build_auth_rule_request(
        trustee_did, '109', 'ADD', 'action', '*', 'start',
        json.dumps({
            'constraint_id': 'ROLE',
            'role': adder_role_num,
            'sig_count': 1,
            'need_to_be_owner': False,
            'metadata': {}
        }))
    res2 = json.loads(await
                      ledger.sign_and_submit_request(pool_handler,
                                                     wallet_handler,
                                                     trustee_did, req))
    print(res2)
    assert res2['op'] == 'REPLY'
    # set rule for editing
    req = await ledger.build_auth_rule_request(
        trustee_did, '109', 'EDIT', 'action', 'start', 'cancel',
        json.dumps({
            'constraint_id': 'ROLE',
            'role': editor_role_num,
            'sig_count': 1,
            'need_to_be_owner': False,
            'metadata': {}
        }))
    res3 = json.loads(await
                      ledger.sign_and_submit_request(pool_handler,
                                                     wallet_handler,
                                                     trustee_did, req))
    print(res3)
    assert res3['op'] == 'REPLY'
    # start pool upgrade
    init_time = 30
    version = '1.99.999'
    name = 'upgrade' + '_' + version + '_' + datetime.now(
        tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%S%z')
    _sha256 = hashlib.sha256().hexdigest()
    _timeout = 5
    reinstall = False
    force = False
    package = 'indy-node'
    dests = [
        'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv',
        '8ECVSk179mjsjKRLWiQtssMLgp6EPhWXtaYyStWPSGAb',
        'DKVxG2fXXTU8yT5N7hGEbXB3dfdAnYv1JczDUHpmDxya',
        '4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA',
        '4SWokCJWJc69Tn74VvLS6t2G2ucvXqM9FDMsWJjmsUxe',
        'Cv1Ehj43DDM5ttNBmC6VPpEfwXWwfGktHwjDJsTV5Fz8',
        'BM8dTooz5uykCbYSAAFwKNkYfT4koomBHsSWHTDtkjhW'
    ]
    docker_7_schedule = json.dumps(
        dict({
            dest: datetime.strftime(
                datetime.now(tz=timezone.utc) +
                timedelta(minutes=init_time + i * 5), '%Y-%m-%dT%H:%M:%S%z')
            for dest, i in zip(dests, range(len(dests)))
        }))
    req = await ledger.build_pool_upgrade_request(adder_did, name, version,
                                                  'start', _sha256, _timeout,
                                                  docker_7_schedule, None,
                                                  reinstall, force, package)
    res4 = json.loads(await
                      ledger.sign_and_submit_request(pool_handler,
                                                     wallet_handler, adder_did,
                                                     req))
    print(res4)
    assert res4['op'] == 'REPLY'
    # cancel pool upgrade
    req = await ledger.build_pool_upgrade_request(editor_did, name, version,
                                                  'cancel', _sha256, _timeout,
                                                  docker_7_schedule, None,
                                                  reinstall, force, package)
    res5 = json.loads(await
                      ledger.sign_and_submit_request(pool_handler,
                                                     wallet_handler,
                                                     editor_did, req))
    print(res5)
    assert res5['op'] == 'REPLY'
Beispiel #44
0
    def objects_frequency_scatter(self,
                                  class_name: str = None,
                                  w: int = 12,
                                  h: int = 8,
                                  save_path: str = None):
        """
        Plot the frequency distribution of object sizes in the images
        :param class_name: monitoring only the specific class name
        :param w: plot width
        :param h: plot height
        :param save_path: plot image save directory path, if None - do not save
        :return:
        """
        if save_path and not os.path.isdir(save_path):
            raise ValueError("Save path is not a directory or doesn't exists.")

        if class_name:
            info_df = self.info_df[self.info_df["label"] == class_name]
        else:
            info_df = self.info_df

        box_widths = np.array(info_df["box_width"])
        box_heights = np.array(info_df["box_height"])

        # relative box shapes
        box_widths /= np.array(info_df["img_width"])
        box_heights /= np.array(info_df["img_height"])

        fig = plt.figure(figsize=(w, h))
        grid = plt.GridSpec(4, 4, hspace=0.5, wspace=0.5)

        main_ax = fig.add_subplot(grid[:-1, 1:])
        y_hist = fig.add_subplot(grid[:-1, 0], xticklabels=[], yticklabels=[])
        x_hist = fig.add_subplot(grid[-1, 1:], xticklabels=[], yticklabels=[])

        main_ax.set_title("Box size distribution", fontsize=20)
        main_ax.set_xlabel("box width/image width",
                           color="gray",
                           fontsize=14,
                           fontweight="bold")
        main_ax.set_ylabel("box height/image height",
                           color="gray",
                           fontsize=14,
                           fontweight="bold")

        # divide axis by 10 parts
        main_ax.xaxis.set_major_locator(plt.MaxNLocator(10))
        main_ax.yaxis.set_major_locator(plt.MaxNLocator(10))

        y_hist.set_xlabel("height frequency", fontweight="bold")
        y_hist.yaxis.set_major_locator(plt.MaxNLocator(10))
        x_hist.set_ylabel("width frequency", fontweight="bold")
        x_hist.xaxis.set_major_locator(plt.MaxNLocator(10))

        main_ax.plot(box_widths,
                     box_heights,
                     "bo",
                     markersize=3,
                     alpha=0.3,
                     color="deepskyblue")
        # main_ax.legend(['train', 'test'])

        x_hist.hist(box_widths,
                    700,
                    histtype="stepfilled",
                    orientation="vertical",
                    color="deepskyblue")
        x_hist.invert_yaxis()
        y_hist.hist(box_heights,
                    700,
                    histtype="stepfilled",
                    orientation="horizontal",
                    color="deepskyblue")
        y_hist.invert_xaxis()

        if save_path:
            plot_save_name = "objects_frequency_histogram_{}.png".format(
                datetime.strftime(datetime.today(), "%y%m%d%H%M"))
            plt.savefig(os.path.join(save_path, plot_save_name))
        else:
            plt.show()
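Assuming `analyzer` is an instance of the (unnamed) dataset-analysis class these plotting methods belong to, a hypothetical usage sketch might be:

# Hypothetical usage; `analyzer`, 'person' and 'plots/' are illustrative only.
analyzer.objects_frequency_scatter(class_name='person', w=12, h=8, save_path='plots/')
analyzer.image_color_histogram(save_path='plots/')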
Beispiel #45
0
network_huawei_ssh_ip = cf.get('network_huawei_ssh', 'network_huawei_ip').split(',')
network_cisco_telnet_ip = cf.get('network_cisco_telnet', 'network_cisco_ip').split(',')
network_dp_ssh_ip = cf.get('network_dp_ssh', 'network_dp_ip').split(',')
network_juniper_ssh_ip = cf.get('network_juniper_ssh', 'network_juniper_ip').split(',')
# Get the username
network_user = cf.get('network_user','network_user')
# Get the password
network_password = cf.get('network_user','network_password')


if __name__ == '__main__':
    mylogger = logger.Logger()
    s = GetNetworkConfig()
    # Get today's date
    now=datetime.now()
    today = datetime.strftime(now, '%Y%m%d')

    for huawei_ip in network_huawei_telnet_ip:
        logging.debug('-------Begin : ' + huawei_ip +'--------')
        login_cmd = "telnet "+huawei_ip.strip()
        config_file = backupdir+"/"+today+"_"+huawei_ip.strip()+"_config.txt"
        s.huawei_telnet_getconfig(login_cmd,network_user,network_password,config_file)
        logging.error('Success Backup!'+config_file)

    for huawei_ip in network_huawei_ssh_ip:
        logging.debug('-------Begin : ' + huawei_ip + '--------')
        login_cmd = "ssh ywpt@"+huawei_ip.strip()
        config_file = backupdir+"/"+today+"_"+huawei_ip.strip()+"_config.txt"
        s.huawei_ssh_getconfig(login_cmd,network_password,config_file)
        logging.error('Success Backup!'+config_file)
Beispiel #46
0
    def eds_file_parsing(self):
        self.ensure_one()
        record = str(base64.b64decode(self.eds_file), 'iso8859-4',
                     'strict').encode('iso8859-4', 'strict')
        dom = parseString(record)
        file_employees = dom.getElementsByTagName('gigv')
        if file_employees is None:
            return False
        emp_obj = self.env['hr.employee']
        rel_obj = self.env['hr.employee.relief']
        e_ids = [e.id for e in self.employee_ids]
        for fe in file_employees:
            emp_ids = []
            fe_pc = fe.getElementsByTagName('pers_kods')[0].toxml().replace(
                '<pers_kods>', '').replace('</pers_kods>', '')
            fe_name = fe.getElementsByTagName(
                'vards_uzvards')[0].toxml().replace('<vards_uzvards>',
                                                    '').replace(
                                                        '</vards_uzvards>', '')
            if fe_pc:
                emp_query_str = """SELECT id FROM hr_employee 
                    WHERE COALESCE(identification_id, '') != '' 
                    AND REPLACE(identification_id, '-', '') = '%s' 
                    AND id %s %s""" % (
                    fe_pc,
                    len(e_ids) == 1 and '=' or 'in',
                    len(e_ids) == 1 and e_ids[0] or tuple(e_ids),
                )
                self._cr.execute(emp_query_str)
                emp_ids = [r['id'] for r in self._cr.dictfetchall()]
            if (not e_ids) and fe_name:
                emp_query_str = """SELECT emp.id FROM hr_employee AS emp 
                    LEFT JOIN resource_resource AS res ON emp.resource_id = res.id 
                    WHERE UPPER(res.name) = %s 
                    AND emp.id %s %s""" % (
                    fe_name,
                    len(e_ids) == 1 and '=' or 'in',
                    len(e_ids) == 1 and e_ids[0] or tuple(e_ids),
                )
                self._cr.execute(emp_query_str)
                emp_ids = [r['id'] for r in self._cr.dictfetchall()]
            if emp_ids:
                dep_list = []
                dep_main = fe.getElementsByTagName('apgadajamie')
                if dep_main:
                    deps = dep_main[0].getElementsByTagName('apgadajamais')
                    for dep in deps:
                        dep_name = dep.getElementsByTagName(
                            'vards_uzvards')[0].toxml().replace(
                                '<vards_uzvards>',
                                '').replace('</vards_uzvards>', '')
                        dep_df = dep.getElementsByTagName(
                            'datums_no')[0].toxml().replace(
                                '<datums_no>', '').replace('</datums_no>', '')
                        dep_date_from = datetime.strftime(
                            datetime.strptime(dep_df,
                                              '%Y-%m-%dT%H:%M:%S').date(),
                            '%Y-%m-%d')
                        dep_dt = dep.getElementsByTagName(
                            'datums_lidz')[0].toxml().replace(
                                '<datums_lidz>',
                                '').replace('</datums_lidz>', '')
                        dep_date_to = datetime.strftime(
                            datetime.strptime(dep_dt,
                                              '%Y-%m-%dT%H:%M:%S').date(),
                            '%Y-%m-%d')
                        dep_list.append({
                            'type': 'dependent',
                            'name': dep_name,
                            'date_from': dep_date_from,
                            'date_to': dep_date_to
                        })
                dis_list = []
                add_main = fe.getElementsByTagName('papildu_atvieglojumi')
                if add_main:
                    adds = add_main[0].getElementsByTagName(
                        'papildu_atvieglojums')
                    for add in adds:
                        add_type = add.getElementsByTagName(
                            'veids')[0].toxml().replace('<veids>', '').replace(
                                '</veids>', '')
                        dis_type = False
                        if add_type == u'1. grupas invalīds':
                            dis_type = 'disability1'
                        if add_type == u'2. grupas invalīds':
                            dis_type = 'disability2'
                        if add_type == u'3. grupas invalīds':
                            dis_type = 'disability3'
                        if dis_type:
                            dis_df = add.getElementsByTagName(
                                'datums_no')[0].toxml().replace(
                                    '<datums_no>',
                                    '').replace('</datums_no>', '')
                            dis_date_from = datetime.strftime(
                                datetime.strptime(dis_df,
                                                  '%Y-%m-%dT%H:%M:%S').date(),
                                '%Y-%m-%d')
                            dis_dt = add.getElementsByTagName(
                                'datums_lidz')[0].toxml().replace(
                                    '<datums_lidz>',
                                    '').replace('</datums_lidz>', '')
                            dis_date_to = datetime.strftime(
                                datetime.strptime(dis_dt,
                                                  '%Y-%m-%dT%H:%M:%S').date(),
                                '%Y-%m-%d')
                            dis_list.append({
                                'type': dis_type,
                                'name': add_type,
                                'date_from': dis_date_from,
                                'date_to': dis_date_to
                            })
                umm_list = []
                umm_main = fe.getElementsByTagName('prognozetie_mnm')
                if umm_main:
                    umms = umm_main[0].getElementsByTagName('prognozetais_mnm')
                    for umm in umms:
                        umm_name = umm.getElementsByTagName(
                            'veids')[0].toxml().replace('<veids>', '').replace(
                                '</veids>', '')
                        umm_df = umm.getElementsByTagName(
                            'datums_no')[0].toxml().replace(
                                '<datums_no>', '').replace('</datums_no>', '')
                        umm_date_from = datetime.strftime(
                            datetime.strptime(umm_df,
                                              '%Y-%m-%dT%H:%M:%S').date(),
                            '%Y-%m-%d')
                        umm_dt = umm.getElementsByTagName(
                            'datums_lidz')[0].toxml().replace(
                                '<datums_lidz>',
                                '').replace('</datums_lidz>', '')
                        umm_date_to = datetime.strftime(
                            datetime.strptime(umm_dt,
                                              '%Y-%m-%dT%H:%M:%S').date(),
                            '%Y-%m-%d')
                        umm_amount = umm.getElementsByTagName(
                            'summa')[0].toxml().replace('<summa>', '').replace(
                                '</summa>', '')
                        umm_list.append({
                            'type': 'untaxed_month',
                            'name': umm_name,
                            'date_from': umm_date_from,
                            'date_to': umm_date_to,
                            'amount': float(umm_amount)
                        })
                for emp_id in emp_ids:
                    for dpl in dep_list:
                        self._cr.execute(
                            """SELECT id FROM hr_employee_relief 
                            WHERE type = 'dependent' 
                            AND employee_id = %s 
                            AND UPPER(name) = %s 
                            AND (date_from is Null OR date_from <= %s) 
                            AND (date_to is Null OR date_to >= %s)""", (
                                emp_id,
                                dpl['name'],
                                dpl['date_to'],
                                dpl['date_from'],
                            ))
                        dep_ids = [r['id'] for r in self._cr.dictfetchall()]
                        if dep_ids:
                            if len(dep_ids) > 1:
                                e_dep_ids = []
                                for dep in rel_obj.browse(dep_ids):
                                    if dep.date_from == dpl[
                                            'date_from'] or dep.date_to == dpl[
                                                'date_to']:
                                        e_dep_ids.append(dep.id)
                                if e_dep_ids:
                                    dep_ids = e_dep_ids
                            rel_obj.browse([dep_ids[0]]).write({
                                'date_from':
                                dpl['date_from'],
                                'date_to':
                                dpl['date_to']
                            })
                        if not dep_ids:
                            dep_data = dpl.copy()
                            dep_data.update({'employee_id': emp_id})
                            rel_obj.create(dep_data)
                    for dsl in dis_list:
                        diss = rel_obj.search([
                            ('employee_id', '=', emp_id),
                            ('type', '=', dsl['type']),
                            ('date_from', '=', dsl['date_from']),
                            ('date_to', '=', dsl['date_to'])
                        ])
                        if not diss:
                            dis_data = dsl.copy()
                            dis_data.update({'employee_id': emp_id})
                            rel_obj.create(dis_data)
                    for uml in umm_list:
                        umms = rel_obj.search([
                            ('employee_id', '=', emp_id),
                            ('type', '=', 'untaxed_month'),
                            ('date_from', '=', uml['date_from']),
                            ('date_to', '=', uml['date_to'])
                        ])
                        if umms:
                            umms.write({
                                'name': uml['name'],
                                'amount': uml['amount']
                            })
                        else:
                            umm_data = uml.copy()
                            umm_data.update({'employee_id': emp_id})
                            rel_obj.create(umm_data)


# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Beispiel #47
0
# Set the number of timesteps per sample.
STEP_COUNT = 100  # Typically 100.
# Set the number of samples per condition.
SAMPLE_COUNT = 5  # Typically 5.
# set the number of conditions per iteration.
CONDITIONS = 1  # Typically 2 for Caffe and 1 for LQR.
# Set the number of trajectory iterations to collect.
ITERATIONS = 10  # Typically 10.

x0s = []
ee_tgts = []
reset_conditions = []

common = {
    'experiment_name': 'my_experiment' + '_' + \
                       datetime.strftime(datetime.now(), '%m-%d-%y_%H-%M'),
    'experiment_dir': EXP_DIR,
    'data_files_dir': EXP_DIR + 'data_files/',
    'target_filename': EXP_DIR + 'target.npz',
    'log_filename': EXP_DIR + 'log.txt',
    'conditions': CONDITIONS,
}

# Set up each condition.
for i in xrange(common['conditions']):
    # Use hardcoded default vals init and target locations
    ja_x0 = np.zeros(SENSOR_DIMS[JOINT_ANGLES])
    ee_pos_x0 = np.zeros((1, 3))
    ee_rot_x0 = np.zeros((3, 3))

    ee_pos_tgt = EE_POS_TGT
Beispiel #48
0
allMergesCommand = "git log --merges --oneline --grep 'to development' | sed -e 's/.*from //g' -e 's/ to developme.*//g' -e 's/.*branch .origin\///g' -e 's/. into development//g'"
allMergesList = str(shell(allMergesCommand,
                          basedir + "/" + repository)).replace(
                              "\\n", ",").replace('b"', "").split(',')
FeatureBugfixHotfixMerged = [
    x for x in FeatureBugfixHotfix if x.split(' torigin/')[1] in allMergesList
]

# format list allBranches
allBranches = []
format_list(FeatureBugfixHotfix, merged=None)
format_list(FeatureBugfixHotfixMerged, merged='merged')

# define threshold dates
before_day_all = datetime.today() - timedelta(days=int(days_all))
before_day_all_str = datetime.strftime(before_day_all, '%Y-%m-%d')
before_day_merged = datetime.today() - timedelta(days=int(days_merged))
before_day_merged_str = datetime.strftime(before_day_merged, '%Y-%m-%d')
logging.info(
    'Defining threshold dates:{} For all branches - "{}"{} For merged branches - "{}"'
    .format('\n', before_day_all_str, '\n', before_day_merged_str))

# define a list of branches (both merged and not merged) to be removed
logging.info('Defining all  branches that need to be removed')
toRemove = [
    x for x in allBranches if x[0] <= before_day_all_str or (
        x[2] == 'merged' and x[0] <= before_day_merged_str)
]
logging.info('There are {} branches will be removed'.format(len(toRemove)))
logging.info('Here is the list of branches have to be removed:{} {}'.format(
    '\n', toRemove))
Beispiel #49
0
def cd(d):
    if not d:
        return ''
    dt = datetime.strptime(d, '%d/%m/%Y')
    return datetime.strftime(dt, '%Y/%m/%d')
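For reference, a couple of example calls showing the conversion (day-first in, year-first out, with empty strings passed through unchanged):

print(cd('25/12/2020'))  # -> 2020/12/25
print(cd(''))            # -> ''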
Beispiel #50
0
import os
from datetime import datetime, timedelta
os.environ.update({"DJANGO_SETTINGS_MODULE": "vgsite.settings"})

import django
django.setup()

import requests

from api.views import Api


NOW = datetime.now() + timedelta(hours=8)
print(NOW)
print("===== vg todays card catch =====")
if datetime.strftime(NOW, "%H:%M") == "09:31":
    Api("VG").get_vg_today_card()
print("===== ws todays card catch =====")
if datetime.strftime(NOW, "%H:%M") == "09:32":
    Api("WS").get_ws_today_card()
    for col in numeric_cols:
        try:
            a[col]=a[col].apply(quantile_replace)
        except:
            pass
    df_list.append(a)

df = pd.concat(df_list,axis=0)


#################
### Find  Missing Date-Hour Intervals

print('Finding Missing Data Intervals',end='\n\n')

df['day_hour'] = df['DateTime'].apply(lambda x: datetime.strftime(x, "%Y-%m-%d %H"))

df = df.groupby(['station', 'day_hour']).mean().reset_index()  # p01i and skyc1 are lost here because they are categorical columns

#### Create the hours that have no observations
df['day_hour'] = pd.to_datetime(df['day_hour'])
minimo = df.day_hour.min()
maximo = df.day_hour.max()

# Constrain longitude and latitude so the coordinates are always the same

#aux = df_clean[df_clean['station'].isin(inter_airport)]
lista = df[['station']].drop_duplicates().values.tolist()

# Create the dates
df2 = pd.DataFrame()
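The snippet stops after creating the empty frame. A minimal sketch of the assumed continuation (not from the original code) that builds the full station/hour grid described by the comments above could look like this:

# Assumed continuation: build every (station, hour) pair between minimo and maximo
full_hours = pd.date_range(minimo, maximo, freq='H')
stations = [s[0] for s in lista]
grid = pd.MultiIndex.from_product([stations, full_hours],
                                  names=['station', 'day_hour']).to_frame(index=False)
df_full = grid.merge(df, on=['station', 'day_hour'], how='left')  # missing hours become NaN rows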
    def parse1(self, response):
        hxs = Selector(response)

        if 'listing' in str(response.url):
            '''
            Assigning default values to items 
            '''
            self.item['management_by_landlord'] = 'None'
            self.item['areacode'] = 'None'
            self.item['mobile_lister'] = 'None'
            self.item['google_place_id'] = 'None'
            self.item['Launch_date'] = '0'
            self.item['age'] = '0'
            self.item['address'] = 'None'
            self.item['sublocality'] = 'None'
            self.item['lat'] = '0'
            self.item['longt'] = '0'
            self.item['price_per_sqft'] = '0'
            self.item['listing_by'] = 'None'
            self.item['name_lister'] = 'None'
            self.item['scraped_time'] = dt.now().strftime('%m/%d/%Y %H:%M:%S')
            self.item['listing_date'] = dt.now().strftime('%m/%d/%Y %H:%M:%S')
            self.item['updated_date'] = self.item['listing_date']

            self.item['platform'] = 'commonfloor'

            self.item['city'] = 'Kolkata'

            self.item['data_id'] = response.url.split('/')[-1]

            self.item['locality'] = response.xpath(
                '//p[@class="proj-value location-name"]/text()').extract_first(
                )

            self.item['property_type'] = 'Residential'

            try:
                self.item['Building_name'] = response.xpath(
                    '//span[@class="subH1"]/text()').extract_first().split(
                        'in ')[-1].split(self.item['locality'])[0].replace(
                            'at ', '')
            except:
                self.item['Building_name'] = 'None'

            price = response.xpath(
                '//div[@class="project-detail row card"]/div/div[@class="row"]/div[1]/div[1]/p[@class="proj-value"]/span/text()'
            ).extract_first()
            if 'Lakh' in price:
                price = str(float(price.split(' Lakh')[0]) * 100000)
                self.item['Selling_price'] = price
                self.item['Monthly_Rent'] = '0'
            elif 'Crore' in price:
                price = str(float(price.split(' Crore')[0]) * 10000000)
                self.item['Selling_price'] = price
                self.item['Monthly_Rent'] = '0'
            else:
                self.item['Selling_price'] = '0'
                self.item['Monthly_Rent'] = '0'

            if ((self.item['Selling_price'] == '0')
                    and (self.item['Monthly_Rent'] == '0')):
                self.item['price_on_req'] = 'TRUE'
            else:
                self.item['price_on_req'] = 'FALSE'

            car_area = response.xpath(
                '//div[@class="project-detail row card"]/div/div[@class="row"]/div[1]/div[2]/p[@class="proj-value"]/text()'
            ).extract_first()
            if (not '-' in car_area):
                self.item['carpet_area'] = car_area.split(' ')[0]
            else:
                self.item['carpet_area'] = '0'

            self.item['config_type'] = response.xpath(
                '//div[@class="project-unit row card unit-detail-widget"]/div/div[@class="row"]/div/div[@class="row firstRow"]/div[1]/p/span[1]/text()'
            ).extract_first() + 'BHK'
            self.item['config_type'] = self.item['config_type'].replace(
                ' ', '')

            self.item['txn_type'] = response.xpath(
                '//div[@class="project-unit row card unit-detail-widget"]/div/div[@class="row"]/div/div[@class="row otherDetails"]/div[1]/p[2]/text()'
            ).extract_first()

            try:
                if 'New' in self.item['txn_type']:
                    self.item['txn_type'] = 'Sale'
                    dates = response.xpath(
                        '//div[@class="project-detail row card"]/div/div[@class="row"]/div[2]/div[1]/p[2]/text()'
                    ).extract_first().replace("'", '20')
                    self.item['Possession'] = dt.strftime(
                        dt.strptime(dates, "%b%Y"), "%m/%d/%Y %H:%M:%S")
                else:
                    self.item['Possession'] = '0'
            except:
                logging.log(logging.ERROR, dates)
                self.item['Possession'] = '0'

            self.item['Details'] = response.xpath(
                '//p[@class="propStatus"]/text()').extract()

            titl = response.xpath(
                '//div[@class="row otherDetails"]/*/p[@class="title"]/text()'
            ).extract()
            no = titl.index('Listed on')

            try:
                list1 = self.item['Details'][no]
                self.item['listing_date'] = dt.strftime(
                    dt.strptime(list1, "%d %b"), "%m/%d") + "/2016 00:00:00"
                self.item['updated_date'] = self.item['listing_date']
            except:
                try:
                    list1 = self.item['Details'][no - 1]
                    self.item['listing_date'] = dt.strftime(
                        dt.strptime(list1, "%d %b"),
                        "%m/%d") + "/2016 00:00:00"
                    self.item['updated_date'] = self.item['listing_date']
                except:
                    self.item['listing_date'] = dt.now().strftime(
                        '%m/%d/%Y %H:%M:%S')
                    self.item['updated_date'] = self.item['listing_date']

            self.item['Bua_sqft'] = [
                area for area in self.item['Details'] if 'Sq. Ft.' in area
            ][0].replace(' Sq. Ft.', '')

            self.item['Status'] = response.xpath(
                '//*[@id="page_container"]/div[2]/div/div/div[1]/div[1]/div[1]/div[2]/div/div/div/div[2]/div[1]/p[2]/@title'
            ).extract_first()

            #self.item['Status'] = [stat for stat in self.item['Details'] if (('Unfurnished' in stat) or ('semi' in stat) or ('fully' in stat))]
            #self.item['Status'] = ''.join(self.item['Status'])

            self.item['Details'] = ' '.join(self.item['Details'])

            try:
                listin = response.xpath(
                    '//*[@id="page_container"]/div[2]/div/div/div[1]/div[1]/div[1]/div[2]/div/div/div/div[2]/div[4]/p[2]/@title'
                ).extract_first()
                if 'Owner' in listin:
                    self.item['listing_by'] = 'Owner'
                elif 'Builder' in listin:
                    self.item['listing_by'] = 'Builder'
                elif 'Broker' in listin:
                    self.item['listing_by'] = 'Agent'
                elif 'Agent' in listin:
                    self.item['listing_by'] = 'Agent'
                else:
                    self.item['listing_by'] = 'None'

                self.item['name_lister'] = listin.split('(')[0]
            except:
                self.item['listing_by'] = 'None'
                self.item['name_lister'] = 'None'

            #self.item['scraped_time'] = dt.now().strftime('%m/%d/%Y %H:%M:%S')

            if (((not self.item['Monthly_Rent'] == '0') and
                 (not self.item['Bua_sqft'] == '0') and
                 (not self.item['Building_name'] == 'None') and
                 (not self.item['lat'] == '0'))
                    or ((not self.item['Selling_price'] == '0') and
                        (not self.item['Bua_sqft'] == '0') and
                        (not self.item['Building_name'] == 'None') and
                        (not self.item['lat'] == '0'))
                    or ((not self.item['price_per_sqft'] == '0') and
                        (not self.item['Bua_sqft'] == '0') and
                        (not self.item['Building_name'] == 'None') and
                        (not self.item['lat'] == '0'))):
                self.item['quality4'] = 1
            elif (((not self.item['price_per_sqft'] == '0') and
                   (not self.item['Building_name'] == 'None') and
                   (not self.item['lat'] == '0'))
                  or ((not self.item['Selling_price'] == '0') and
                      (not self.item['Bua_sqft'] == '0') and
                      (not self.item['lat'] == '0'))
                  or ((not self.item['Monthly_Rent'] == '0') and
                      (not self.item['Bua_sqft'] == '0') and
                      (not self.item['lat'] == '0'))
                  or ((not self.item['Selling_price'] == '0') and
                      (not self.item['Bua_sqft'] == '0') and
                      (not self.item['Building_name'] == 'None'))
                  or ((not self.item['Monthly_Rent'] == '0') and
                      (not self.item['Bua_sqft'] == '0') and
                      (not self.item['Building_name'] == 'None'))):
                self.item['quality4'] = 0.5
            else:
                self.item['quality4'] = 0
            if ((not self.item['Building_name'] == 'None')
                    and (not self.item['listing_date'] == '0')
                    and (not self.item['txn_type'] == 'None')
                    and (not self.item['property_type'] == 'None')
                    and ((not self.item['Selling_price'] == '0') or
                         (not self.item['Monthly_Rent'] == '0'))):
                self.item['quality1'] = 1
            else:
                self.item['quality1'] = 0

            if ((not self.item['Launch_date'] == '0')
                    or (not self.item['Possession'] == '0')):
                self.item['quality2'] = 1
            else:
                self.item['quality2'] = 0

            if ((not self.item['mobile_lister'] == 'None')
                    or (not self.item['listing_by'] == 'None')
                    or (not self.item['name_lister'] == 'None')):
                self.item['quality3'] = 1
            else:
                self.item['quality3'] = 0

            yield self.item
# Put the folder "data/adaboost" from this repository into metabo/environment/hpo/data

import os
from metabo.eval.evaluate import eval_experiment
from metabo.eval.plot_results import plot_results
from metabo.environment.hpo.prepare_data import prepare_hpo_data
from metabo.policies.taf.generate_taf_data_hpo import generate_taf_data_hpo
from gym.envs.registration import register, registry
from datetime import datetime

# set evaluation parameters
afs_to_evaluate = ["MetaBO", "TAF-ME", "TAF-RANKING", "EI"]
rootdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "metabo")
logpath = os.path.join(rootdir, "iclr2020", "hpo", "MetaBO-ADABOOST-v0")
savepath = os.path.join(logpath, "eval",
                        datetime.strftime(datetime.now(), "%Y-%m-%d-%H-%M-%S"))
n_workers = 1
n_episodes = 15  # 15 test sets

prepare_hpo_data(model="adaboost",
                 datapath=os.path.join(rootdir, "environment", "hpo", "data",
                                       "adaboost"))

# evaluate all afs
for af in afs_to_evaluate:
    # set af-specific parameters
    if af == "MetaBO":
        features = [
            "posterior_mean", "posterior_std", "timestep", "budget", "x"
        ]
        pass_X_to_pi = False
Beispiel #54
0
    def process_pending_domains(self):
        '''
        Process pending domains:
        - get the list of domains from kdata cdn
        - if a domain already exists in script_data, skip it
        - otherwise insert it into script_data in wait_call_cdn_node mode
        '''

        # read cdn node vhost template
        with open(self.vhost_template_file) as f:
            template_data = f.read()

        # get all kdata cdn domains
        domains = self.get_domains_from_api('0')
        for item in domains:
            '''
            {u'status': 1, u'domain': u'cdn.1ly.co', u'user_id': 10, u'origin_server': u'http://kdata.1ly.co', u'created_at': u'2016-10-26 11:45:25', u'messages': u'domain.err_domain', u'updated_at': u'2016-10-27 11:09:01', u'kdata_domain': u'cdn-1ly-co-06601832710.kdatacdn.net', u'id': 7}
            '''
            domain = item['domain']
            kdata_domain = item['kdata_domain']
            origin_server = item['origin_server']
            kdata_id = item['id']

            # domain existed in script db?
            check_domain = self._db_domain_fetch_one_by_kdata_id(kdata_id)
            if check_domain:
                self._log('domain {} existed with data {}'.format(domain, check_domain))
                continue

            # force check is valid domain
            if self.must_check_valid_domain and self.is_valid_domain(domain) == False:
                self._log('domain {} is invalid'.format(item['domain']))
                self.update_domain(item['id'], 1, 'domain.err_domain')
                continue

            # force check is valid origin
            if self.must_check_valid_origin and self.is_valid_origin(origin_server) == False:
                self._log('origin {} is invalid'.format(origin_server))
                self.update_domain(item['id'], 1, 'domain.err_origin_server')
                continue

            origin_server = self.get_cdn_origin(origin_server, domain)
            if origin_server is None:
                self._log('origin {} is invalid'.format(item['origin_server']))
                self.update_domain(item['id'], 1, 'domain.err_origin_server')
                continue

            replaces = {
                '$domain': kdata_domain,
                '$origin_ip:port': origin_server,
            }

            # cdn node vhost data
            per_domain_vhost_file = '{}.yml'.format(kdata_domain)
            per_domain_template_data = template_data
            for find, repl in replaces.items():
                per_domain_template_data = per_domain_template_data.replace(find, repl)

            # create yml file
            with open(per_domain_vhost_file, 'w') as f:
                f.write(per_domain_template_data)

            # update db
            script_data = {
                'user_domain': domain
                ,'kdata_domain': kdata_domain
                ,'domain_id': str(kdata_id)
                ,'status': 'wait_call_cdn_node'
                ,'origin': item['origin_server']
                ,'comment': 'inserted from script at {}'.format(datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'))
            }
            self._db_domain_add(script_data)

            cfg = {
                'domain': kdata_domain
                ,'zone_type': 'A'
                ,'ip': ['123.30.171.238', '127.0.0.1']
                ,'proxy': False
            }

            self.kcf.add_config(cfg)
            res = self.kcf.add_record()
            if not res['status']:
                self.push_noti('[ERROR] Domain {} already exists --- {}'.format(kdata_domain, res))
            else:
                self.push_noti('[OK] Domain {} added'.format(kdata_domain))

            #push_ansible here
            kansible = Kansible({'playbook_path': '/path/to/playbook.yml'})
            kansible.test()
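# A minimal, standalone illustration of the vhost template substitution done above, assuming a
# template that uses the same $domain / $origin_ip:port placeholders. The helper name
# render_vhost and the inline sample template are made up for this sketch.
def render_vhost(template_data, kdata_domain, origin_server):
    replaces = {
        '$domain': kdata_domain,
        '$origin_ip:port': origin_server,
    }
    rendered = template_data
    for find, repl in replaces.items():
        rendered = rendered.replace(find, repl)
    return rendered

sample_template = "server_name: $domain\nupstream: $origin_ip:port\n"
print(render_vhost(sample_template, 'cdn-1ly-co-06601832710.kdatacdn.net', '203.0.113.10:80'))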
Beispiel #55
0
def fetch_incidents_command():
    """
    Parent command to wrap events and behavior fetch commands
    """

    # Check if it's been run
    now = datetime.today()
    yesterday = datetime.strftime(now - timedelta(days=1), "%Y-%m-%d")
    last_run = demisto.getLastRun()
    start_date = yesterday
    end_date = yesterday

    if "start_time" not in last_run or "complete_for_today" not in last_run:
        # first time integration is running
        start_date = datetime.strftime(now - timedelta(days=FIRST_RUN),
                                       "%Y-%m-%d")

    if last_run.get('complete_for_today') is True and last_run.get(
            'start_time') == yesterday:
        # wait until tomorrow to try again
        demisto.incidents([])
        return

    # Refresh JWT
    token = do_auth()

    # Fetch Events
    more_events = True
    page_token = None
    incidents = []

    while more_events:
        event_incidents, page_token = fetch_events_incidents_command(
            start_date, end_date, token, page_token)
        for incident in event_incidents:
            demisto.debug(
                "Adding event incident name={name}, type={type}, severity={severity}"
                .format(**incident))
        incidents += event_incidents
        if page_token is None:
            more_events = False

    # Fetch Behavior
    if BEHAVIOR_ENABLED:
        more_behavior = True
        next_offset = None

        while more_behavior:
            behavior_incidents, next_offset = fetch_behavior_incidents_command(
                start_date, token, next_offset)
            for incident in behavior_incidents:
                demisto.debug(
                    "Adding behavior incident name={name}, type={type}, severity={severity}"
                    .format(**incident))
            incidents += behavior_incidents
            if next_offset is None:
                more_behavior = False

    # Send all Incidents
    demisto.incidents(incidents)

    # Save last_run
    demisto.setLastRun({"complete_for_today": True, "start_time": yesterday})
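# A minimal, dependency-free sketch of the last-run gating logic above, with a plain dict
# standing in for demisto.getLastRun()/setLastRun(). The helper name should_fetch and the
# FIRST_RUN_DAYS constant are illustrative, not part of the integration.
from datetime import datetime, timedelta

FIRST_RUN_DAYS = 3  # assumed look-back for the very first run

def should_fetch(last_run, now=None):
    """Return (fetch, start_date, end_date) for a once-per-day incremental fetch."""
    now = now or datetime.today()
    yesterday = datetime.strftime(now - timedelta(days=1), "%Y-%m-%d")
    start_date = yesterday
    if "start_time" not in last_run or "complete_for_today" not in last_run:
        # first time the integration runs: look further back
        start_date = datetime.strftime(now - timedelta(days=FIRST_RUN_DAYS), "%Y-%m-%d")
    if last_run.get('complete_for_today') is True and last_run.get('start_time') == yesterday:
        return False, None, None  # yesterday's window was already fetched; wait until tomorrow
    return True, start_date, yesterday

print(should_fetch({}))  # first run: fetch with an extended start_date
print(should_fetch({'complete_for_today': True, 'start_time': '2000-01-01'}))  # stale run: fetch again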
Beispiel #56
0
def update_db(shadow_dir, station_list, dfile='shadows_data.db'):
    '''
     Update both tables: SHADOWS and STATIONS
    '''

    conn = sqlite3.connect(dfile)
    c = conn.cursor()
    today = datetime.strftime(datetime.now(), "%Y/%m/%d")
    #header :station_id,station_name,lon,lat
    new_stations = pd.read_csv(station_list, header=None)
    new_stations.columns = ['station_id', 'station_name', 'lon', 'lat']
    new_stations["Date"] = [today] * new_stations.shape[0]
    sql_com = "SELECT * FROM STATIONS"
    current_stations = pd.read_sql(sql_com, conn)
    for station in current_stations.station_id.values:
        new_stations.drop(
            new_stations.index[new_stations['station_id'] == station],
            inplace=True)
    if not new_stations.empty:
        #if it found something new, update sql data base
        print("Updating STATIONS table")
        new_stations.to_sql('STATIONS', conn, if_exists='append', index=False)
    else:
        print("No new stations added to STATIONS table")
        return
    #read the database again if it was updated
    current_stations = pd.read_sql(sql_com, conn)
    sql_com = "SELECT * FROM SHADOWS"
    shadow_old = pd.read_sql(sql_com, conn)
    #extract the info from the ifile
    from os.path import normpath, basename
    dir_info = basename(normpath(shadow_dir))
    #extract data from the name of directory
    maxdist, res, horstep, dummy = dir_info.replace("lh_", "").split("_")
    stations = []
    print("Checking for new data for SHADOWS table")
    for ifile in sorted(os.listdir(shadow_dir)):
        if ifile.startswith("lh_"):  #will probably find shadows.log here
            station = int(ifile.replace("lh_", "").split("_")[1])
            get_station = current_stations[current_stations['station_id'] ==
                                           station]
            if get_station.empty:
                print("Station %d not yet in STATIONS table" % station)
            else:
                print("Getting SHADOWS for station %d" % station)
                print("Reading shadows from %s" %
                      os.path.join(shadow_dir, ifile))
                read_shadows = pd.read_csv(os.path.join(shadow_dir, ifile),
                                           index_col=False)
                size = read_shadows.shape[0]
                az = read_shadows.azimuth.to_list()
                hor = read_shadows.horizon_height.to_list()
                lon = get_station.lon.values[0]
                lat = get_station.lat.values[0]
                station_name = get_station.station_name.values[0]
                shadow_new = pd.DataFrame({
                    "station_id": [station] * size,
                    "station_name": [station_name] * size,
                    "resolution": [res] * size,
                    "maxdistance": [maxdist] * size,
                    "horizonstep": [horstep] * size,
                    "azimuth": az,
                    "horizon_height": hor,
                    "Date": [today] * size
                })
                if shadow_old.empty:
                    shadow_new.to_sql('SHADOWS',
                                      conn,
                                      if_exists='append',
                                      index=False)
                else:
                    #drop from the new data any stations already in old data
                    for station in shadow_old.station_id.values:
                        shadow_new.drop(shadow_new.index[
                            shadow_new['station_id'] == station],
                                        inplace=True)
                    if not shadow_new.empty:
                        shadow_new.to_sql('SHADOWS',
                                          conn,
                                          if_exists='append',
                                          index=False)
                    else:
                        print("No new data added to the SHADOWS table")
    print("database updated")
    c.execute('''
    INSERT INTO DAILY_STATUS (station_id,station_name,Date)
    SELECT DISTINCT clt.station_id, ctr.station_name, clt.Date
    FROM STATIONS clt
    LEFT JOIN SHADOWS ctr ON clt.station_id = ctr.station_id
          ''')
    conn.commit()

    c.execute('''
    SELECT DISTINCT *
    FROM DAILY_STATUS
    WHERE Date = (SELECT max(Date) FROM DAILY_STATUS)
          ''')
    df = pd.DataFrame(c.fetchall(),
                      columns=['station_id', 'station_name', 'Date'])
    print("New data")
    print(df)
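# Standalone sketch of the file/directory naming conventions update_db() relies on, inferred
# from the parsing code above; the concrete names below are invented for illustration only.
from os.path import basename, normpath

shadow_dir = "/data/lh_5000_10_2_v1"        # lh_{maxdist}_{res}_{horstep}_{suffix}
ifile = "lh_area_1234_horizon.csv"          # second "_" field after the lh_ prefix = station id

dir_info = basename(normpath(shadow_dir))
maxdist, res, horstep, dummy = dir_info.replace("lh_", "").split("_")
station = int(ifile.replace("lh_", "").split("_")[1])
print(maxdist, res, horstep, station)       # -> 5000 10 2 1234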
Beispiel #57
0
    "Miramax", "DreamWorks", "New Line", "MGM", "Lionsgate"
]

STB = 10

ENTRIES = 100


def random_time():
    start = datetime.strptime('2017-04-01 12:00', '%Y-%m-%d %H:%M')
    end = datetime.strptime('2017-04-21 12:00', '%Y-%m-%d %H:%M')
    delta = end - start
    int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
    random_second = random.randrange(int_delta)
    return start + timedelta(seconds=random_second)


count = 0

with open(FILENAME, 'a') as out_file:
    while count < ENTRIES:
        box_id = 'stb' + str(random.randint(1, STB))
        title = random.choice(TITLES)
        provider = random.choice(PROVIDERS)
        time = random_time()
        revenue = float(random.randint(1, 10))
        line = box_id + '|' + title + '|' + provider + '|' + datetime.strftime(time, '%Y-%m-%d') \
               + '|' + '{0:.2f}'.format(revenue) + '|' + datetime.strftime(time, '%H:%M') + '\n'
        out_file.write(line)
        count += 1
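# One fixed example of the pipe-delimited record format produced by the loop above; the title
# value here is invented and need not appear in the TITLES list.
example_time = datetime.strptime('2017-04-05 18:30', '%Y-%m-%d %H:%M')
example_line = 'stb3' + '|' + 'Some Title' + '|' + 'Miramax' + '|' \
               + datetime.strftime(example_time, '%Y-%m-%d') + '|' + '{0:.2f}'.format(4.0) \
               + '|' + datetime.strftime(example_time, '%H:%M') + '\n'
print(example_line)  # stb3|Some Title|Miramax|2017-04-05|4.00|18:30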
Beispiel #58
0
def convert_date(date_str):
    objDate = datetime.strptime(date_str, '%Y-%m-%d')
    return datetime.strftime(objDate, '%d %b %Y')
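# Quick usage example for convert_date() (assumes the same datetime import as the snippet):
# an ISO date is reformatted as day / abbreviated month / year.
print(convert_date('2021-03-07'))  # -> 07 Mar 2021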
Beispiel #59
0
def Community():
    user = request.form.get('userid')
    page = request.form.get('page')
    if not page:
        page = 1
    else:
        page = int(page)
    if request.method == 'POST' and request.form.get('sort') == '0':
        # Default ordering is by time; when the posted sort value is 0, order by popularity instead.
        # A = Blog.query.order_by(Blog.num_of_view.desc()).paginate(page, per_page=10,
        #                                                           error_out=False).items  # desc() sorts descending; without it, ascending
        A = db.session.query(Blog, User).join(
            User, Blog.user_id == User.id).order_by(
                Blog.num_of_view.desc()).paginate(page,
                                                  per_page=10,
                                                  error_out=False).items

        payload = []
        content = {}
        for AA in A:
            # name = User.query.get(AA.user_id).nickname
            # face = User.query.get(AA.user_id).face
            # datetime = AA.create_time
            # time = datetime.strftime('%Y-%m-%d %H:%M:%S')
            # content = {'id': AA.id, 'title': AA.title, 'authorid': AA.user_id, 'pageviews': AA.num_of_view,
            #            'time': time, "author": name, "avatar": face}

            name = AA[1].nickname
            face = AA[1].face
            time = AA[0].create_time.strftime('%Y-%m-%d %H:%M:%S')
            content = {
                'id': AA[0].id,
                'title': AA[0].title,
                'authorid': AA[0].user_id,
                'pageviews': AA[0].num_of_view,
                'time': time,
                "author": name,
                "avatar": face
            }
            payload.append(content)
            content = {}
        data = {"data": payload}
        payload = json.dumps(data)
        return payload, 200

    if request.method == 'POST' and request.form.get('sort') == '2':
        if user is None:
            return "error"
        # Default ordering is by time; when the posted sort value is 2, only show blogs from followed authors.
        # Two-table join: join the Blog table with the Follow table; returns a list of row tuples.
        userblogs = db.session.query(Blog, Follow).join(
            Follow, Blog.user_id == Follow.followed_id).filter(
                Follow.follower_id == user).paginate(page,
                                                     per_page=10,
                                                     error_out=False).items
        payload = []
        content = {}
        for blog in userblogs:
            AA = blog[0]  # the matching blog row
            name = User.query.get(AA.user_id).nickname
            face = User.query.get(AA.user_id).face
            time = AA.create_time.strftime('%Y-%m-%d %H:%M:%S')
            content = {
                'id': AA.id,
                'title': AA.title,
                'authorid': AA.user_id,
                'pageviews': AA.num_of_view,
                'time': time,
                "author": name,
                "avatar": face
            }
            payload.append(content)
            content = {}
        data = {"data": payload}
        payload = json.dumps(data)
        return payload, 200

    # Newly published posts have the largest timestamps, so sorting with desc() puts them first.
    # A = Blog.query.order_by(Blog.create_time.desc()).paginate(page, per_page=10, error_out=False).items
    A = db.session.query(Blog, User).join(
        User, Blog.user_id == User.id).order_by(
            Blog.create_time.desc()).paginate(page,
                                              per_page=10,
                                              error_out=False).items
    payload = []
    content = {}
    for AA in A:
        # name = User.query.get(AA.user_id).nickname
        # face = User.query.get(AA.user_id).face
        # datetime = AA.create_time
        name = AA[1].nickname
        face = AA[1].face
        time = AA[0].create_time.strftime('%Y-%m-%d %H:%M:%S')
        # content = {'id': AA.id, 'title': AA.title, 'author': name, 'pageviews': AA.num_of_view,
        #            'time': time, "authorid": AA.user_id, "avatar": face}
        content = {
            'id': AA[0].id,
            'title': AA[0].title,
            'author': name,
            'pageviews': AA[0].num_of_view,
            'time': time,
            "authorid": AA[0].user_id,
            "avatar": face
        }
        payload.append(content)
        content = {}
    data = {"data": payload}
    payload = json.dumps(data)
    return payload, 200
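# A database-free sketch of the payload-building loop used in Community() above. SimpleNamespace
# objects stand in for the Blog/User ORM rows returned by the join; rows_to_payload is an
# illustrative helper name, not part of the original view.
import json
from datetime import datetime
from types import SimpleNamespace

def rows_to_payload(rows):
    payload = []
    for blog, user in rows:
        payload.append({
            'id': blog.id,
            'title': blog.title,
            'authorid': blog.user_id,
            'pageviews': blog.num_of_view,
            'time': blog.create_time.strftime('%Y-%m-%d %H:%M:%S'),
            'author': user.nickname,
            'avatar': user.face,
        })
    return json.dumps({'data': payload})

blog = SimpleNamespace(id=1, title='hello', user_id=7, num_of_view=42,
                       create_time=datetime(2020, 5, 1, 12, 0, 0))
user = SimpleNamespace(nickname='alice', face='avatar.png')
print(rows_to_payload([(blog, user)]))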
    def parse(self, response):
        record = Selector(response)

        data = record.xpath('//div[contains(@id,"resultBlockWrapper")]')

        for i in data:
            item = MagicbuyhydraItem()

            item['name_lister'] = 'None'
            item['Details'] = 'None'
            item['listing_by'] = 'None'
            item['address'] = 'None'
            item['sublocality'] = 'None'
            item['age'] = '0'
            item['google_place_id'] = 'None'
            item['lat'] = '0'
            item['longt'] = '0'
            item['Possession'] = '0'
            item['Launch_date'] = '0'
            item['mobile_lister'] = 'None'
            item['areacode'] = 'None'
            item['management_by_landlord'] = 'None'
            item['monthly_rent'] = '0'
            item['price_per_sqft'] = '0'

            item['scraped_time'] = dt.now().strftime('%m/%d/%Y %H:%M:%S')

            item['city'] = 'Hyderabad'

            item['Building_name'] = i.xpath('div/input[contains(@id,"projectName")]/@value').extract_first()
            if item['Building_name'] == '':
                item['Building_name'] = 'None'

            try:
                item['longt'] = i.xpath('div/div[@class="srpColm2"]/div[@class="proColmleft"]/div[@class="proNameWrap proNameWrapBuy"]/div[@class="proNameColm1"]/span[@class="seeOnMapLink seeOnMapLinkBuy"]/span[@class="stopParentLink"]/@onclick').extract_first().split('&')[0].split('?')[-1].split("=")[-1] # .xpath('.//input[@itemprop="latitude"]/@value').extract_first()
                if item['longt'] == '':
                    item['longt'] = '0'
            except:
                item['longt'] = '0'

            try:
                item['lat'] = i.xpath('div/div[@class="srpColm2"]/div[@class="proColmleft"]/div[@class="proNameWrap proNameWrapBuy"]/div[@class="proNameColm1"]/span[@class="seeOnMapLink seeOnMapLinkBuy"]/span[@class="stopParentLink"]/@onclick').extract_first().split('&')[1].split("=")[-1]
                if item['lat'] == '':
                    item['lat'] = '0'
            except:
                item['lat'] = '0'

            item['platform'] = 'magicbricks'
            item['carpet_area'] = '0'

            ids = i.xpath('@id').extract_first()
            item['data_id'] = re.findall('[0-9]+', ids)[0]

            item['config_type'] = i.xpath('.//input[contains(@id,"bedroomVal")]/@value').extract_first().replace('>', '')
            item['config_type'] = item['config_type']+'BHK'

            item['property_type'] = i.xpath('.//p[@class="proHeading"]/a/input[2]/@value').extract_first()
            if (item['property_type'] == 'Studio Apartment'):
                item['config_type'] = '1RK'

            ####### config_type is assumed as 'None' for default #########

            if item['config_type'] == 'BHK':
                item['config_type'] = 'None'
            
            try:
                sqf = i.xpath('.//input[contains(@id,"propertyArea")]/@value').extract_first()
                if 'sqft' in sqf:
                    item['Bua_sqft'] = ''.join(re.findall('[0-9]+', sqf))
                elif 'kottah' in sqf:
                    item['Bua_sqft'] = str(int(re.findall('[0-9]+', sqf)[0]) * 720)
                else:
                    item['Bua_sqft'] = '0'
            except:
                item['Bua_sqft'] = '0'

            item['Locality'] = i.xpath('div/div[@class="srpColm2"]/div[@class="proColmleft"]/div[1]/div/p/a/abbr/span[1]/span/text()').extract_first()


            stat = ' '.join(i.xpath('.//div[1]/div[2]/div[1]/div[2]/div[1]/text()').extract())
            #print("STAT: ", stat)
            try:               
                if 'Under' in stat:
                    item['Status'] = 'Under Construction'
                    poss = stat.split('Ready by ')[-1].replace("'", "").replace(')', '').replace('\n', '').replace('. Freehold', '')
                    #print("POSSESSION: ", poss)
                    yr = ''.join(re.findall('[0-9]+', poss))
                    months = {
                        'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04',
                        'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08',
                        'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'
                    }
                    for mon, num in months.items():
                        if mon in poss:
                            item['Possession'] = '01/' + num + '/' + yr + ' 00:00:00'
                            break
                else:
                    item['Status'] = 'Ready to move'
                    item['Possession'] = '0'
            except:
                item['Possession'] = '0'
                item['Status'] = 'Ready to move'


            price = i.xpath('.//div/div[@class="srpColm2"]/div[@class="proColmRight"]/div/div/div/span/text()').extract_first()
            #print("PRICE: ", price)
            if price is None:
                price = i.xpath('.//span[contains(@id,"sqrFtPriceField")]/text()').extract_first()
                price = ''.join(re.findall('[0-9]+', price))
                #print("PRICE: ", price)
            if price is not None:
                if 'Lac' in price:
                    item['Selling_price'] = str(float(price.split()[0])*100000)
                elif 'Cr' in price:
                    item['Selling_price'] = str(float(price.split()[0])*10000000)
                else:
                    item['Selling_price'] = '0'
                if item['Selling_price'] == 'None':
                    item['Selling_price'] = '0'
            else:
                item['Selling_price'] = '0'

            if item['Selling_price'] == '0':
                item['price_on_req'] = 'true'
            else:
                item['price_on_req'] = 'false'

            try:
                sqft_per = i.xpath('div/div[@class="srpColm2"]/div[@class="proColmRight"]/div[@class="proPriceColm2"]/div[@class="proPriceColm2"]/div[@class="sqrPrice"]/span[@class="sqrPriceField"]/text()').extract_first()
                if sqft_per:
                    item['price_per_sqft'] = ''.join(re.findall('[0-9]+', sqft_per))
                else:
                    item['price_per_sqft'] = '0'
                if 'kottah' in sqf and item['price_per_sqft'] != '0':
                    item['price_per_sqft'] = str(float(item['price_per_sqft']) / 720)
            except:
                item['price_per_sqft'] = '0'
            
            
            try:
                item['listing_by'] = i.xpath('.//div[@class="proAgentWrap"]/div[1]/div/div[1]/text()').extract_first()
                item['name_lister'] = i.xpath('.//div[@class="proAgentWrap"]/div[@class="comNameElip"]/text()').extract_first().replace("\n", "")
            except:
                item['listing_by'] = 'None'
                item['name_lister'] = 'None'

            item['txn_type'] = i.xpath('div/input[contains(@id,"transactionType")]/@value').extract_first()

            day = i.xpath('div/input[contains(@id,"createDate")]/@value').extract_first()
            try:
                item['listing_date'] = dt.strftime(dt.strptime(day, "%b %d, '%y"), '%m/%d/%Y %H:%M:%S')
                item['updated_date'] = item['listing_date']
            except:
                item['listing_date'] = '0'
                item['updated_date'] = item['listing_date']

            if (((not item['Bua_sqft'] == '0') and (not item['Building_name'] == 'None')
                     and (not item['lat'] == '0'))
                    or ((not item['Selling_price'] == '0') and (not item['Bua_sqft'] == '0')
                        and (not item['Building_name'] == 'None') and (not item['lat'] == '0'))
                    or ((not item['price_per_sqft'] == '0') and (not item['Bua_sqft'] == '0')
                        and (not item['Building_name'] == 'None') and (not item['lat'] == '0'))):
                item['quality4'] = 1
            elif (((not item['price_per_sqft'] == '0') and (not item['Building_name'] == 'None')
                       and (not item['lat'] == '0'))
                    or ((not item['Selling_price'] == '0') and (not item['Bua_sqft'] == '0')
                        and (not item['lat'] == '0'))
                    or ((not item['Bua_sqft'] == '0') and (not item['lat'] == '0'))
                    or ((not item['Selling_price'] == '0') and (not item['Bua_sqft'] == '0')
                        and (not item['Building_name'] == 'None'))
                    or ((not item['Bua_sqft'] == '0') and (not item['Building_name'] == 'None'))):
                item['quality4'] = 0.5
            else:
                item['quality4'] = 0

            if ((not item['Building_name'] == 'None') and (not item['listing_date'] == '0')
                    and (not item['txn_type'] == 'None') and (not item['property_type'] == 'None')
                    and (not item['Selling_price'] == '0')):
                item['quality1'] = 1
            else:
                item['quality1'] = 0

            if ((not item['Launch_date'] == '0') and (not item['Possession'] == '0')):
                item['quality2'] = 1
            else:
                item['quality2'] = 0

            if ((not item['mobile_lister'] == 'None') or (not item['listing_by'] == 'None')
                    or (not item['name_lister'] == 'None')):
                item['quality3'] = 1
            else:
                item['quality3'] = 0

            yield item
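# A compact, standalone restatement of the quality4 scoring above. score_quality4 is an
# illustrative helper name; the 0.5 combinations are copied from parse(), and the 1-branch
# reduces to requiring Bua_sqft, Building_name and lat all present.
def score_quality4(item):
    def has(key, empty):
        return item.get(key, empty) != empty
    sqft, name, lat = has('Bua_sqft', '0'), has('Building_name', 'None'), has('lat', '0')
    price, ppsf = has('Selling_price', '0'), has('price_per_sqft', '0')
    if sqft and name and lat:
        return 1
    if ((ppsf and name and lat) or (price and sqft and lat) or (sqft and lat)
            or (price and sqft and name) or (sqft and name)):
        return 0.5
    return 0

print(score_quality4({'Bua_sqft': '1200', 'Building_name': 'None', 'lat': '17.4'}))  # -> 0.5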