Example #1
0
    def _check_cookie(self, cookie):
        """ Check our cookie, possibly returning the session id
        """
        expiretime = self._find_chunk(cookie, 'expires=', ' GMT')
        dict = {}

        print 'sabnzbd-xbmc timecheck'
        try:
            # Day, dd-mmm-yyyy hh:mm:ss
            t = time.strptime(expiretime, '%a, %d-%b-%Y %H:%M:%S')
        except ValueError:
            # Day, dd mmm yyyy hh:mm:ss
            print 'sabnzbd-xbmc timecheck failed!'
            t = time.strptime(expiretime, '%a, %d %b %Y %H:%M:%S')

        print 'sabnzbd-xbmc checking if expired!'
        now = time.gmtime()
        # Woops, expired
        if now > t: 
            print 'sabnzbd-xbmc expired!'
            return {}

        else:
            print 'sabnzbd-xbmc extract session from cookie!'
            dict['NzbSessionID'] = self._find_chunk(cookie, 'NzbSessionID=', ';')
            dict['NzbSmoke'] = self._find_chunk(cookie, 'NzbSmoke=', ';')
            print 'sabnzbd-xbmc return session info!'
            return dict
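
The two strptime formats above cover the two common spellings of cookie expiry dates. A minimal standalone sketch of the same fallback, with hypothetical sample strings:

import time

def parse_expires(expiretime):
    try:
        # Day, dd-mmm-yyyy hh:mm:ss, e.g. 'Sun, 06-Nov-2016 08:49:37'
        return time.strptime(expiretime, '%a, %d-%b-%Y %H:%M:%S')
    except ValueError:
        # Day, dd mmm yyyy hh:mm:ss, e.g. 'Sun, 06 Nov 2016 08:49:37'
        return time.strptime(expiretime, '%a, %d %b %Y %H:%M:%S')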
Example #2
0
def validate_data(info):

    hack_license,pick_datetime,drop_datetime,n_passengers,trip_dist,pick_long,\
    pick_lat,drop_long,drop_lat,payment_type,fare_amount,\
    surcharge,tip_amount,mta_tax,tolls_amount,total_amount=info

    time_in_seconds = time.mktime(time.strptime(drop_datetime,'%Y-%m-%d %H:%M:%S'))-\
                      time.mktime(time.strptime(pick_datetime,'%Y-%m-%d %H:%M:%S'))
    try:
        pick_long = float(pick_long.strip())
        pick_lat = float(pick_lat.strip())
        drop_long = float(drop_long.strip())
        drop_lat = float(drop_lat.strip())
        trip_dist = float(trip_dist.strip())
        total_amount = float(total_amount.strip())
        n_passengers = int(n_passengers.strip())
    except ValueError:
        sys.stderr.write('CASTING TO FLOATS FAILED')
        return False
    # Is the straight distance shorter than the reported distance?
    euclidean = validate_euclidean(trip_dist,pick_long,pick_lat,drop_long,drop_lat)
    gps_pickup = validate_gps(pick_long,pick_lat) # Are the GPS coordinates present in Manhattan
    gps_dropoff = validate_gps(drop_long,drop_lat)
    distance = validate_distance(trip_dist,pick_long,pick_lat,drop_long,drop_lat) # Are distances too big
    val_time = validate_time(time_in_seconds) # Are times too long or 0? Are they positive?
    velocity = validate_velocity(time_in_seconds,trip_dist) # Is velocity too out of reach
    amount = validate_amount(total_amount)
    pass_validate = validate_passengers(n_passengers)

    return(euclidean and gps_pickup and gps_dropoff and distance and val_time and velocity and amount and pass_validate)
Example #3
0
def time_comparison(project_pid):
    end_time_format = ""
    end_time = _time_call(project_pid,"etime")
    end_time_dict = end_time.split(":")


    if end_time_dict:
        if len(end_time_dict) == 2:
            end_time_format = "%M:%S"
        elif len(end_time_dict) == 3:
            if len(end_time_dict[0].split("-")) == 2:
                end_time_format = "%U-%H:%M:%S"
            else:
                end_time_format = "%H:%M:%S"

    time_define = time.strptime("00:05:00", "%H:%M:%S")
    if end_time_format:
        if time_define > time.strptime(end_time, end_time_format):
            print_highlight(package_name),
            print "running time %s" %(end_time),
            print_success()
        else:
            print_highlight(package_name),
            print "running time %s" %(end_time),
            print_failure()
Example #4
0
	def codyFileUpdates(self, trigger):     

		if '!cody.updates' in trigger\
		and self.POST_CODY_FILE_UPDATES:
			self.POST_CODY_FILE_UPDATES = False
			self.IRC.send('PRIVMSG '+self.MSG_CHANNEL+' :File updates are now turned off.\r\n')

		elif '!cody.updates' in trigger\
		and not self.POST_CODY_FILE_UPDATES:
			self.POST_CODY_FILE_UPDATES = True
			self.IRC.send('PRIVMSG '+self.MSG_CHANNEL+' :File updates are now turned on.\r\n')

		elif 'codyFileUpdates' in trigger:

			if (datetime.datetime.now().second % 3) == 0 : 
				
				#fetch current file update time - syntax is 'Sun Apr 21 20:24:36 2013'
				lastUpdatedRealTime = time.ctime(os.path.getmtime(self.FILE_PATH))
				lastUpdatedCody 	= lastUpdatedRealTime.split()
				
				#fetch last recorded update time
				lastRecorded 		= open("resources/lastupdated.db", "r")
				lastRecordedCody	= lastRecorded.read().split()
				lastRecorded.close()
				
				#parse recorded time into datetime ints
				if len(lastRecordedCody) == 5:
					lastRecordedYear 	= int(lastRecordedCody[4])
					lastRecordedMonth   = int(time.strptime(lastRecordedCody[1],'%b').tm_mon)
					lastRecordedDay		= int(lastRecordedCody[2])
					lastRecordedTime 	= lastRecordedCody[3].split(':')
					lastRecordedHour	= int(lastRecordedTime[0])
					lastRecordedMinute	= int(lastRecordedTime[1])
					lastRecordedSecond	= int(lastRecordedTime[2])

				#parse current time into datetime ints
				lastUpdatedYear 	= int(lastUpdatedCody[4])
				lastUpdatedMonth    = int(time.strptime(lastUpdatedCody[1],'%b').tm_mon)
				lastUpdatedDay		= int(lastUpdatedCody[2])
				lastUpdatedTime 	= lastUpdatedCody[3].split(':')
				lastUpdatedHour		= int(lastUpdatedTime[0])
				lastUpdatedMinute	= int(lastUpdatedTime[1])
				lastUpdatedSecond	= int(lastUpdatedTime[2])

				#turn them both into datetime objects
				if len(lastRecordedCody) == 5:
					lastRecordedCody 	= datetime.datetime(lastRecordedYear, lastRecordedMonth, lastRecordedDay, lastRecordedHour, lastRecordedMinute, lastRecordedSecond)

				else: 
					lastRecordedCody 	= datetime.datetime(2000,1,1)
				
				lastUpdatedCody 	= datetime.datetime(lastUpdatedYear,  lastUpdatedMonth,  lastUpdatedDay,  lastUpdatedHour, 	lastUpdatedMinute, 	lastUpdatedSecond)

				#check if it has changed
				if lastRecordedCody < lastUpdatedCody :
					if self.POST_CODY_FILE_UPDATES:
						self.IRC.send('PRIVMSG '+self.HOME_CHANNEL+' :'+self.FILE_NAME+' has been updated.\r\n')
					dbWrite = open("resources/lastupdated.db", "w")
					dbWrite.write(lastUpdatedRealTime)
					dbWrite.close()
Example #5
0
File: common.py Project: hpc2n/bart
def getSeconds(time_str):
    """
    Convert a string of the form '%d-%H:%M:%S', '%H:%M:%S' or '%M:%S'
    to seconds.
    """
    # sometimes the timestamp includs a fractional second part
    time_str = time_str.split('.')[0]

    if '-' in time_str:
        days, time_str = time_str.split('-')
        st = time.strptime(time_str, '%H:%M:%S')
        sec = int(days)*86400+st.tm_hour*3600+st.tm_min*60+st.tm_sec
    else:
        try:
            st = time.strptime(time_str, '%H:%M:%S')
            sec = st.tm_hour*3600+st.tm_min*60+st.tm_sec
        except ValueError:
            try:
                st = time.strptime(time_str, '%M:%S')
                sec = st.tm_min*60+st.tm_sec
            except ValueError:
                logging.error('String: %s does not match time format.' % time_str)
                return -1

    return sec
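
A quick usage check for getSeconds, assuming the function above is in scope; the expected values follow directly from the three formats it accepts:

assert getSeconds('1-02:03:04') == 1*86400 + 2*3600 + 3*60 + 4   # 93784
assert getSeconds('02:03:04') == 2*3600 + 3*60 + 4               # 7384
assert getSeconds('03:04') == 3*60 + 4                           # 184
assert getSeconds('03:04.567') == 3*60 + 4                       # fractional seconds are dropped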
Example #6
0
    def _createSearchRequest(self, search=None, tags=None,
                             notebooks=None, date=None,
                             exact_entry=None, content_search=None):

        request = ""
        if notebooks:
            for notebook in tools.strip(notebooks.split(',')):
                if notebook.startswith('-'):
                    request += '-notebook:"%s" ' % tools.strip(notebook[1:])
                else:
                    request += 'notebook:"%s" ' % tools.strip(notebook)

        if tags:
            for tag in tools.strip(tags.split(',')):

                if tag.startswith('-'):
                    request += '-tag:"%s" ' % tag[1:]
                else:
                    request += 'tag:"%s" ' % tag

        if date:
            date = tools.strip(date.split('-'))
            try:
                dateStruct = time.strptime(date[0] + " 00:00:00", "%d.%m.%Y %H:%M:%S")
                request += 'created:%s ' % time.strftime("%Y%m%d", time.localtime(time.mktime(dateStruct)))
                if len(date) == 2:
                    dateStruct = time.strptime(date[1] + " 00:00:00", "%d.%m.%Y %H:%M:%S")
                request += '-created:%s ' % time.strftime("%Y%m%d", time.localtime(time.mktime(dateStruct) + 60 * 60 * 24))
            except ValueError, e:
                out.failureMessage('Incorrect date format in --date attribute. '
                                   'Format: %s' % time.strftime("%d.%m.%Y", time.strptime('19991231', "%Y%m%d")))
                return tools.exitErr()
Example #7
0
    def add_separator(self, timestamp):
        '''Add whitespace and timestamp between chat sessions.'''
        time_with_current_year = \
            (time.localtime(time.time())[0], ) + \
            time.strptime(timestamp, '%b %d %H:%M:%S')[1:]

        timestamp_seconds = time.mktime(time_with_current_year)
        if timestamp_seconds > time.time():
            time_with_previous_year = \
                (time.localtime(time.time())[0] - 1, ) + \
                time.strptime(timestamp, '%b %d %H:%M:%S')[1:]
            timestamp_seconds = time.mktime(time_with_previous_year)

        message = TextBox(self,
                          style.COLOR_BUTTON_GREY, style.COLOR_BUTTON_GREY,
                          style.COLOR_WHITE, style.COLOR_BUTTON_GREY, False,
                          None, timestamp_to_elapsed_string(timestamp_seconds))
        self._message_list.append(message)
        box = Gtk.HBox()
        align = Gtk.Alignment.new(
            xalign=0.5, yalign=0.0, xscale=0.0, yscale=0.0)
        box.pack_start(align, True, True, 0)
        align.show()
        align.add(message)
        message.show()
        self._conversation.attach(box, 0, self._row_counter, 1, 1)
        box.show()
        self._row_counter += 1
        self.add_log_timestamp(timestamp)
        self._last_msg_sender = None
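
The year-inference trick above (timestamps of the form '%b %d %H:%M:%S' carry no year, so the current year is assumed and rolled back by one if that lands in the future) can be isolated into a small sketch; infer_epoch is a hypothetical name:

import time

def infer_epoch(timestamp):
    fields = time.strptime(timestamp, '%b %d %H:%M:%S')[1:]
    guess = time.mktime((time.localtime()[0], ) + fields)
    if guess > time.time():
        # The timestamp would be in the future, so it must belong to last year.
        guess = time.mktime((time.localtime()[0] - 1, ) + fields)
    return guess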
Example #8
0
def do_offset(tuples_list, filename, format ='%b %d, %Y %H:%M:%S', offset_val=0):
    new_tuples_list = []
    firstval = time.strptime(tuples_list[0][0], format)
    if filename != "slide_timestamps.txt":
        def_time = 'Apr 01, 2000 00:00:00'
    else :
        def_time = 'Apr 01 2000 00:00:00'
    conversion_timer = time.mktime(time.strptime(def_time, format))

    for item in tuples_list:
        t= item[0]
        timer = time.strptime(t, format)  ##3,4,5
        timer = time.mktime(timer) - time.mktime(firstval) + conversion_timer + offset_val
        timer = time.strftime("%H:%M:%S",time.localtime(timer))
        if filename == "spectrum.txt":
            line_list = [timer]
            for i in json.loads(item[1]):
                line_list.append(i)
            #print line_list
            new_tuples_list.append(tuple(line_list))
            
        else:
            line_list = [timer]
            for i in item[1:]:
                line_list.append(i)
            #print line_list
            new_tuples_list.append(tuple(line_list))       
    return new_tuples_list
Example #9
0
    def get_expire_sec(self, morsel):
        expires = None

        if morsel.get('max-age'):
            return int(morsel['max-age'])

        expires = morsel.get('expires')
        if not expires:
            return None

        expires = expires.replace(' UTC', ' GMT')

        try:
            expires = time.strptime(expires, '%a, %d-%b-%Y %H:%M:%S GMT')
        except:
            pass

        try:
            expires = time.strptime(expires, '%a, %d %b %Y %H:%M:%S GMT')
        except:
            pass

        expires = time.mktime(expires)
        expires = expires - time.timezone - time.time()
        return expires
Example #10
0
def time_parse(s):
    try:
        epoch = int(s)
        return epoch
    except ValueError:
        pass

    try:
        epoch = int(calendar.timegm(time.strptime(s, '%Y-%m-%d')))
        return epoch
    except ValueError:
        pass

    try:
        epoch = int(calendar.timegm(time.strptime(s, '%Y-%m-%d %H:%M:%S')))
        return epoch
    except ValueError:
        pass

    m = re.match(r'^(?=\d)(?:(\d+)w)?(?:(\d+)d)?(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s?)?$', s, re.I)
    if m:
        return -1*(int(m.group(1) or 0)*604800 +  \
                int(m.group(2) or 0)*86400+  \
                int(m.group(3) or 0)*3600+  \
                int(m.group(4) or 0)*60+  \
                int(m.group(5) or 0))

    raise ValueError('Invalid time: "%s"' % s)
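
Hedged usage notes for time_parse (absolute forms are interpreted as UTC via calendar.timegm; relative forms come back negative):

time_parse('1500000000')           # -> 1500000000 (already an epoch)
time_parse('2017-07-14')           # -> 1499990400 (midnight UTC of that date)
time_parse('2017-07-14 02:40:00')  # -> 1500000000
time_parse('1w2d3h')               # -> -(604800 + 2*86400 + 3*3600) = -788400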
Example #11
0
    def set_filter_date(self):

        dialog = xbmcgui.Dialog()
        if self.start_date == '':
            self.start_date = str(datetime.datetime.now())[:10]
        if self.end_date == '':
            self.end_date = str(datetime.datetime.now())[:10]

        try:
            d = dialog.numeric(1, common.getstring(30117) ,strftime("%d/%m/%Y",strptime(self.start_date,"%Y-%m-%d")) )
            if d != '':    
                self.start_date = strftime("%Y-%m-%d",strptime(d.replace(" ","0"),"%d/%m/%Y"))
            else:
                self.start_date =''
            common.log('', str(self.start_date))
            
            d = dialog.numeric(1, common.getstring(30118) ,strftime("%d/%m/%Y",strptime(self.end_date,"%Y-%m-%d")) )
            if d != '':
                self.end_date = strftime("%Y-%m-%d",strptime(d.replace(" ","0"),"%d/%m/%Y"))
            else:
                self.end_date =''
            common.log('', str(self.end_date))
        except:
            pass

        if self.start_date != '' or self.end_date != '':
            self.getControl( BUTTON_DATE ).setLabel( self.start_date + ' ... ' + self.end_date )
        else:
            self.getControl( BUTTON_DATE ).setLabel( common.getstring(30164) )
        self.getControl( BUTTON_DATE ).setVisible(False)
        self.getControl( BUTTON_DATE ).setVisible(True)        
Example #12
0
    def to_python(self, value):
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            return datetime.datetime(value.year, value.month, value.day)

        # Attempt to parse a datetime:
        value = smart_str(value)
        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}
        try: # Seconds are optional, so try converting seconds first.
            return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6],
                                     **kwargs)

        except ValueError:
            try: # Try without seconds.
                return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M')[:5],
                                         **kwargs)
            except ValueError: # Try without hour/minutes/seconds.
                try:
                    return datetime.datetime(*time.strptime(value, '%Y-%m-%d')[:3],
                                             **kwargs)
                except ValueError:
                    raise exceptions.ValidationError(self.error_messages['invalid'])
Example #13
0
    def getWeatherDataCallback(self, result, errortext):
        self['statustext'].text = ''
        if result == MSNWeather.ERROR:
            self.error(errortext)
        else:
            self['caption'].text = self.weatherData.city
            self.webSite = self.weatherData.url
            for weatherData in self.weatherData.weatherItems.items():
                item = weatherData[1]
                if weatherData[0] == '-1':
                    self['currentTemp'].text = '%s\xc2\xb0%s' % (item.temperature, self.weatherData.degreetype)
                    self['condition'].text = item.skytext
                    self['humidity'].text = _('Humidity: %s %%') % item.humidity
                    self['wind_condition'].text = item.winddisplay
                    c = time.strptime(item.observationtime, '%H:%M:%S')
                    self['observationtime'].text = _('Observation time: %s') % time.strftime('%H:%M', c)
                    self['observationpoint'].text = _('Observation point: %s') % item.observationpoint
                    self['feelsliketemp'].text = _('Feels like %s') % item.feelslike + '\xc2\xb0' + self.weatherData.degreetype
                else:
                    index = weatherData[0]
                    c = time.strptime(item.date, '%Y-%m-%d')
                    self['weekday%s' % index].text = '%s\n%s' % (item.day, time.strftime('%d. %b', c))
                    lowTemp = item.low
                    highTemp = item.high
                    self['weekday%s_temp' % index].text = '%s\xc2\xb0%s|%s\xc2\xb0%s\n%s' % (highTemp,
                     self.weatherData.degreetype,
                     lowTemp,
                     self.weatherData.degreetype,
                     item.skytextday)

        if self.weatherPluginEntryIndex == 1 and WeatherMSNComp is not None:
            WeatherMSNComp.updateWeather(self.weatherData, result, errortext)
        return
Example #14
0
def date_list(h5file):
  dateList = []
  tbase = []
  ifgramList = h5file['interferograms'].keys()
  for ifgram in  ifgramList:
    dates = h5file['interferograms'][ifgram].attrs['DATE12'].split('-')
    dates1= h5file['interferograms'][ifgram].attrs['DATE12'].split('-')
    if dates[0][0] == '9':
      dates[0] = '19'+dates[0]
    else:
      dates[0] = '20'+dates[0]
    if dates[1][0] == '9':
      dates[1] = '19'+dates[1]
    else:
      dates[1] = '20'+dates[1]
    if not dates[0] in dateList: dateList.append(dates[0])
    if not dates[1] in dateList: dateList.append(dates[1])
    
  dateList.sort()
  dateList1=[]
  for ni in range(len(dateList)):
    dateList1.append(dateList[ni][2:])

  d1 = datetime.datetime(*time.strptime(dateList[0],"%Y%m%d")[0:5])
  for ni in range(len(dateList)):
    d2 = datetime.datetime(*time.strptime(dateList[ni],"%Y%m%d")[0:5])
    diff = d2-d1
    tbase.append(diff.days)
  dateDict = {}
  for i in range(len(dateList)): dateDict[dateList[i]] = tbase[i]
  return tbase,dateList,dateDict,dateList1
Example #15
0
    def to_python(self, value):
        if value is None:
            return None
        if isinstance(value, datetime.time):
            return value
        if isinstance(value, datetime.datetime):
            # Not usually a good idea to pass in a datetime here (it loses
            # information), but this can be a side-effect of interacting with a
            # database backend (e.g. Oracle), so we'll be accommodating.
            return value.time()

        # Attempt to parse a datetime:
        value = smart_str(value)
        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}

        try: # Seconds are optional, so try converting seconds first.
            return datetime.time(*time.strptime(value, '%H:%M:%S')[3:6],
                                 **kwargs)
        except ValueError:
            try: # Try without seconds.
                return datetime.time(*time.strptime(value, '%H:%M')[3:5],
                                         **kwargs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
Example #16
0
 def _get_utilization(self, cr, uid, ids, name, arg, context=None):
     res = {}
     for meter in self.browse(cr, uid, ids, context=context):
         Dn = 1.0*calendar.timegm(time.strptime(time.strftime('%Y-%m-%d',time.gmtime()),"%Y-%m-%d"))
         Da = Dn - 3600*24*meter.av_time
         meter_line_obj = self.pool.get('mro.pm.meter.line')
         meter_line_ids = meter_line_obj.search(cr, uid, [('meter_id', '=', meter.id),('date', '<=', time.strftime('%Y-%m-%d',time.gmtime(Da)))], limit=1, order='date desc')
         if not len(meter_line_ids):
             meter_line_ids = meter_line_obj.search(cr, uid, [('meter_id', '=', meter.id),('date', '>', time.strftime('%Y-%m-%d',time.gmtime(Da)))], limit=1, order='date')
             if not len(meter_line_ids):
                 res[meter.id] = meter.min_utilization
                 continue
         meter_line = meter_line_obj.browse(cr, uid, meter_line_ids[0])
         Dci = 1.0*calendar.timegm(time.strptime(meter_line.date, "%Y-%m-%d"))
         Ci = meter_line.total_value
         number = 0
         Us = 0
         meter_line_ids = meter_line_obj.search(cr, uid, [('meter_id', '=', meter.id),('date', '>',meter_line.date)], order='date')
         for meter_line in meter_line_obj.browse(cr, uid, meter_line_ids):
             Dci1 = 1.0*calendar.timegm(time.strptime(meter_line.date, "%Y-%m-%d"))
             Ci1 = meter_line.total_value
             if Dci1 != Dci:
                 Us = Us + (3600*24*(Ci1 - Ci))/(Dci1 - Dci)
                 Dci = Dci1
                 Ci = Ci1
                 number += 1
         if number:
             U = Us/number
             if U<meter.min_utilization:
                 U = meter.min_utilization
         else:   U = meter.min_utilization
         res[meter.id] = U
     return res
Example #17
0
 def findSchedule(self):
     for event in self.new_events:
        is_scheduled = False
        curr_time = self.end_date - event.duration
        while not is_scheduled and not curr_time < self.start_date:
            event.start = curr_time
            event.end = curr_time + event.duration
            is_valid = True
            # check conflicts with current schedule
            for component in self.ical.walk():
                if component.name == 'VEVENT':
                    #try:
                    dc = component.decoded
                    dtstart = time.mktime(time.strptime(str(dc('dtstart')), '%Y-%m-%d %H:%M:%S+00:00'))/60
                    dtend = time.mktime(time.strptime(str(dc('dtend')), '%Y-%m-%d %H:%M:%S+00:00'))/60
                    if (curr_time > dtstart and curr_time < dtend
                            or curr_time + event.duration > dtstart and curr_time + event.duration < dtend
                            or curr_time < dtstart and curr_time + event.duration > dtend
                            or curr_time > dtstart and curr_time + event.duration < dtend
                            or curr_time == dtstart
                            or curr_time + event.duration == dtend):
                        is_valid = False
                        break
            if is_valid:
                for constraint in event.constraints:
                    if not constraint.isValid(event, self.ical):
                        is_valid = False
                        break
            if is_valid:
                self.addToCalendar(event)
                is_scheduled = True
            else:
                curr_time -= 30
Example #18
0
    def index(self, report=None, subfolder=None, filter=None, reportid=None, fromdate=None, todate=None):
        
        filter = bool(filter)
        if str(fromdate).lower() in ('today', 't') :
            fromdate = datetime.datetime.now().date()
        elif fromdate is None:
            fromdate =datetime.datetime.now().date()
        else:
            fromdate = datetime.datetime(*time.strptime(fromdate, "%d/%m/%Y")[:6]).date()

        if str(todate).lower() in ('today', 't') or todate is None:
            todate = datetime.datetime.now().date()
        else:
            todate = datetime.datetime(*time.strptime(todate, "%d/%m/%Y")[:6]).date()
            
        args = self.parent.getargs()
        
        if filter:
            args["pagetitle"] = "Historical reports"

        if 0 and not any_of(member_of_ad('Whiteclif - P&L'), member_of('admin'))(): 
            log.debug("Auth Failed")
            tabledata = Table(title='Reports',
                              columns=['&nbsp;'],
                              data=[['You do not have sufficient rights to view reports']],
                              )
            
            reportargs =  {
                "folder" : None,
                "filter" : filter,
                "fromdate" : fromdate,
                "todate" : todate,
                "authorised" : False,
                "report"     : report, 
                "subfolder"  : subfolder,
                "reportmenu" : reportconfig.reportmenu,
                "tabledata"  : tabledata,
            }
        else:
            if reportid and filter:
                reportargs = self.getargsSearch(reportparam=reportid, fromdate=fromdate, todate=todate) 
            elif report:
                groupinfo = reportconfig.groupindex.get(report, None)
                if groupinfo and groupinfo.get("groupby", None) == 'groupdir':
                    reportargs = self.getargsGroupDir(report, subfolder, filter=False, fromdate=fromdate, todate=todate)
                else:
                    reportargs = self.getargsDir(report, subfolder, filter=filter, fromdate=fromdate, todate=todate)
            else:
                reportargs = self.getargs(report, subfolder, filter=filter, fromdate=fromdate, todate=todate)
            
        args.update(reportargs)
        
        args["include"] = 'reports.html'
        args["refresh"] = False
        

        log.debug("Rendering Template")
        html = template.render(**args)
        log.debug("Rendering Template Complete")
        return html
Example #19
0
 def __init__(self,file):
     while True:
         line=file.readline()
         if line[0]=='-':
             #end of header
             break
         lineSplitPos=line.find(":")
         if lineSplitPos>=0:
             lineType=line[0:lineSplitPos]
             lineValue=line[lineSplitPos+1:]
             if lineType=='UUID':
                 self.patientID=lineValue
             elif lineType=='Sampling Rate':
                 self.samplingRate=float(lineValue)
             elif lineType=='Start Time':
                 offsetPos=lineValue.find("Offset:")
                 if offsetPos>0:
                     self.startTime=calendar.timegm(time.strptime(lineValue[0:offsetPos],' %Y-%m-%d %H:%M:%S '))
                     self.hoursOffset=int(lineValue[offsetPos+7:])
                     self.startTime-=self.hoursOffset*60*60
                 else:
                     self.startTime=calendar.timegm(time.strptime(lineValue,' %Y-%m-%d %H:%M:%S '))
                     self.hoursOffset=0
     self.numSignals=6            
     fileLen=os.fstat(file.fileno()).st_size
     curPos=file.tell()
     self.recordCount=(fileLen-curPos)/(14)
Example #20
0
def fetch():


    posts["upcoming"]=[]
    posts["ongoing"]=[]
    hackerrank_contests["urls"] = []
    thread_list = []
    
    thread_list.append( threading.Thread(target=fetch_codeforces) )
    thread_list.append( threading.Thread(target=fetch_topcoder) )
    thread_list.append( threading.Thread(target=fetch_hackerearth) )
    thread_list.append( threading.Thread(target=fetch_codechef) )
    thread_list.append( threading.Thread(target=fetch_hackerrank_general) )
    thread_list.append( threading.Thread(target=fetch_hackerrank_college) )
    thread_list.append( threading.Thread(target=fetch_google) )

    for thread in thread_list:
        thread.start()

    for thread in thread_list:
        thread.join()

    posts["upcoming"] = sorted(posts["upcoming"], key=lambda k: strptime(k['StartTime'], "%a, %d %b %Y %H:%M"))
    posts["ongoing"] = sorted(posts["ongoing"], key=lambda k: strptime(k['EndTime'], "%a, %d %b %Y %H:%M"))
    posts["timestamp"] = strftime("%a, %d %b %Y %H:%M:%S", localtime())
Example #21
0
def fetch_codechef():
    page = urlopen("http://www.codechef.com/contests")
    soup = BeautifulSoup(page,"html.parser")

    statusdiv = soup.findAll("div",attrs = {"class":"table-questions"})
    upcoming_contests = statusdiv[1].findAll("tr")
    if(len(upcoming_contests) <100):
        for upcoming_contest in upcoming_contests[1:]:
            details = upcoming_contest.findAll("td")
            start_time = strptime(details[2].string, "%Y-%m-%d %H:%M:%S")
            end_time = strptime(details[3].string, "%Y-%m-%d %H:%M:%S")
            duration = get_duration(int(( mktime(end_time)-mktime(start_time) )/60 ))
            posts["upcoming"].append({"Name" :  details[1].string  , "url" : "http://www.codechef.com"+details[1].a["href"] , "StartTime" : strftime("%a, %d %b %Y %H:%M", start_time),"EndTime" : strftime("%a, %d %b %Y %H:%M", end_time),"Duration":duration ,"Platform":"CODECHEF" })

        ongoing_contests = statusdiv[0].findAll("tr")
        for ongoing_contest in ongoing_contests[1:]:
            details = ongoing_contest.findAll("td")
            end_time = strptime(details[3].string, "%Y-%m-%d %H:%M:%S")
            posts["ongoing"].append({ "Name" :  details[1].string  , "url" : "http://www.codechef.com"+details[1].a["href"] , "EndTime" : strftime("%a, %d %b %Y %H:%M", end_time) ,"Platform":"CODECHEF"})
    else:
        upcoming_contests = statusdiv[0].findAll("tr")
        for upcoming_contest in upcoming_contests[1:]:
            details = upcoming_contest.findAll("td")
            start_time = strptime(details[2].string, "%Y-%m-%d %H:%M:%S")
            end_time = strptime(details[3].string, "%Y-%m-%d %H:%M:%S")
            duration = get_duration(int(( mktime(end_time)-mktime(start_time) )/60 ))
            posts["upcoming"].append({"Name" :  details[1].string  , "url" : "http://www.codechef.com"+details[1].a["href"] , "StartTime" : strftime("%a, %d %b %Y %H:%M", start_time),"EndTime" : strftime("%a, %d %b %Y %H:%M", end_time),"Duration":duration ,"Platform":"CODECHEF" })
Example #22
0
def get_dates():
    """
        Return a dictionary containing count of submissions
        on each date
    """

    if len(request.args) < 1:
        if session.handle:
            handle = str(session.handle)
        else:
            redirect(URL("default", "submissions", args=[1]))
    else:
        handle = str(request.args[0])

    stable = db.submission

    row = db.executesql("SELECT status, time_stamp, COUNT(*) FROM submission WHERE submission.stopstalk_handle='" + handle + "' GROUP BY DATE(submission.time_stamp), submission.status;")

    total_submissions = {}
    streak = 0
    max_streak = 0
    prev = curr = start = None

    for i in row:
        if streak == 0:
            streak = 1
            prev = time.strptime(str(i[1]), "%Y-%m-%d %H:%M:%S")
            prev = date(prev.tm_year, prev.tm_mon, prev.tm_mday)
            start = prev
        else:
            curr = time.strptime(str(i[1]), "%Y-%m-%d %H:%M:%S")
            curr = date(curr.tm_year, curr.tm_mon, curr.tm_mday)
            delta = (curr - prev).days
            if delta == 1:
                streak += 1
            elif delta != 0:
                streak = 0
            prev = curr

        if streak > max_streak:
            max_streak = streak

        sub_date = str(i[1]).split()[0]
        if total_submissions.has_key(sub_date):
            total_submissions[sub_date][i[0]] = i[2]
            total_submissions[sub_date]["count"] += i[2]
        else:
            total_submissions[sub_date] = {}
            total_submissions[sub_date][i[0]] = i[2]
            total_submissions[sub_date]["count"] = i[2]

    today = datetime.today().date()

    # If last streak does not match the current day
    if (today - start).days + 1 != streak:
        streak = 0

    return dict(total=total_submissions,
                max_streak=max_streak,
                curr_streak=streak)
Example #23
0
def GenerateShippingCalander(dt):
  month = [['', '', '','','', '', ''],
     ['', '', '','','', '', ''],
     ['', '', '','','', '', ''],
    ['', '', '','','', '', ''],
     ['', '', '','','', '', ''],
     ['', '', '','','', '', ''],
     ]

  today = time.localtime().tm_mday

  start_time = time.strptime(dt, "%m/%d/%Y")
  day = start_time.tm_mday
  wday = start_time.tm_wday
  last_day = calendar.monthrange(start_time.tm_year, start_time.tm_mon)[1]

  row_no = 0
  while day <= last_day:
    d1 = datetime.datetime(start_time.tm_year, start_time.tm_mon, day)
    d2 = datetime.datetime.now()
    cur_time = time.strptime(time.strftime("%Y/%m/" + str(day), start_time), "%Y/%m/%d")
    day = cur_time.tm_mday
    wday = cur_time.tm_wday
    script = ''
    method = ''
    bgcolor = "#FFFFFF"
    days_diff = (d1 - d2).days + 1
    if days_diff <  0:
      bgcolor = "card5_grey"
    elif days_diff == 0:
      bgcolor = "card5_color2"
    elif days_diff ==  1:
      bgcolor = "card5_color3"
      method = "NextDay"
      script = time.strftime("%m/" + str(day).zfill(2) + "/%Y", start_time)
    elif days_diff ==  2:
      method = "SecondDay"
      bgcolor = "card5_color4"
      script = time.strftime("%m/" + str(day).zfill(2) + "/%Y", start_time)
    elif days_diff > 2:
      method = "GroundShipping"
      bgcolor = "card5_color7"
      script = time.strftime("%m/" + str(day).zfill(2) + "/%Y", start_time)        

    if days_diff >= 0:
      if wday == 6:
         bgcolor = "card5_color6"
         script = ''
      elif wday == 5:
         method = "Saturday"
         script = time.strftime("%m/" + str(day).zfill(2) + "/%Y", start_time)
         bgcolor = "card5_color5"


    day_hash = {'wday': wday, 'day': day, 'bgcolor':bgcolor, 'script':script, 'method': method}
    month[row_no][wday] = day_hash 
    if wday == 6:
      row_no += 1
    day += 1
  return month    
Example #24
0
def calcDays(startDate,endDate):
    # Compute the difference in days between the two dates
    startDate=time.strptime(startDate,"%Y-%m-%d %H:%M:%S")
    endDate=time.strptime(endDate,"%Y-%m-%d %H:%M:%S")
    startDate=datetime(startDate[0],startDate[1],startDate[2],startDate[3],startDate[4],startDate[5])
    endDate=datetime(endDate[0],endDate[1],endDate[2],endDate[3],endDate[4],endDate[5])
    return (endDate-startDate).days
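
A usage sketch for calcDays, assuming `from datetime import datetime` as the snippet's bare datetime(...) calls imply; note that .days drops partial days:

calcDays("2021-01-01 00:00:00", "2021-01-31 12:00:00")   # -> 30 (the extra 12 hours are dropped)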
Example #25
0
 def __init__(self, pidfile, cfgfile):
     Daemon.__init__(self, pidfile)
     self.jobs = {}
     self.immediately = False
     self.scheduler = Scheduler(daemonic=False)
     self.logger = logging.getLogger(self.__class__.__name__)
     if os.path.exists(cfgfile):
         with open(cfgfile, 'rt') as f:
             config = yaml.load(f.read())
         for k1 in config.keys():
             if k1 == 'version':
                 pass
             if k1 == 'immediately':
                 self.immediately = config[k1]
             elif k1 == 'taobao':
                 self.jobs[k1] = config[k1]
                 self.jobs[k1]['id'] = None
                 if 'chktime' in self.jobs[k1].keys():
                     self.jobs[k1]['btime'] = time.strptime(self.jobs[k1]['chktime'].split('-')[0], '%H:%M')
                     self.jobs[k1]['etime'] = time.strptime(self.jobs[k1]['chktime'].split('-')[1], '%H:%M')
                     if self.jobs[k1]['btime'] >= self.jobs[k1]['etime']:
                         raise ValueError('"chktime" is illegal')
                 else:
                     raise ValueError('There is no "chktime" be found in configure.')
             else:
                 pass
     else:
         self.logger.error('{0} not found'.format(cfgfile))
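
The `btime >= etime` check above works because struct_time values compare like tuples (year, month, day, hour, ...), and both sides share the same dummy date when only '%H:%M' is parsed. A small hedged illustration:

import time

assert time.strptime('08:30', '%H:%M') < time.strptime('17:00', '%H:%M')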
Example #26
0
    def set_json_aux_matches(self):
        match = None
        count = 1
        if not self.html_aux_matches:
            return None

        for td in self.html_aux_matches.find_all('td'):
            match = td.find("div", {'data-type': 'matches'})
            if count == 1:
                self.last_match_json = {
                    'match_id': match.find("div", {'data-type': 'matches'}).attrs.get('data-id'),
                    'date': datetime.fromtimestamp(time.mktime(time.strptime(match.find("div", {'class': 'm-date'}).attrs.get('data-matchdate'), '%Y%m%d'))),
                    'date_string': match.find("div", {'class': 'm-date'}).attrs.get('data-matchdate'),
                    'home': match.find("div", {'class': 'home'}).find('span', {'class': 't-nText'}).get_text(),
                    'home_logo': match.find('div', {'class': 'home'}).find('img', {'class': 't-i-3-logo'}).attrs.get('src'),
                    'away': match.find('div', {'class': 'away'}).find('span', {'class': 't-nText'}).get_text(),
                    'away_logo': match.find('div', {'class': 'away'}).find('img', {'class': 't-i-3-logo'}).attrs.get('src'),
                    'res': match.find('span', {'class': 's-resText'}).get_text(),
                    'is_res': True
                }
            else:
                self.next_match_json = {
                    'match_id': match.find("div", {'data-type': 'matches'}).attrs.get('data-id'),
                    'date': datetime.fromtimestamp(time.mktime(time.strptime(match.find("div", {'class': 'm-date'}).attrs.get('data-matchdate'), '%Y%m%d'))),
                    'date_string': match.find("div", {'class': 'm-date'}).attrs.get('data-matchdate'),
                    'home': match.find("div", {'class': 'home'}).find('span', {'class': 't-nText'}).get_text(),
                    'home_logo': match.find('div', {'class': 'home'}).find('img', {'class': 't-i-3-logo'}).attrs.get('src'),
                    'away': match.find('div', {'class': 'away'}).find('span', {'class': 't-nText'}).get_text(),
                    'away_logo': match.find('div', {'class': 'away'}).find('img', {'class': 't-i-3-logo'}).attrs.get('src'),
                    'is_res': True
                }
            count += 1
Example #27
0
  def attributes(self, node):
    attr = VMap()
    vfile = node.open()
    img = Image.open(vfile)
    info = img._getexif()
    vfile.close()
    for tag, values in info.items():
      if tag in self.dateTimeTags:
        try:
          decoded = str(TAGS.get(tag, tag))
          try:
            dt = strptime(values, "%Y:%m:%d %H:%M:%S")
          except ValueError:
            try:
              dt = strptime(values[:-6], "%Y-%m-%dT%H:%M:%S")
            except ValueError:
              dt = strptime(values.rstrip(' '), "%a %b %d %H:%M:%S")
          vt = vtime(dt.tm_year, dt.tm_mon, dt.tm_mday, dt.tm_hour, dt.tm_min, dt.tm_sec, 0)
          vt.thisown = False
          attr[decoded] = Variant(vt)
        except Exception as e:
          attr[decoded] = Variant(str(values))
      else:
        decoded = str(TAGS.get(tag, tag))
        if isinstance(values, tuple):
          vl = VList()
          for value in values:
            vl.push_back(Variant(value))
          attr[decoded] = vl
        else:
          attr[decoded] = Variant(values)
    return attr
Example #28
0
    def fix_unicode(self):
        for conf in self.FEC_CONFIG:
            try:
                infile = open(os.path.join(self._working_dir(conf), conf.filename), "r")
            except:
                infile = open(os.path.join(self._working_dir(conf), conf.filename)[:-3] + "dta", "r")

            outfile = open(os.path.join(self._working_dir(conf), conf.filename[:-3] + "txt.utf8"), "w")

            for line in infile:
                try:
                    fixed_line = line.decode("utf8", "replace").encode("utf8", "replace")
                except:
                    fixed_line = line
                    self.log.info("utf problem" + line)
                    continue  # don't include this line in the database (!)

                if not conf.schema:
                    parts = fixed_line.split("|")
                    if len(parts[13]) == 7:
                        parts[13] = "0" + parts[13][:2] + parts[13][3:]
                    date = parts[13][-4:] + "-" + parts[13][:2] + "-" + parts[13][2:4]
                    try:
                        time.strptime(date, "%Y-%m-%d")
                    except:
                        date = ""
                    parts[13] = date
                    fixed_line = "|".join(parts)
                try:
                    outfile.write(conf.cycle + "|" + fixed_line)
                except:
                    self.log.info("couldnt write " + fixed_line)
Example #29
0
def get_milli_timestamp(time_str):
    if len(time_str) == 8:
        struct_time = time.strptime(time_str, "%Y%m%d")
    elif len(time_str) == 10:
        struct_time = time.strptime(time_str, "%Y%m%d%H")
    else:
        raise ValueError("unsupported time string: %r" % time_str)

    return int(time.mktime(struct_time) * 1000)
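
Usage note: because mktime interprets the struct_time in local time, the result depends on the machine's timezone. A hedged example:

get_milli_timestamp("20180912")    # midnight 2018-09-12, local time, in milliseconds
get_milli_timestamp("2018091215")  # 15:00 on 2018-09-12, local time, in milliseconds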
Example #30
0
def fetch_topcoder():
    try:
        page = urlopen("https://clients6.google.com/calendar/v3/calendars/appirio.com_bhga3musitat85mhdrng9035jg%40group.calendar.google.com/events?calendarId=appirio.com_bhga3musitat85mhdrng9035jg%40group.calendar.google.com&singleEvents=true&timeZone=Asia%2FCalcutta&maxAttendees=1&maxResults=250&sanitizeHtml=true&timeMin=2015-04-26T00%3A00%3A00-04%3A00&timeMax=2016-06-07T00%3A00%3A00-04%3A00&key=AIzaSyBNlYH01_9Hc5S1J9vuFmu2nUqBZJNAXxs", timeout=15)
        data = json.load(page)["items"]
        cur_time = localtime()
        for item in data:
            if item["start"].has_key("date"):
                continue

            start_time = strptime(item["start"]["dateTime"][:19], "%Y-%m-%dT%H:%M:%S")
            start_time_indian = strftime("%a, %d %b %Y %H:%M", start_time)
            end_time = strptime(item["end"]["dateTime"][:19], "%Y-%m-%dT%H:%M:%S")
            end_time_indian = strftime("%a, %d %b %Y %H:%M", end_time)

            duration = get_duration(int((mktime(end_time) - mktime(start_time)) / 60))
            name = item["summary"]
            if "SRM" in name and "description" in item:
                url = "http://community.topcoder.com/tc?module=MatchDetails&rd=" + item["description"][110:115]
            else:
                url = "http://tco15.topcoder.com/algorithm/rules/"

            if cur_time < start_time:
                posts["upcoming"].append({ "Name" :  name , "url" : url ,"EndTime" : end_time_indian,"Duration":duration, "StartTime" :  start_time_indian,"Platform":"TOPCODER"  })
            elif cur_time > start_time and cur_time < end_time:
                posts["ongoing"].append({ "Name" :  name , "url" : url ,"EndTime" : end_time_indian,"Platform":"TOPCODER"  })

    except Exception, e:
        pass
Example #31
0


# =========================================

tags = flip_flags(flags)

files = [f for f in listdir(inpath) if isfile(join(inpath, f))]

for file in files:
  transactions = openfile( file )

  for transaction in transactions:
    transaction.append(file)

    trans_date = time.strptime(transaction[0], "%d/%m/%Y")
    transaction[0] = time.strftime("%Y-%m-%d", trans_date)

    if transaction[1][0] == '+':
      transaction[1] = transaction[1][1:]

    if not add_flags(transaction):
      unmatched += 1
      unmatched_amount -= float(transaction[1])
      unmatched_tags.append(transaction[2][0:30].strip())
      messages.append(transaction[1] + ' ' + transaction[2][0:100].strip().ljust(100) + ' ' + transaction[4])
      # print transaction[1] + ' ' +  transaction[2]

    # loop through our tags dict and see if any of the tags match our message
    # conflict = transaction[5] == transaction[7]
    # transaction.append(str(conflict))
Example #32
0
localtime2 = time.asctime(time.localtime(time.time()))
print "当前时间戳为:", ticks
print "本地时间为 :", localtime1
print "本地格式化时间为 :", localtime2

print'\n\n'

# Format as 2016-03-20 11:45:39
print 'Formatted as 2016-03-20 11:45:39:', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

# Format as Sat Mar 28 22:24:24 2016
print 'Formatted as Sat Mar 28 22:24:24 2016:', time.strftime("%a %b %d %H:%M:%S %Y", time.localtime())

# Convert a formatted time string to a timestamp
a = "Sat Mar 28 22:24:24 2016"
print 'Formatted string converted to a timestamp:', time.mktime(time.strptime(a, "%a %b %d %H:%M:%S %Y"))

print'\n\n'

import calendar

cal = calendar.month(2018, 3)
print "以下输出2016年1月份的日历:"
print cal;

# Functions -------------------
# A function block begins with the def keyword, followed by the function name and parentheses ().
# Any input parameters and arguments must be placed inside the parentheses, which are used to define parameters.
# The first statement of a function can optionally be a docstring describing the function.
# The function body starts after a colon and is indented.
# return [expression] ends the function, optionally returning a value to the caller. A bare return returns None.
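# A minimal illustrative function following the rules above (hypothetical example):
def greet(name, greeting="Hello"):
    """Return a greeting for the given name."""
    return "%s, %s!" % (greeting, name)

print greet("world")     # Hello, world!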
Example #33
0
import sys
import time
for data in sys.stdin:
    data = data.strip()
    fields = data.split('\t')
    if len(fields) != 11:
        continue
    [_, Time, userid, rc, AC, query, CI, iC, Post, _, dialogue] = fields
    if userid[-1]!='f':
        continue
    if rc == 'godText':
        try:
            t1 = time.strptime(Time[:20], '%d/%b/%Y:%H:%M:%S')
            Time = time.strftime('%Y-%m-%d %H:%M:%S',t1)
            #idx = Post.find('[seq]')
            #sc = Post[:idx]
            #sc = Post.split('[seq]')[0]
            sc = 'sc_null'
            S = [userid,Time,AC,query,sc]
            S = '\t'.join(S)
            sys.stdout.write("%s\n" % S)
        except:
            continue
Example #34
0
def HMS2ts(date):
    return int(time.mktime(time.strptime(date, '%Y-%m-%d')))
Example #35
0
      ,time.localtime().tm_mday)

# Converting between time representations
# Convert a time tuple to a timestamp
print('Tuple converted to a timestamp:',time.mktime(t))
# Convert the current system time to a timestamp
print(time.mktime(time.localtime()))
# Convert a timestamp to a time tuple
print(time.localtime(1502591382))
print('Current timestamp converted to a time tuple:',time.localtime(time.time()))

# Convert a time tuple to a formatted time string; note that format codes are case-sensitive
print('Current time as a formatted string:',time.strftime('%Y-%m-%d %H:%M:%S',time.localtime()))

# Convert a formatted time string to a time tuple; the trailing format description is required
print('Time string converted to a time tuple:',time.strptime('2021-02-21 21:00:09','%Y-%m-%d %H:%M:%S'))

# The datetime module represents dates and times
import datetime
print(datetime.datetime(2021,2,21,21,37,50))  # Create a datetime from components; year, month and day are required, hour/minute/second default to 00:00:00
print(datetime.datetime.now())   # Get the current time, precise to the microsecond
print(datetime.datetime.today())   # Get the current time

# Converting between datetime representations
print('Current datetime converted to a timestamp:',datetime.datetime.now().timestamp())
print('Timestamp converted to a datetime:',datetime.datetime.fromtimestamp(1613914941.140178))
print('Datetime converted to a formatted string:',datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print('Time string converted to a datetime:',datetime.datetime.strptime('2021-02-21 21:47:44','%Y-%m-%d %H:%M:%S'))

# The calendar module
import calendar
Example #36
0
 def node_start_time(cls, node):
     time_str = node.extra['launch_time'].split('.', 2)[0] + 'UTC'
     return time.mktime(time.strptime(
             time_str,'%Y-%m-%dT%H:%M:%S%Z')) - time.timezone
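
The `mktime(...) - time.timezone` pattern above turns a UTC string parsed with strptime into an epoch value; a hedged equivalent (modulo DST edge cases) uses calendar.timegm directly. utc_launch_time_to_epoch is a hypothetical name:

import calendar
import time

def utc_launch_time_to_epoch(time_str):
    # e.g. '2016-03-28T22:24:24UTC', as built in node_start_time above
    return calendar.timegm(time.strptime(time_str, '%Y-%m-%dT%H:%M:%S%Z'))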
Example #37
0
    def parse(self, response):
        self.isCOOKIE = True
        self.isOK = True
        # print('----------------------------')
        # print(response.text)
        # Separate the day's flight information from the price information
        time_numbers = response.xpath('//td[contains(@class,"fl_date")]')
        prices = response.xpath(
            '//td[contains(@class,"outward-total-fare-td")]')
        # print('-'*50)
        # print(response.text)
        # print('+' * 50)
        if not time_numbers:
            # print('!'*30)
            # print '%s' % (response.text.decode('utf-8').encode('gbk', 'ignore'))
            # self.log("no data",40)
            try:
                page = response.xpath('//h2/text()')[0].extract()
                # print(page)
                # print(response.status)
                # # print response.text
                # # proxy_invalid = response.xpath('//td[4]/text()')[0].extract()
                # # print proxy_invalid
                # if response.status == 404:
                #     self.isOK = False
                #     yield scrapy.Request(self.start_urls[0],
                #                          method='POST',
                #                          headers=self.custom_settings.get('headers'),
                #                          body=response.meta.get('meta_data').get('form'),
                #                          callback=self.parse,
                #                          meta={'meta_data': response.meta.get('meta_data')},
                #                          errback=self.errback)
                if page == 'Are you human?':
                    self.isCOOKIE = False
                    yield scrapy.Request(
                        self.start_urls[0],
                        method='POST',
                        headers=self.custom_settings.get('headers'),
                        body=response.meta.get('meta_data').get('form'),
                        callback=self.parse,
                        meta={'meta_data': response.meta.get('meta_data')},
                        errback=self.errback)
                self.task.append(response.meta.get('meta_data').get('invalid'))
            except:
                self.log("no data", 10)
                self.task.append(response.meta.get('meta_data').get('invalid'))
            return

        # Loop over each flight; year is the year of the flight
        year = response.meta.get('meta_data').get('year')
        # print(len(time_numbers))

        for i in range(len(time_numbers)):
            # Extract this flight's info: departure time, arrival time, flight number
            time_number = time_numbers[i].xpath('./span/text()').extract()
            # Departure time
            deptime = time.strptime(year + time_number[0], '%Y%m/%d\xa0%H:%M')
            depTime = time.mktime(deptime)
            # Arrival time
            arrtime = time.strptime(year + time_number[1], '%Y%m/%d\xa0%H:%M')
            arrTime = time.mktime(arrtime)
            # Flight number
            flightNumber = time_number[2]
            carrier = re.search('\D{2}', time_number[2]).group()

            # Locate the current flight's price by index; the total divided by maxSeats is the per-person price
            maxSeats = response.meta.get('meta_data').get('maxSeats')
            try:
                price = prices[i].xpath(
                    './div[@id="outward_hp_' + str(i + 1) +
                    '_total_fare"]//span/text()').extract()
            except:
                # In this case the IP is bad and the returned data is wrong
                self.log('Dangerous error data....', 40)
                self.isOK = False
            if price[3] == '0':
                price = prices[i].xpath(
                    '//div[@id="outward_hpp_' + str(i + 1) +
                    '_total_fare"]//span/text()').extract()
            if price[3] == '0':
                price = prices[i].xpath(
                    '//div[@id="outward_prime_' + str(i + 1) +
                    '_total_fare"]//span/text()').extract()
            if price[3] == '0':
                self.task.append(response.meta.get('meta_data').get('invalid'))
                continue
            # print(price)
            # Extract the prices

            netFare = int(
                re.search(r"\d.*", price[0]).group().replace(',',
                                                             '')) / maxSeats
            adultTax = int(
                re.search(r"\d.*", price[1]).group().replace(',',
                                                             '')) / maxSeats
            # Add a check for discounted fares
            promo = response.xpath('//td[@id="outward_hp_' + str(i + 1) +
                                   '_list"]/@class').extract()[0].split(' ')
            if promo[-1] == 'promo':
                adultPrice = int(
                    re.search(r"\d.*", price[3]).group().replace(
                        ',', '')) / maxSeats / 0.7
                cabin = 'S'
            else:
                cabin = 'X'
                adultPrice = int(
                    re.search(r"\d.*", price[3]).group().replace(
                        ',', '')) / maxSeats
            # Check whether the page data is bogus
            if not price[2]:
                return
            currency = self.custom_settings.get('CURRENCY_CACHE').get(price[2])

            depAirport = response.meta.get('meta_data').get('invalid').get(
                'depAirport')
            arrAirport = response.meta.get('meta_data').get('invalid').get(
                'arrAirport')

            isChange = 1
            segments = dict(
                flightNumber=flightNumber,
                aircraftType='',
                number=1,
                departureTime=time.strftime('%Y-%m-%d %H:%M:%S',
                                            time.localtime(depTime)),
                destinationTime=time.strftime('%Y-%m-%d %H:%M:%S',
                                              time.localtime(arrTime)),
                airline=carrier,
                dep=depAirport,
                dest=arrAirport,
                seats=int(maxSeats),
                duration=dataUtil.gen_duration(depTime, arrTime),
                depTerminal='')
            getTime = time.time()

            item = WowSpiderItem()
            item['flightNumber'] = flightNumber
            item['depTime'] = depTime
            item['arrTime'] = arrTime
            item['fromCity'] = self.portCitys.get(depAirport, depAirport)
            item['toCity'] = self.portCitys.get(arrAirport, arrAirport)
            item['depAirport'] = depAirport
            item['arrAirport'] = arrAirport
            item['currency'] = currency
            item['adultPrice'] = adultPrice
            item['adultTax'] = adultTax
            item['netFare'] = netFare
            item['maxSeats'] = maxSeats
            item['cabin'] = cabin
            item['carrier'] = carrier
            item['isChange'] = isChange
            item['segments'] = '[]'
            item['getTime'] = getTime
            yield item
Example #38
0
# coding: utf-8
import time
time.stftime('%Y-%m-%d %H:%M:%S', '2018-09-12 12:12:12')
time.strptime('%Y-%m-%d %H:%M:%S', '2018-09-12 12:12:12')
time.strstime('%Y-%m-%d %H:%M:%S', '2018-09-12 12:12:12')
time.strptime('2018-09-12 12:12:12', "%Y-%m-%d %H:%M:%S")
time.strptime('2018-09-12 12:12:12', "%Y-%m-%d")
time.strptime('2018-09-12 12:12:12', "%Y-%m-%d %H:%M:%S")
t = time.strptime('2018-09-12 12:12:12', "%Y-%m-%d %H:%M:%S")
t
dir(t)
time.mktime(t)
ss = time.mktime(t)
datetime.datetime.fromtimestamp(ss)
import datetime
datetime.datetime.fromtimestamp(ss)
datetime.datetime.fromtimestamp(ss).date
datetime.datetime.fromtimestamp(ss).date()
d =datetime.datetime.fromtimestamp(ss).date()
str(d)
datetime.datetime('2018-09-12 12:12:12')
dir(datetime)
dir(datetime.time)
from datetime import datetime
datetime.strptime('2018-08-12 12:12:21', "%Y-%m-%d %H:%M:%S")
datetime.strptime('2018-08-12 12:12:21', "%Y-%m-%d %H:%M:%S").date()
import pandas as pd
d =[array([10,  1,  7,  3]),
 array([ 0, 14, 12, 13]),
 array([ 3, 10,  7,  8]),
 array([7, 5]),
Example #39
0
    header = fh.readline().strip().split('\t')
    header.insert(1, 'Kingdom')
    print('\t'.join(header))
    for line in fh:
        line = line.strip()
        word = line.split('\t')
        size = word[6]
        gc = word[7]
        genes = word[14]
        proteins = word[15]
        release = word[17]
        if word[0] not in check:
            check[word[0]] = [size, gc, genes, proteins, release]
        elif word[0] in check:
            try:
                if time.strptime(check[word[0]][-1],
                                 "%Y/%m/%d") < time.strptime(
                                     release, "%Y/%m/%d"):
                    check[word[0]] = [size, gc, genes, proteins, release]
            except:
                if check[word[0]][-1] == '-':
                    check[word[0]] = [size, gc, genes, proteins, release]
                else:
                    pass

test1 = {}
with open('bacteria.txt') as fh:
    for line in fh:
        line = line.strip().split('\t')
        if line[0] in check:
            test1[line[0]] = line[:4] + check[line[0]]
Example #40
0
def timestamp2unix(time_string, pattern='%Y-%m-%d %H:%M:%S'):
    time_array = time.strptime(time_string, pattern)
    return int(time.mktime(time_array))
Example #41
0
def grab_tv_guide(channels):
    programs = {}

    guide_json = urlquick.get(URL_PROGRAMS, max_age=-1)
    guide_json = json.loads(guide_json.text)
    guide_items = guide_json['data']['items']

    for guide_item in guide_items:
        channel = guide_item['channel']
        channel_crt_id = channel['id']

        if channel_crt_id in ID_CHANNELS and \
                ID_CHANNELS[channel_crt_id] in channels:

            channel_id = ID_CHANNELS[channel_crt_id]
            program_dict = {}

            program_dict['duration'] = guide_item['duration']  # sec

            start_s = guide_item['startedAt']
            start_s = start_s.split('+')[0]

            try:
                start = datetime.datetime.strptime(start_s, GUIDE_TIME_FORMAT)
            except TypeError:
                start = datetime.datetime(
                    *(time.strptime(start_s, GUIDE_TIME_FORMAT)[0:6]))

            try:
                local_tz = get_localzone()
            except Exception:
                # Hotfix issue #102
                local_tz = pytz.timezone('Europe/Brussels')

            start = GUIDE_TIMEZONE.localize(start)
            start = start.astimezone(local_tz)
            start_time = start.strftime("%Hh%M")

            program_dict['start_time'] = start_time

            program_dict['genre'] = guide_item['program']['formatGenre'][
                'genre']['name']

            program_dict['program_id'] = guide_item['id']

            program_dict['title'] = guide_item['title']

            # https://tel.img.pmdstatic.net/fit/http.3A.2F.2Fimages.2Eone.2Eprismamedia.2Ecom.2Fchannel.2F2.2F3.2F9.2Fe.2F1.2F3.2F1.2Fb.2Ff.2Fd.2Fa.2F0.2F2.2Fa.2F3.2Fd.2Epng/76x76/quality/100/image.png
            # https://tel.img.pmdstatic.net/fit/http.3A.2F.2Fimages.2Eone.2Eprismamedia.2Ecom.2Fprogram.2Fe.2F0.2F0.2F2.2F5.2Fc.2F3.2F3.2F2.2F1.2F1.2F1.2F1.2Fa.2F4.2F9.2Ejpg/520x336/quality/75/image.jpg
            # https://tel.img.pmdstatic.net/{transformation}/http.3A.2F.2Fimages.2Eone.2Eprismamedia.2Ecom.2Fprogram.2F8.2F0.2F4.2F4.2F4.2F7.2F9.2F3.2F5.2F4.2F9.2F7.2F4.2Fb.2Ff.2Fd.2Ejpg/{width}x{height}/{parameters}/{title}.jpg

            if 'image' in guide_item['program']:
                image = guide_item['program']['image']['urlTemplate']
                image = image.replace('{transformation}', 'fit')
                image = image.replace('{width}', '520')
                image = image.replace('{height}', '336')
                image = image.replace('{parameters}', 'quality/75')
                image = image.replace('{title}', 'image')
                program_dict['thumb'] = image

            programs[channel_id] = program_dict

    return programs
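For reference, a small usage sketch for the timestamp2unix() helper defined at the top of this example; time.mktime() interprets the parsed value as local time, so the exact number depends on the machine's timezone:

print(timestamp2unix('2018-08-12 12:12:21'))              # 1534075941 when local time is UTC
print(timestamp2unix('2018-08-12', pattern='%Y-%m-%d'))   # 1534032000 when local time is UTC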
Example #42
0
import time


def tojstime(d):
    # parse a YYYY-MM-DD date (local time) and return JavaScript-style
    # milliseconds since the epoch
    return time.mktime(time.strptime(str(d), '%Y-%m-%d')) * 1000
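A brief usage sketch for tojstime above; it accepts anything whose str() form is YYYY-MM-DD, so a datetime.date works as well as a plain string (the value depends on the local timezone):

import datetime

print(tojstime('2018-08-12'))                  # 1534032000000.0 when local time is UTC
print(tojstime(datetime.date(2018, 8, 12)))    # same value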
Example #43
0
    def test_03_stats(self):
        """Test STATS stats method works"""
        today = unicode(datetime.date.today())
        hour = int(datetime.datetime.utcnow().strftime('%H'))
        date_ms = time.mktime(time.strptime(today, "%Y-%m-%d")) * 1000
        anon = 0
        auth = 0
        TaskRunFactory.create(task=self.project.tasks[0])
        TaskRunFactory.create(task=self.project.tasks[1])
        dates_stats, hours_stats, user_stats = stats.get_stats(self.project.id)
        for item in dates_stats:
            if item['label'] == 'Anon + Auth':
                assert item['values'][-1][0] == date_ms, item['values'][0][0]
                assert item['values'][-1][
                    1] == 10, "There should be 10 answers"
            if item['label'] == 'Anonymous':
                assert item['values'][-1][0] == date_ms, item['values'][0][0]
                anon = item['values'][-1][1]
            if item['label'] == 'Authenticated':
                assert item['values'][-1][0] == date_ms, item['values'][0][0]
                auth = item['values'][-1][1]
            if item['label'] == 'Total Tasks':
                assert item['values'][-1][0] == date_ms, item['values'][0][0]
                assert item['values'][-1][1] == 4, "There should be 4 tasks"
            if item['label'] == 'Expected Answers':
                assert item['values'][0][0] == date_ms, item['values'][0][0]
                for i in item['values']:
                    assert i[1] == 100, "Each date should have 100 answers"
                assert item['values'][0][
                    1] == 100, "There should be 10 answers"
        assert auth + anon == 10, "date stats sum of auth and anon should be 10"

        max_hours = 0
        for item in hours_stats:
            if item['label'] == 'Anon + Auth':
                max_hours = item['max']
                print item
                assert item['max'] == 10, item['max']
                assert item['max'] == 10, "Max hours value should be 10"
                for i in item['values']:
                    if i[0] == hour:
                        assert i[1] == 10, "There should be 10 answers"
                        assert i[2] == 5, "The size of the bubble should be 5"
                    else:
                        assert i[1] == 0, "There should be 0 answers"
                        assert i[2] == 0, "The size of the bubble should be 0"
            if item['label'] == 'Anonymous':
                anon = item['max']
                for i in item['values']:
                    if i[0] == hour:
                        assert i[1] == anon, "There should be anon answers"
                        assert i[2] == (
                            anon * 5
                        ) / max_hours, "The bubble size should be proportional to anon"
                    else:
                        assert i[1] == 0, "There should be 0 answers"
                        assert i[2] == 0, "The size of the bubble should be 0"
            if item['label'] == 'Authenticated':
                auth = item['max']
                for i in item['values']:
                    if i[0] == hour:
                        assert i[1] == auth, "There should be auth answers"
                        assert i[2] == (
                            auth * 5
                        ) / max_hours, "The bubble size should be proportional to auth"
                    else:
                        assert i[1] == 0, "There should be 0 answers"
                        assert i[2] == 0, "The size of the bubble should be 0"
        assert auth + anon == 10, "hours stats sum of auth and anon should be 10"

        err_msg = "user stats sum of auth and anon should be 7"
        assert user_stats['n_anon'] + user_stats['n_auth'] == 7, err_msg
Example #44
0
import time

# do_tidb_exe() and do_tidb_select() are database helpers defined elsewhere
# in the original project.
def create_project(project_name, expired=None):
    # Create a new project. project_name is the project name; expired is the
    # expiry date as a string in the form 2019-01-01.
    create_project_list = """CREATE TABLE IF NOT EXISTS `project_list` (
    `project_name` varchar(255) DEFAULT NULL COMMENT '项目名称',
    `created_at` int(11) DEFAULT NULL COMMENT '创建时间',
    `expired_at` int(11) DEFAULT NULL COMMENT '过期时间',
    `event_count` bigint(20) DEFAULT NULL COMMENT '事件量',
    `device_count` bigint(20) DEFAULT NULL COMMENT '设备数',
    `user_count` bigint(20) DEFAULT NULL COMMENT '用户数',
    `enable_scheduler` int(4) DEFAULT 1 COMMENT '是否启动定时器支持'
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"""

    create_shortcut = """CREATE TABLE if not EXISTS `shortcut` (
    `project` varchar(255) DEFAULT NULL COMMENT '项目名',
    `short_url` varchar(255) DEFAULT NULL COMMENT '短链地址',
    `long_url` varchar(768) DEFAULT NULL COMMENT '长链地址',
    `expired_at` int(11) DEFAULT NULL COMMENT '过期时间',
    `created_at` int(11) DEFAULT NULL COMMENT '创建时间',
    `src` varchar(10) DEFAULT NULL COMMENT '使用的第三方创建源',
    `src_short_url` varchar(255) DEFAULT NULL COMMENT '创建源返回的短地址',
    `submitter` varchar(255) DEFAULT NULL COMMENT '由谁提交',
    `utm_source` varchar(2048) DEFAULT NULL COMMENT 'utm_source',
    `utm_medium` varchar(2048) DEFAULT NULL COMMENT 'utm_medium',
    `utm_campaign` varchar(2048) DEFAULT NULL COMMENT 'utm_campaign',
    `utm_content` varchar(2048) DEFAULT NULL COMMENT 'utm_content',
    `utm_term` varchar(2048) DEFAULT NULL COMMENT 'utm_term',
    KEY `short_url` (`short_url`),
    KEY `long_url` (`long_url`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"""
    create_shortcut_history = """CREATE TABLE IF NOT EXISTS `shortcut_history` (
    `short_url` varchar(255) DEFAULT NULL COMMENT '解析短链',
    `result` varchar(255) DEFAULT NULL COMMENT '解析的结果',
    `cost_time` int(11) DEFAULT NULL COMMENT '耗费时间',
    `ip` varchar(255) DEFAULT NULL,
    `created_at` int(11) DEFAULT NULL COMMENT '解析时间',
    `user_agent` text DEFAULT NULL,
    `accept_language` text DEFAULT NULL,
    `ua_platform` varchar(255) DEFAULT NULL,
    `ua_browser` varchar(255) DEFAULT NULL,
    `ua_version` varchar(255) DEFAULT NULL,
    `ua_language` varchar(255) DEFAULT NULL,
    KEY `created_at` (`created_at`),
    KEY `short_url` (`short_url`),
    KEY `short_url_result` (`short_url`,`result`)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"""

    create_mobile_ad_src ="""CREATE TABLE if not EXISTS `mobile_ad_src` (
    `src` varchar(255) NOT NULL COMMENT '创建源名称',
    `src_name` varchar(255) DEFAULT NULL COMMENT '创建源的中文名字',
    `src_args` varchar(1024) DEFAULT NULL COMMENT '创建源自带参数',
    `created_at` int(11) DEFAULT NULL COMMENT '创建时间',
    `updated_at` int(11) DEFAULT NULL COMMENT '维护时间',
    `utm_source` varchar(255) DEFAULT NULL COMMENT '预制的utm_source',
    `utm_medium` varchar(255) DEFAULT NULL COMMENT '预制的utm_medium',
    `utm_campaign` varchar(255) DEFAULT NULL COMMENT '预制的utm_campaign',
    `utm_content` varchar(255) DEFAULT NULL COMMENT '预制的utm_content',
    `utm_term` varchar(255) DEFAULT NULL COMMENT '预制的utm_term',
    PRIMARY KEY (`src`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"""
    create_mobile_ad_list ="""CREATE TABLE if not EXISTS `mobile_ad_list` (
    `project` varchar(255) DEFAULT NULL COMMENT '项目名',
    `url` varchar(768) NOT NULL COMMENT '监测地址',
    `expired_at` int(11) DEFAULT NULL COMMENT '过期时间',
    `created_at` int(11) DEFAULT NULL COMMENT '创建时间',
    `src` varchar(255) DEFAULT NULL COMMENT '使用的检测原id',
    `src_url` varchar(1024) DEFAULT NULL COMMENT '使用的检测模板',
    `submitter` varchar(255) DEFAULT NULL COMMENT '由谁提交',
    `utm_source` varchar(2048) DEFAULT NULL COMMENT 'utm_source',
    `utm_medium` varchar(2048) DEFAULT NULL COMMENT 'utm_medium',
    `utm_campaign` varchar(2048) DEFAULT NULL COMMENT 'utm_campaign',
    `utm_content` varchar(2048) DEFAULT NULL COMMENT 'utm_content',
    `utm_term` varchar(2048) DEFAULT NULL COMMENT 'utm_term',
    PRIMARY KEY (`url`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"""
    created_shortcut_read="""CREATE TABLE if not EXISTS `shortcut_read` (
    `short_url` varchar(255) NOT NULL COMMENT '短链地址',
    `ip` varchar(20) DEFAULT NULL COMMENT 'ip',
    `created_at` int(11) DEFAULT NULL COMMENT '时间',
    `user_agent` text DEFAULT NULL COMMENT 'ua',
    `accept_language` text DEFAULT NULL COMMENT '语言',
    `ua_platform` varchar(255) DEFAULT NULL COMMENT '平台',
    `ua_browser` varchar(255) DEFAULT NULL COMMENT '浏览器',
    `ua_version` varchar(255) DEFAULT NULL COMMENT '版本号',
    `ua_language` varchar(255) DEFAULT NULL COMMENT '语言',
    `referrer` text DEFAULT NULL COMMENT '页面',
    KEY `short_url` (`short_url`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"""
    blacklist_sql_1="""CREATE TABLE `recall_blacklist` (
    `id` int(11) NOT NULL AUTO_INCREMENT,
    `project` varchar(255) NOT NULL COMMENT '项目名',
    `distinct_id` varchar(255) DEFAULT NULL,
    `key` varchar(255) NOT NULL COMMENT '渠道key',
    `type_id` int(11) NOT NULL COMMENT '渠道类型',
    `reason_id` int(11) DEFAULT NULL COMMENT '原因id',
    `owner` varchar(255) DEFAULT NULL COMMENT '第一次操作所属人',
    `latest_owner` varchar(255) DEFAULT NULL COMMENT '最后一次操作所属人',
    `status` int(11) DEFAULT NULL COMMENT '状态',
    `created_at` int(11) DEFAULT NULL COMMENT '创建时间',
    `updated_at` int(11) DEFAULT NULL COMMENT '更新时间',
    PRIMARY KEY (`id`),
    UNIQUE KEY `anti_copy` (`key`,`type_id`,`project`),
    KEY `check_blacklist` (`status`,`key`,`type_id`,`project`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1;"""
    blacklist_sql_2="""CREATE TABLE `recall_blacklist_history` (
    `rbid` int(11) NOT NULL COMMENT 'recall_blacklist的id',
    `checker` varchar(255) DEFAULT NULL COMMENT '查询者的名字',
    `result_status_id` int(11) DEFAULT NULL COMMENT '返回的status_code里pid是39的状态',
    `result_reason_id` int(11) DEFAULT NULL COMMENT '返回的status_code里pid是30的理由',
    `created_at` int(11) DEFAULT NULL COMMENT '创建时间',
    KEY `rbid` (`rbid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"""
    blacklist_sql_3="""CREATE TABLE `recall_blacklist_reason` (
    `rbid` int(11) NOT NULL COMMENT 'recall_blacklist的id',
    `reason_id` int(11) DEFAULT NULL COMMENT 'status_code里pid是30的状态',
    `reason_owner` varchar(255) DEFAULT NULL COMMENT '修改人',
    `reason_comment` varchar(255) DEFAULT NULL COMMENT '修改的备注',
    `final_status_id` int(11) DEFAULT NULL COMMENT '最后写入recall_blacklist的status_code里pid是39的状态',
    `created_at` varchar(255) DEFAULT NULL COMMENT '创建的时间',
    KEY `rbid` (`rbid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"""
    do_tidb_exe(create_project_list)
    do_tidb_exe(create_shortcut)
    do_tidb_exe(create_shortcut_history)
    do_tidb_exe(create_mobile_ad_src)
    do_tidb_exe(create_mobile_ad_list)
    do_tidb_exe(created_shortcut_read)
    do_tidb_exe(blacklist_sql_1)
    do_tidb_exe(blacklist_sql_2)
    do_tidb_exe(blacklist_sql_3)
    # print('project_list created')
    check_sql = "show tables"
    check_result,check_count = do_tidb_select(check_sql)
    tables_name = []
    for line in check_result:
        tables_name.append(line[0])
    # print(tables_name)
    check_project_list_sql = """SELECT count(*) FROM `project_list` where project_name='{project_name}'""".format(project_name=project_name)
    check_project_list_result,check_project_list_count = do_tidb_select(check_project_list_sql)
    # print(check_project_list_result[0][0])
    if project_name in tables_name or check_project_list_result[0][0]>0:
        print(project_name + ' project tables already exist')
    else:
        table_sql="""CREATE TABLE `{project_name}` (
    `track_id` bigint(17) DEFAULT NULL,
    `distinct_id` varchar(64) DEFAULT NULL,
    `lib` varchar(255) DEFAULT NULL,
    `event` varchar(255) DEFAULT NULL,
    `type` varchar(255) DEFAULT NULL,
    `all_json` json DEFAULT NULL,
    `host` varchar(255) DEFAULT NULL,
    `user_agent` varchar(2048) DEFAULT NULL,
    `ua_platform` varchar(1024) DEFAULT NULL,
    `ua_browser` varchar(1024) DEFAULT NULL,
    `ua_version` varchar(1024) DEFAULT NULL,
    `ua_language` varchar(1024) DEFAULT NULL,
    `connection` varchar(255) DEFAULT NULL,
    `pragma` varchar(255) DEFAULT NULL,
    `cache_control` varchar(255) DEFAULT NULL,
    `accept` varchar(255) DEFAULT NULL,
    `accept_encoding` varchar(255) DEFAULT NULL,
    `accept_language` varchar(255) DEFAULT NULL,
    `ip` varchar(512) DEFAULT NULL,
    `ip_city` json DEFAULT NULL,
    `ip_asn` json DEFAULT NULL,
    `url` text DEFAULT NULL,
    `referrer` varchar(2048) DEFAULT NULL,
    `remark` varchar(255) DEFAULT NULL,
    `created_at` int(11) DEFAULT NULL,
    `date` date DEFAULT NULL,
    `hour` int(2) DEFAULT NULL,
    KEY `date` (`date`),
    KEY `distinct_id` (`distinct_id`),
    KEY `event` (`event`),
    KEY `date_hour` (`date`,`hour`),
    KEY `event_date` (`event`,`date`),
    KEY `event_remark_date` (`event`,`remark`,`date`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;""".format(project_name=project_name)
        table_device_sql ="""CREATE TABLE `{project_name}_device` (
    `distinct_id` varchar(255) NOT NULL,
    `lib` varchar(255) DEFAULT NULL,
    `device_id` varchar(255) DEFAULT NULL,
    `manufacturer` varchar(255) DEFAULT NULL,
    `model` varchar(255) DEFAULT NULL,
    `os` varchar(255) DEFAULT NULL,
    `os_version` varchar(255) DEFAULT NULL,
    `ua_platform` varchar(1024) DEFAULT NULL,
    `ua_browser` varchar(1024) DEFAULT NULL,
    `ua_version` varchar(1024) DEFAULT NULL,
    `ua_language` varchar(1024) DEFAULT NULL,
    `screen_width` int(11) DEFAULT NULL,
    `screen_height` int(11) DEFAULT NULL,
    `network_type` varchar(255) DEFAULT NULL,
    `user_agent` varchar(2048) DEFAULT NULL,
    `accept_language` varchar(255) DEFAULT NULL,
    `ip` varchar(255) DEFAULT NULL,
    `ip_city` json DEFAULT NULL,
    `ip_asn` json DEFAULT NULL,
    `wifi` varchar(20) DEFAULT NULL,
    `app_version` varchar(255) DEFAULT NULL,
    `carrier` varchar(255) DEFAULT NULL,
    `referrer` text DEFAULT NULL,
    `referrer_host` varchar(2048) DEFAULT NULL,
    `bot_name` varchar(255) DEFAULT NULL,
    `browser` varchar(255) DEFAULT NULL,
    `browser_version` varchar(255) DEFAULT NULL,
    `is_login_id` varchar(255) DEFAULT NULL,
    `screen_orientation` varchar(255) DEFAULT NULL,
    `gps_latitude` decimal(11,7) DEFAULT NULL,
    `gps_longitude` decimal(11,7) DEFAULT NULL,
    `first_visit_time` datetime DEFAULT NULL,
    `first_referrer` text DEFAULT NULL,
    `first_referrer_host` varchar(768) DEFAULT NULL,
    `first_browser_language` varchar(768) DEFAULT NULL,
    `first_browser_charset` varchar(768) DEFAULT NULL,
    `first_search_keyword` varchar(768) DEFAULT NULL,
    `first_traffic_source_type` varchar(768) DEFAULT NULL,
    `utm_content` varchar(768) DEFAULT NULL,
    `utm_campaign` varchar(768) DEFAULT NULL,
    `utm_medium` varchar(768) DEFAULT NULL,
    `utm_term` varchar(768) DEFAULT NULL,
    `utm_source` varchar(768) DEFAULT NULL,
    `latest_utm_content` varchar(768) DEFAULT NULL,
    `latest_utm_campaign` varchar(768) DEFAULT NULL,
    `latest_utm_medium` varchar(768) DEFAULT NULL,
    `latest_utm_term` varchar(768) DEFAULT NULL,
    `latest_utm_source` varchar(768) DEFAULT NULL,
    `latest_referrer` varchar(2048) DEFAULT NULL,
    `latest_referrer_host` varchar(2048) DEFAULT NULL,
    `latest_search_keyword` varchar(768) DEFAULT NULL,
    `latest_traffic_source_type` varchar(255) DEFAULT NULL,
    `created_at` int(11) DEFAULT NULL,
    `updated_at` int(11) DEFAULT NULL,
    PRIMARY KEY (`distinct_id`),
    KEY `utm_campaign` (`utm_campaign`),
    KEY `utm_source` (`utm_source`),
    KEY `utm_medium` (`utm_medium`),
    KEY `utm_term` (`utm_term`),
    KEY `utm_content` (`utm_content`),
    KEY `created_at` (`created_at`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;""".format(project_name=project_name)
        table_user_sql = """CREATE TABLE `{project_name}_user` (
    `distinct_id` varchar(200) NOT NULL,
    `lib` varchar(127) NOT NULL,
    `map_id` varchar(200) NOT NULL,
    `original_id` varchar(200) NOT NULL,
    `user_id` varchar(255) DEFAULT NULL,
    `all_user_profile` json DEFAULT NULL,
    `created_at` int(11) DEFAULT NULL,
    `updated_at` int(11) DEFAULT NULL,
    PRIMARY KEY (`distinct_id`,`lib`,`map_id`,`original_id`),
    KEY `distinct_id` (`distinct_id`),
    KEY `map_id` (`map_id`),
    KEY `original_id` (`original_id`),
    KEY `distinct_id_lib` (`distinct_id`,`lib`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;""".format(project_name=project_name)
        table_properties_sql = """CREATE TABLE `{project_name}_properties` (
    `lib` varchar(255) NOT NULL,
    `remark` varchar(255) NOT NULL,
    `event` varchar(255) NOT NULL,
    `properties` json DEFAULT NULL,
    `properties_len` int(10) DEFAULT NULL,
    `created_at` int(10) DEFAULT NULL,
    `updated_at` int(10) DEFAULT NULL,
    `lastinsert_at` int(10) DEFAULT NULL,
    `total_count` bigint(20) DEFAULT NULL,
    PRIMARY KEY (`lib`,`remark`,`event`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;;""".format(project_name=project_name)
        do_tidb_exe(table_sql)
        print(project_name + ' table created')
        do_tidb_exe(table_device_sql)
        print(project_name + ' device table created')
        do_tidb_exe(table_user_sql)
        print(project_name + ' user table created')
        do_tidb_exe(table_properties_sql)
        print(project_name + ' properties table created')
        sql_insert_status_code = """CREATE TABLE IF NOT EXISTS `status_code` (
    `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
    `desc` varchar(255) DEFAULT NULL COMMENT '含义',
    `p_id` int(11) DEFAULT NULL COMMENT '父id',
    PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1;"""
        do_tidb_exe(sql_insert_status_code)
        print('status_code table created')
        status_codes = ["INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (1, '分群列表状态', 0);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (2, '创建列表开始', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (3, '分群信息写入中', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (4, '分群写入完成并包含错误', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (5, '分群写入完成', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (6, '分群写入失败', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (7, '生效策略', 0);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (8, '自动', 7);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (9, '手动', 7);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (10, '禁用', 7);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (11, '进入分群队列', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (12, '优先级', 0);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (13, '普通', 12);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (14, '高', 12);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (15, '最高', 12);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (16, '已添加任务队列', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (17, '任务已被选取', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (18, '任务方法加载完', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (19, '任务执行成功', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (20, '分群ETL失败', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (21, '任务执行失败', 1);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (22, '通知方式', 0);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (23, 'email', 22);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (24, '自动分群但不自动应用模板', 7);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (25, '推送状态', 0);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (26, '推送成功', 25);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (27, '推送失败', 25);","INSERT IGNORE INTO `events`.`status_code`(`id`, `desc`, `p_id`) VALUES (28, '自动分群自动应用模板但不自动发送', 7);"]
        for code in status_codes:
            do_tidb_exe(code)
        print('status codes inserted')
        sql_scheduler_jobs = """CREATE TABLE IF NOT EXISTS `scheduler_jobs` (
        `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '任务id',
        `project` varchar(255) DEFAULT NULL COMMENT '项目id',
        `group_id` int(11) DEFAULT NULL COMMENT 'group_plan的id',
        `list_index` int(11) DEFAULT NULL COMMENT 'group_index任务完成后,补充',
        `datetime` int(11) DEFAULT NULL COMMENT '执行的日期,即要执行的那个任务的时间(不是任务执行时间,是要执行的时间。如周三时执行周一的任务。也用来防止任务重复添加)',
        `data` json DEFAULT NULL COMMENT '其他附带的参数',
        `priority` int(4) DEFAULT NULL COMMENT '优先级',
        `status` int(4) DEFAULT NULL COMMENT '状态',
        `created_at` int(11) DEFAULT NULL COMMENT '创建时间',
        `updated_at` int(11) DEFAULT NULL COMMENT '更新时间',
        PRIMARY KEY (`id`),
        UNIQUE KEY `ind_task` (`project`,`group_id`,`datetime`)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1;"""
        do_tidb_exe(sql_scheduler_jobs)
        print('scheduler_jobs table created')
        insert_data = """CREATE TABLE IF NOT EXISTS `{project_name}_usergroup_data` (
    `id` int(11) NOT NULL AUTO_INCREMENT,
    `group_list_id` int(11) DEFAULT NULL COMMENT '分群列表id',
    `data_index` int(11) DEFAULT NULL COMMENT '最新一组数据的index_id',
    `data_key` varchar(255) DEFAULT NULL COMMENT '数据的唯一识别id',
    `data_json` json DEFAULT NULL COMMENT '数据包',
    `enable` int(11) DEFAULT NULL COMMENT '生效策略。参考status_code,p_id=7',
    `created_at` int(11) DEFAULT NULL,
    `updated_at` int(11) DEFAULT NULL,
    PRIMARY KEY (`id`),
    KEY `group_list_id` (`group_list_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1;""".format(project_name=project_name)
        do_tidb_exe(insert_data)
        insert_list = """CREATE TABLE IF NOT EXISTS `{project_name}_usergroup_list` (
    `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '分群列表id',
    `group_id` int(11) DEFAULT NULL COMMENT '分群id',
    `group_list_index` int(11) DEFAULT NULL COMMENT '分群列表顺位',
    `list_init_date` int(11) DEFAULT NULL COMMENT '触发时间',
    `list_desc` varchar(255) DEFAULT NULL COMMENT '清单所描述的',
    `jobs_id` int(4) DEFAULT NULL COMMENT 'scheduler_jbos的id',
    `item_count` int(11) DEFAULT NULL COMMENT '分组条目数',
    `status` int(4) DEFAULT NULL COMMENT '分群状态。参考status_code,p_id=1',
    `complete_at` int(11) DEFAULT NULL COMMENT '分群完成时间',
    `apply_temple_times` int(2) DEFAULT 0 COMMENT '被套用模板的次数',
    `created_at` int(11) DEFAULT NULL COMMENT '条目创建时间',
    `updated_at` int(11) DEFAULT NULL COMMENT '条目更新时间',
    PRIMARY KEY (`id`),
    UNIQUE KEY `unique_key` (`group_id`,`group_list_index`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1;""".format(project_name=project_name)
        do_tidb_exe(insert_list)
        insert_plan = """CREATE TABLE IF NOT EXISTS `{project_name}_usergroup_plan` (
    `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '分群id',
    `group_title` varchar(255) DEFAULT NULL COMMENT '分群标题',
    `group_desc` varchar(255) DEFAULT NULL COMMENT '分群描述',
    `func` json DEFAULT NULL COMMENT '分群执行方法参考/scheduler_jobs/scheduler_job_creator.py',
    `latest_data_list_index` int(11) DEFAULT NULL COMMENT '最新一组数据的id',
    `repeatable` varchar(20) DEFAULT NULL COMMENT '定时器,分,时,日,月,周。不填的用*代替。跟crontab一个逻辑,不支持1-10的方式表达,多日的需要1,2,3,4,5,6,7,8这样的形式填',
    `priority` int(4) DEFAULT NULL COMMENT '任务执行优先级',
    `latest_data_time` int(11) DEFAULT NULL COMMENT '最新一组数据的完成时间',
    `repeat_times` int(11) DEFAULT 0 COMMENT '分群完成次数',
    `enable_policy` int(11) DEFAULT NULL COMMENT '生效策略。参考status_code,p_id=7',
    `latest_apply_temple_id` int(11) DEFAULT NULL COMMENT '最后一次执行的模板类型',
    `latest_apply_temple_time` int(11) DEFAULT NULL COMMENT '最后一次执行的模型时间',
    `created_at` int(11) DEFAULT NULL COMMENT '创建时间',
    `updated_at` int(11) DEFAULT NULL COMMENT '更新时间',
    PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1;""".format(project_name=project_name) 
        do_tidb_exe(insert_plan)
        print(project_name + ' usergroup tables created')
        insert_noti = """CREATE TABLE IF NOT EXISTS `{project_name}_noti` (
    `id` int(11) NOT NULL AUTO_INCREMENT,
    `plan_id` int(11) DEFAULT NULL COMMENT '计划id',
    `list_id` int(11) DEFAULT NULL COMMENT '列表id',
    `data_id` int(11) DEFAULT NULL COMMENT '数据id',
    `temple_id` int(4) DEFAULT NULL COMMENT '模板id',
    `noti_group_id` int(11) DEFAULT NULL COMMENT '消息群组id',
    `distinct_id` varchar(512) DEFAULT NULL COMMENT '用户识别id',
    `priority` int(4) DEFAULT NULL COMMENT '优先级',
    `status` int(4) DEFAULT NULL COMMENT '状态',
    `owner` varchar(255) DEFAULT NULL COMMENT '添加人',
    `level` int(4) DEFAULT NULL COMMENT '消息级别',
    `type` int(4) DEFAULT NULL COMMENT '消息类型',
    `key` varchar(255) DEFAULT NULL COMMENT '消息接受方式key',
    `content` json DEFAULT NULL COMMENT '消息内容',
    `send_at` int(11) DEFAULT NULL COMMENT '计划发送时间',
    `recall_result` text DEFAULT NULL COMMENT '发送结果',
    `created_at` int(11) DEFAULT NULL COMMENT '创建时间',
    `updated_at` int(11) DEFAULT NULL COMMENT '更新时间',
    PRIMARY KEY (`id`),
    KEY `distinct_id` (`distinct_id`),
    KEY `send_plan` (`status`,`priority`,`send_at`),
    KEY `key` (`key`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1;""".format(project_name=project_name)
        do_tidb_exe(insert_noti)
        insert_noti_group = """CREATE TABLE IF NOT EXISTS `{project_name}_noti_group` (
    `id` int(11) NOT NULL AUTO_INCREMENT,
    `plan_id` int(11) DEFAULT NULL COMMENT '分群计划id',
    `list_id` int(11) DEFAULT NULL COMMENT '分群列表id',
    `data_id` int(11) DEFAULT NULL COMMENT '分群数据id',
    `temple_id` int(11) DEFAULT NULL COMMENT '应用模板id',
    `priority` int(4) DEFAULT NULL COMMENT '优先级id',
    `status` int(4) DEFAULT NULL COMMENT '状态id',
    `owner` varchar(255) DEFAULT NULL COMMENT '添加人',
    `send_at` int(11) DEFAULT NULL COMMENT '计划发送时间',
    `sent` int(11) DEFAULT NULL COMMENT '已发送数目',
    `total` int(11) DEFAULT NULL COMMENT '该计划总数目',
    `created_at` int(11) DEFAULT NULL COMMENT '创建时间',
    `updated_at` int(11) DEFAULT NULL COMMENT '更新时间',
    PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1001;""".format(project_name=project_name)
        do_tidb_exe(insert_noti_group)
        insert_noti_temple = """CREATE TABLE IF NOT EXISTS `{project_name}_noti_temple` (
    `id` int(11) NOT NULL AUTO_INCREMENT,
    `name` varchar(255) DEFAULT NULL COMMENT '模板名称',
    `temple_desc` varchar(255) DEFAULT NULL COMMENT '模板描述',
    `args` json DEFAULT NULL COMMENT '模板参数',
    `content` json DEFAULT NULL COMMENT '模板内容',
    `apply_times` int(11) DEFAULT 0 COMMENT '应用次数',
    `lastest_apply_time` int(11) DEFAULT NULL COMMENT '最后一次应用时间',
    `lastest_apply_list` int(11) DEFAULT NULL COMMENT '最后一次应用列表',
    `created_at` int(11) DEFAULT NULL COMMENT '创建时间',
    `updated_at` int(11) DEFAULT NULL COMMENT '更新时间',
    PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1001;""".format(project_name=project_name)
        do_tidb_exe(insert_noti_temple)
        print(project_name + ' notification tables created')
        if expired is None:
            expired_at = 2147483647
        else:
            expired_at = int(time.mktime(time.strptime(expired, "%Y-%m-%d")))
        timenow = int(time.time())
        insert_project_list = """insert project_list (`project_name`,`created_at`,`expired_at`) values ('{project_name}',{created_at},{expired_at})""".format(project_name=project_name,created_at=timenow,expired_at=expired_at)
        do_tidb_exe(insert_project_list)
        print(project_name + ' added to project_list')
	# Method from an unrelated class fragment that stores a time string in
	# self.ctime and its strptime format in self.struct.
	def ctime_to_datetime(self):
		self.datetime = datetime.datetime.utcfromtimestamp(time.mktime(time.strptime(self.ctime, self.struct)))

		return self.datetime
Example #46
0
def main():

    # min_max_len(), recieve_questions(), save_to_db(), print_help(),
    # logger, curs and conn are defined elsewhere in the original script.
    if (min_max_len('min') is None) and len(sys.argv) != 3:
        print("\n   Данных нет. Введите начальное " +
              "и конечное значение для диапазона даты")
        sys.exit()

    elif len(sys.argv) == 1 and min_max_len('min') is not None:
        print("\n   Введите начальное и конечное значение даты")
        sys.exit()

    elif sys.argv[1] == "-mml":
        print("\n   Начальная дата  (общ.): %s" %
              datetime.utcfromtimestamp(min_max_len("min")))

        print("   Конечная дата   (общ.): %s" %
              datetime.utcfromtimestamp(min_max_len("max")))

        print("   Количесвто вопросов:    %s" % min_max_len("len"))
        sys.exit()

    elif sys.argv[1] == "-help":
        print_help()

    # start measuring the script's running time
    script_started = time.perf_counter()
    start_date = 0
    end_date = 0

    if (len(sys.argv) == 2
            and sys.argv[1] not in ("-mml", "-help")):
        start_date = datetime.utcfromtimestamp(min_max_len("max")).timetuple()
        end_date = datetime.utcfromtimestamp(
            calendar.timegm(time.strptime(sys.argv[1],
                                          '%d.%m.%Y'))).timetuple()
        # print(end_date)
        mass = []
        mass2 = []

        for i in start_date:
            mass.append(i)
        start_date2 = mass[:6]

        for i in end_date:
            mass2.append(i)
        end_date2 = mass2[:6]

        # loop that fetches questions by date, month by month,
        # from the last stored date up to the date
        # given in the arguments
        while True:

            # if the month is December, increment the
            # year and start again from the first month
            if start_date2[1] >= 11:
                start_date2[0] += 1
                start_date2[1] = 1
                questions = recieve_questions(
                    min_max_len("max") + 1,
                    datetime(start_date2[0], start_date2[1], start_date2[2],
                             start_date2[3], start_date2[4], start_date2[5]))

            # once the running date is within one month of (or equal to)
            # the target date, fetch up to the target date
            # and stop
            elif start_date2[0] == end_date2[0] and (
                    end_date2[1] - start_date2[1] == 1
                    or end_date2[1] == start_date2[1]):
                questions = recieve_questions(
                    min_max_len("max") + 1,
                    datetime(end_date2[0], end_date2[1], end_date2[2],
                             end_date2[3], end_date2[4], end_date2[5]))
                save_to_db(questions)
                break

            # in any other case, advance the
            # running date by one month
            else:
                start_date2[1] += 1
                questions = recieve_questions(
                    min_max_len("max") + 1,
                    datetime(start_date2[0], start_date2[1], start_date2[2],
                             start_date2[3], start_date2[4], start_date2[5]))
            save_to_db(questions)

    # if two dates (start and end) were passed as arguments,
    # fetch the questions between those dates
    elif len(sys.argv) == 3:
        dt = [int(i) for i in sys.argv[1].split('.')]
        dt2 = [int(i) for i in sys.argv[2].split('.')]
        questions = recieve_questions(datetime(dt[2], dt[1], dt[0]),
                                      datetime(dt2[2], dt2[1], dt2[0]))
        save_to_db(questions)

    # log the results
    logger.info("-" * 80)
    logger.info("   Start date (overall): %s" %
                datetime.utcfromtimestamp(min_max_len("min")))
    logger.info("   End date  (overall): %s" %
                datetime.utcfromtimestamp(min_max_len("max")))
    logger.info("   Total number of questions: %s" % min_max_len("len"))
    logger.info("   Script running time " + str(round(time.perf_counter() - script_started, 2)) +
                " sec, or " + str(round((time.perf_counter() - script_started) / 60, 2)) + " min")
    logger.debug("\n")

    curs.close()
    conn.close()
	# Method from the same class fragment as ctime_to_datetime above
	# (self.ctime is a time string, self.struct its strptime format).
	def ctime_to_structtime(self):
		self.structtime = time.strptime(self.ctime, self.struct)

		return self.structtime
Example #48
0
import time

import requests
from lxml import html

from brewer.models import Brewer
from deck.models import Deck, DeckCard
from card.models import Card
# The Event model used below is imported from its own app in the original project.

WIZARDS = "http://magic.wizards.com"
MTGO_LISTS = "/en/content/deck-lists-magic-online-products-game-info"

page = requests.get(WIZARDS + MTGO_LISTS)
tree = html.fromstring(page.content)

events = tree.xpath("//div[contains(@class,'article-item')]")
for event in events:
    url = WIZARDS + event[0].attrib['href']
    dates = event.xpath("//span[@class='date']")[0]
    month = dates[0].text.strip()
    date = str(dates[2].text).strip() + "-" + str(time.strptime(month.strip(), "%B").tm_mon) + "-" + str(dates[1].text).strip()
    event_page = requests.get(url)
    event_tree = html.fromstring(event_page.content)
    decks = event_tree.xpath("//div[@class='deck-group']")
    event_name = event_tree.xpath("//div[@id='main-content']/h1/text()")
    if (Event.objects.filter(name=event_name, date=date).exists()):
        print("It exists!")
        break
    for deck in decks:
        deck_list = []
        brewer = deck.xpath("span[@class='deck-meta']/h4/text()")[0]
        deck_element = deck.xpath("div/div[@class='deck-list-text']/div[contains(@class, 'sorted-by-overview-container')]")[0]
        sideboard_element = deck.xpath("div/div[@class='deck-list-text']/div[contains(@class, 'sorted-by-sideboard-container')]")
        deck_subs = deck_element.xpath("div[contains(@class, 'element')]/span[@class='row']")
        for card in deck_subs:
            numberOf = card.xpath("span[@class='card-count']/text()")[0]
Example #49
0
import calendar
import time


def timestamp_from_string(datestring, template='%Y-%m-%d'):
    if datestring:
        # calendar.timegm() treats the parsed struct_time as UTC,
        # so the result is timezone-independent
        return calendar.timegm(time.strptime(datestring, template))
	# Method from the same class fragment as ctime_to_datetime above.
	def ctime_to_timestamp(self):
		self.timestamp = time.mktime(time.strptime(self.ctime, self.struct))

		return self.timestamp
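A quick usage sketch for timestamp_from_string above; calendar.timegm() treats the parsed struct_time as UTC, so the result does not depend on the local timezone:

print(timestamp_from_string('2018-08-12'))                        # 1534032000
print(timestamp_from_string('12.08.2018', template='%d.%m.%Y'))   # 1534032000
print(timestamp_from_string(''))                                  # None (falsy input)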
Example #51
0
    # schema, BASEDIR and PARTNER_ID are module-level objects defined elsewhere
    # in the original service.
    def __init__(self):
        self.my_tags = dict()
        self.my_type = dict()
        self.my_mver = dict()
        self.my_models = list()

        calculation_info = schema.get('calculation_info')

        for item in os.listdir(BASEDIR):
            if os.path.isdir(BASEDIR + item):

                internaldir = os.listdir(BASEDIR + item)
                if not 'version0001' in internaldir:
                    continue

                mlabel = None
                try:
                    f = open(BASEDIR + item + '/service-label.txt')
                    mlabel = f.readline()[:-1]
                    mtype = f.readline()[:-1]
                    f.close()
                except:
                    continue

                # model properties common to all model versions for this endpoint
                rtype = schema.get("result_endpoint").schema
                if mtype == 'qualitative':
                    rtype['properties']['value'] = {
                        "enum": ["positive", "negative"]
                    }

                # only for exposed models (eTOXlab version > 0.91)
                nmodels = 0
                try:
                    f = open(BASEDIR + item + '/service-version.txt')
                except:
                    continue

                for line in f:
                    if line[-1] == '\n': line = line[:-1]

                    line_versions = line.split('\t')

                    ## old syntax ( eTOXlab < 0.95 ) for backwards compatibility only
                    if len(line_versions) == 1:
                        try:
                            mver = int(line_versions[0]
                                       )  # internal (eTOXlab) model version
                            ever = 1  # external (eTOXsys) model version
                        except:
                            break

                        if not os.path.isdir(BASEDIR + item + '/version%0.4d' %
                                             (mver)):
                            break

                        mid = 'eTOXvault ID ' + mlabel + ' ' + PARTNER_ID

                        ## new API with version support
                        try:
                            new_model = calculation_info.create_object(
                                id=mlabel,
                                category="ENDPOINT",
                                version=str(ever),
                                external_id=mid)
                        except:
                            new_model = calculation_info.create_object(
                                id=mlabel,
                                category="ENDPOINT",
                                external_id=mid)

                        new_model['return_type_spec'] = rtype

                        self.my_models.append(new_model)
                        self.my_mver[mlabel, str(ever)] = mver
                        nmodels += 1
                        break

                    try:
                        mver = int(line_versions[0]
                                   )  # internal (dir tree    ) model version
                        ever = int(line_versions[1]
                                   )  # external (user defined) model version
                    except:
                        continue

                    if ever == 0:  # this model has not been exposed
                        continue

                    if not os.path.isdir(BASEDIR + item +
                                         '/version%0.4d' % mver
                                         ):  # make sure this version exists
                        continue

                    mid = 'eTOXvault ID ' + mlabel + ' ' + PARTNER_ID

                    ## new API with version support
                    try:
                        new_model = calculation_info.create_object(
                            id=mlabel,
                            category="ENDPOINT",
                            version=str(ever),
                            external_id=mid)
                    except:
                        new_model = calculation_info.create_object(
                            id=mlabel, category="ENDPOINT", external_id=mid)

                    new_model['return_type_spec'] = rtype

                    # read licensing information from a file at the endpoint root
                    if os.path.isfile(BASEDIR + item +
                                      '/licensing-status.txt'):
                        flic = open(BASEDIR + item + '/licensing-status.txt',
                                    'r')

                        licenses = []
                        for licline in flic:
                            licline = licline.rstrip()
                            if len(licline) > 10:
                                liclist = licline.split('\t')
                                if len(liclist) == 2:
                                    lic_info = {
                                        'license_end':
                                        time.mktime(
                                            time.strptime(
                                                liclist[1], "%d-%b-%Y")),
                                        'license_info':
                                        liclist[0]
                                    }
                                    licenses.append(lic_info)

                        flic.close()

                        if len(licenses) > 0:
                            new_model['license_infos'] = []
                            for li in licenses:
                                new_model['license_infos'].append(li)

                    # append the new model to the list of models
                    self.my_models.append(new_model)
                    self.my_mver[mlabel, str(ever)] = mver
                    nmodels += 1
                f.close()

                if nmodels == 0:  # do not add item and mtype unless there is any published model
                    continue

                # my_program lists the endpoint tags (e.g. CACO2, ABCB1)
                self.my_tags[mlabel] = item
                self.my_type[mlabel] = mtype
Example #52
0
def getFlashDateString():
	try:
		return time.strftime(_("%Y-%m-%d %H:%M"), time.strptime(open("/etc/version").read().strip(), '%Y%m%d%H%M'))
	except:
		return _("unknown")
Example #53
0
File: cafe.py Project: tvkr/molly
# recipeFile, shelfDirectory, sweetsOut() and beansOut() are module-level
# names defined elsewhere in the original script.
def storeNewCups():

    coffees = []

    # import pre-booking configurations

    if (os.path.isfile(recipeFile) == True):

        sweets = "INFO: task-importing operation started"
        sweetsOut(sweets)
        coffeeBeans = "INFO: task-importing operation started"
        beansOut(coffeeBeans)

        recipeFromFile = open(recipeFile)

        # json file is easily wrong in format, so placed the exception handler
        try:
            toImportTasks = json.load(recipeFromFile)
        except ValueError:
            sweets = "ERROR: task-importing operation failed for invalid recipe file, operation canceled"
            sweetsOut(sweets)
            coffeeBeans = "ERROR: task-importing operation failed for invalid recipe file, operation canceled"
            beansOut(coffeeBeans)
        else:
            for toImportTaskIndex, toImportTask in enumerate(toImportTasks):
                toImportTaskTriggerAt = toImportTask['triggerAt']
                toImportTaskTriggerAtTimeStructure = time.strptime(
                    toImportTaskTriggerAt, "%Y-%m-%d %H:%M:%S")
                toImportTaskId = time.strftime(
                    "%Y%m%d%H%M%S", toImportTaskTriggerAtTimeStructure)
                toImportTask['taskId'] = toImportTaskId
                toImportTask['attemptedTimes'] = 0

                coffeeBeans = "INFO: imported task with id [" + toImportTask[
                    'taskId'] + "], details: " + json.dumps(toImportTask,
                                                            ensure_ascii=False)
                beansOut(coffeeBeans)

                coffees.append(toImportTask)

            if (os.path.exists(shelfDirectory) == False):
                os.system("mkdir " + shelfDirectory)

            for coffee in coffees:

                taskFileName = coffee['taskId']

                os.system("touch " + shelfDirectory + taskFileName)

                coffee = json.dumps(coffee, ensure_ascii=False)

                coffeeFile = open(shelfDirectory + taskFileName, 'a')
                contentLineToWrite = str(coffee)
                coffeeFile.write(contentLineToWrite)
                coffeeFile.close()

            os.system('rm ' + recipeFile)

            sweets = "INFO: task imported successfully"
            sweetsOut(sweets)
            coffeeBeans = "INFO: task imported successfully"
            beansOut(coffeeBeans)

    return True
Example #54
0
import datetime
import time


def convert_time_to_sec(time_string='0:0:0'):
    """Convert an H:M:S string into a number of seconds."""
    x = time.strptime(time_string, '%H:%M:%S')
    return datetime.timedelta(hours=x.tm_hour, minutes=x.tm_min,
                              seconds=x.tm_sec).total_seconds()
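
# Quick usage sketch for convert_time_to_sec() above (illustrative values):
print(convert_time_to_sec('1:30:15'))   # 5415.0
print(convert_time_to_sec())            # 0.0 (default '0:0:0')
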
def srmeta(indir, mfile, errorlog, folder):
    path, dirs, files = next(os.walk(folder))
    file_count = len(files)
    i = 1
    with open(mfile, 'w', newline='') as csvfile:  # text mode for Python 3's csv module
        writer = csv.DictWriter(
            csvfile,
            fieldnames=[
                "id_no", "system:time_start", "product_type", "orbit",
                "provider", "instrument", "satellite_id", "number_of_bands",
                "epsg_code", "resampling_kernel", "number_of_rows",
                "number_of_columns", "gsd", "cloud_cover", "incidence_angle",
                "sun_azimuth", "sun_elevation", "azimuth_angle",
                "spacecraft_angle", "atmospheric_model", "aerosol_Model",
                "aot_method", "aot_std", "aot_used", "aot_Status",
                "aot_mean_quality", "luts_version", "aot_coverage",
                "aot_source", "atmospheric_correction_algorithm"
            ],
            delimiter=',')
        writer.writeheader()
    for filename in os.listdir(indir):
        if filename.endswith(".tif"):
            infilename = os.path.join(
                folder, filename.replace("SR.tif", "metadata.xml"))
            try:
                xmldoc = minidom.parse(infilename)
                ps4band = xmldoc.getElementsByTagName(
                    'ps:EarthObservationMetaData')[0]
                eopfilename = xmldoc.getElementsByTagName(
                    'eop:identifier')[0].firstChild.data
                productType = xmldoc.getElementsByTagName(
                    'eop:productType')[0].firstChild.data
                orbit = xmldoc.getElementsByTagName(
                    'eop:orbitType')[0].firstChild.data
                acquisition = xmldoc.getElementsByTagName(
                    'eop:acquisitionDate')[0].firstChild.data
                provider = xmldoc.getElementsByTagName(
                    "eop:shortName")[0].firstChild.data
                instrument = xmldoc.getElementsByTagName(
                    "eop:shortName")[1].firstChild.data
                satellite_id = xmldoc.getElementsByTagName(
                    "eop:serialIdentifier")[0].firstChild.data
                bands = xmldoc.getElementsByTagName(
                    "ps:numBands")[0].firstChild.data
                epsg_code = xmldoc.getElementsByTagName(
                    "ps:epsgCode")[0].firstChild.data
                resampling_kernel = xmldoc.getElementsByTagName(
                    "ps:resamplingKernel")[0].firstChild.data
                number_rows = xmldoc.getElementsByTagName(
                    "ps:numRows")[0].firstChild.data
                number_columns = xmldoc.getElementsByTagName(
                    "ps:numColumns")[0].firstChild.data
                gsd = xmldoc.getElementsByTagName(
                    "ps:rowGsd")[0].firstChild.data
                cloud = xmldoc.getElementsByTagName(
                    "opt:cloudCoverPercentage")[0].firstChild.data
                psb = xmldoc.getElementsByTagName(
                    "ps:bandNumber")[0].firstChild.data
                psb1 = xmldoc.getElementsByTagName(
                    "ps:bandNumber")[1].firstChild.data
                psb3 = xmldoc.getElementsByTagName(
                    "ps:bandNumber")[2].firstChild.data
                psb4 = xmldoc.getElementsByTagName(
                    "ps:bandNumber")[3].firstChild.data
                psia = xmldoc.getElementsByTagName(
                    "eop:incidenceAngle")[0].firstChild.data
                psilaz = xmldoc.getElementsByTagName(
                    "opt:illuminationAzimuthAngle")[0].firstChild.data
                psilelv = xmldoc.getElementsByTagName(
                    "opt:illuminationElevationAngle")[0].firstChild.data
                psaz = xmldoc.getElementsByTagName(
                    "ps:azimuthAngle")[0].firstChild.data
                pssca = xmldoc.getElementsByTagName(
                    "ps:spaceCraftViewAngle")[0].firstChild.data
                date_time = acquisition.split('T')[0]
                pattern = '%Y-%m-%d'
                epoch = int(time.mktime(time.strptime(date_time,
                                                      pattern))) * 1000
                print("Processing " + str(i) + " of " + str(file_count))
                i = i + 1
                gtif = gdal.Open(os.path.join(indir, filename))
                date_time = gtif.GetMetadata()['TIFFTAG_DATETIME'].split(
                    " ")[0]
                pattern = '%Y:%m:%d'
                epoch = int(time.mktime(time.strptime(date_time,
                                                      pattern))) * 1000
                conv = json.loads(
                    gtif.GetMetadata()['TIFFTAG_IMAGEDESCRIPTION'])
                sid = str(filename).split("_")[2]
                atmmodel = (
                    conv['atmospheric_correction']['atmospheric_model'])
                aotmethod = (conv['atmospheric_correction']['aot_method'])
                aotused = (conv['atmospheric_correction']['aot_used'])
                aotstat = (conv['atmospheric_correction']['aot_status'])
                aotstd = (conv['atmospheric_correction']['aot_std'])
                aotmq = (conv['atmospheric_correction']['aot_mean_quality'])
                luts = (conv['atmospheric_correction']['luts_version'])
                aotcov = (conv['atmospheric_correction']['aot_coverage'])
                arsm = (conv['atmospheric_correction']['aerosol_model'])
                aotsor = (conv['atmospheric_correction']['aot_source'])
                atcoralgo = (conv['atmospheric_correction']
                             ['atmospheric_correction_algorithm'])
                with open(mfile, 'a') as csvfile:
                    writer = csv.writer(csvfile,
                                        delimiter=',',
                                        lineterminator='\n')
                    writer.writerow([
                        filename.split('.')[0], epoch, productType, orbit,
                        provider, instrument, satellite_id, bands, epsg_code,
                        resampling_kernel, number_rows, number_columns,
                        format(float(gsd), '.2f'),
                        format(float(cloud), '.2f'),
                        format(float(psia), '.4f'),
                        format(float(psilaz), '.2f'),
                        format(float(psilelv), '.2f'),
                        format(float(psaz), '.2f'),
                        format(float(pssca), '.4f'),
                        str(atmmodel),
                        str(arsm),
                        str(aotmethod),
                        format(float(aotstd), '.4f'),
                        format(float(aotused), '.4f'),
                        str(aotstat), aotmq, luts,
                        format(float(aotcov), '.4f'),
                        str(aotsor),
                        str(atcoralgo)
                    ])
                csvfile.close()
            except Exception as e:
                print(e)
                print("Issues with : " + str(os.path.splitext(filename)[0]))
                with open(errorlog, 'a') as csvfile:
                    writer = csv.writer(csvfile,
                                        delimiter=',',
                                        lineterminator='\n')
                    writer.writerow([filename])
                csvfile.close()
Example #56
0
def _jinja2_filter_datetime(date, fmt=None):
    # convert a Twitter-style date string into a struct_time
    pyDate = time.strptime(date, '%a %b %d %H:%M:%S +0000 %Y')
    # return the formatted date, honouring a custom format when one is given
    return time.strftime(fmt or '%Y-%m-%d %H:%M:%S', pyDate)
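A usage sketch for the Jinja2 filter above, fed a hypothetical Twitter created_at string:

print(_jinja2_filter_datetime('Sun Aug 12 12:12:21 +0000 2018'))
# -> 2018-08-12 12:12:21
print(_jinja2_filter_datetime('Sun Aug 12 12:12:21 +0000 2018', fmt='%d/%m/%Y'))
# -> 12/08/2018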
Example #57
0
def trans_timestamp(date_str):
    # parse a '2018_08_12'-style date and convert it to a Unix timestamp
    # (time.mktime interprets the struct_time as local time)
    time_array = time.strptime(date_str, "%Y_%m_%d")
    timestamp = time.mktime(time_array)
    return int(timestamp)
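A one-line usage sketch for trans_timestamp above; the result depends on the machine's timezone because time.mktime() interprets the parsed value as local time:

print(trans_timestamp('2018_08_12'))   # 1534032000 when local time is UTC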
Example #58
0
File: cafe.py Project: tvkr/molly
        currentDate = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        currentTime = time.strftime('%H:%M:%S', time.localtime(time.time()))
        currentTimestamp = int(time.time())

        # first, see whether there are tasks to do

        sweets = "INFO: checking task queue for operation"
        sweetsOut(sweets)

        coffees = fetchStoredCoffeesList()

        if (fetchStoredCoffeesList() != []):

            for index, coffeeId in enumerate(coffees):
                taskTriggerAt = coffeeId
                taskTriggerAtTimeStructure = time.strptime(
                    taskTriggerAt, "%Y%m%d%H%M%S")
                taskTriggerAtTimeStamp = time.mktime(
                    taskTriggerAtTimeStructure)

                if (int(currentTimestamp) - int(taskTriggerAtTimeStamp) < 0):
                    sweets = "INFO: task with id [" + str(
                        coffeeId) + "] is currently waiting to be triggered"
                    sweetsOut(sweets)
                else:

                    coffee = fetchStoredCoffee(coffeeId)

                    attemptedTimes = coffee['attemptedTimes']
                    expectedTimeStructure = time.strptime(
                        coffee['triggerAt'], "%Y-%m-%d %H:%M:%S")
                    expectedTimeStamp = int(time.mktime(expectedTimeStructure))
# The remainder of this example appears to come from a separate Python 2 script.
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

import json
import datetime
import time

from rec_driver import *
from pymysql import PyMysql

time_now = int(time.time())
time_value = time.localtime(time_now)
time_year_str = time.strftime('%Y', time_value)
time_day_str = time.strftime('%Y-%m-%d', time_value)
time_day_str_all = time_day_str + " 00:00:00"
time_s = time.mktime(time.strptime(time_day_str_all, '%Y-%m-%d %H:%M:%S'))
time_day = int(time_s)

time_yesterday = time_day - 86400
time_value_yesterday = time.localtime(time_yesterday)
time_yesterday_str = time.strftime('%Y-%m-%d', time_value_yesterday)
time_yesterday_str_all = time_yesterday_str + " 00:00:00"

time_days_before = time_day - (15 * 86400)
time_value_days_before = time.localtime(time_days_before)
time_days_before_str = time.strftime('%Y-%m-%d', time_value_days_before)
time_days_before_str_all = time_days_before_str + " 00:00:00"

# print time_now
print 'start'
print time_day_str_all
Example #60
0
def setFirstWeekDate(firstWeekDate):
	global DONE_firstWeekDate
	DONE_firstWeekDate = time.strptime(firstWeekDate,'%Y%m%d')
	print("Now running: setFirstWeekDate():", DONE_firstWeekDate)