Example no. 1
def facturas_no_pagadas():
    print 'funcionando'
    facturas_no_pagadas = Factura.objects.filter(pagada=False)
    texto = ''
    
    inicio_mes = False
    if datetime.date(datetime.now()).day == 1:
        print "dia de envio"
        inicio_mes=True
        for factura in facturas_no_pagadas:
            diferencia = datetime.date(datetime.now()) - factura.fecha
            if diferencia>timedelta(factura.orden_servicio.cotizacion.contacto.cliente.dias_de_credito):
                texto += '''<li><b>Cliente :</b> %s<br>
                            <b>contacto :</b> %s<br>
                            <b>Telefono :</b> %s<br>
                            <b>Ext. :</b> %s<br>
                            <b>Email :</b> %s<br>
                            <b>Fecha de la factura :</b> %s<br>
                            <b>Fecha de vencimiento :</b> %s<br>
                            <b>Folio :</b> %s<br>
                            <b>Importe :</b> $%s USD<br>
                             </li><br><br>'''%(factura.orden_servicio.cotizacion.contacto.cliente.nombre,
                         	           factura.orden_servicio.cotizacion.contacto.nombre,
                         	           factura.orden_servicio.cotizacion.contacto.telefono,
                         	           factura.orden_servicio.cotizacion.contacto.extension,
                         	           factura.orden_servicio.cotizacion.contacto.email,
                         	           factura.fecha,
                         	           factura.fecha+timedelta(factura.orden_servicio.cotizacion.contacto.cliente.dias_de_credito),
                         	           factura.id,
                         	           factura.orden_servicio.cotizacion.importe)
        mandar_mail(texto,inicio_mes)                     
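Note: the pattern that recurs throughout these examples, datetime.date(datetime.now()), assumes `from datetime import datetime` is in scope; it calls the date() method in unbound form, so it is just another spelling of datetime.now().date() and yields a plain date object. A minimal check:

from datetime import datetime
now = datetime.now()
datetime.date(now) == now.date()   # True - both drop the time-of-day part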
Example no. 2
def execute(event):

    sourcetype = "bro_dns"

    print("Checking Splunk for events..."),

    sys.stdout.flush()

    sp = Splunk(
        host=SPLUNK_SEARCH_HEAD,
        port=SPLUNK_SEARCH_HEAD_PORT,
        username=SPLUNK_SEARCH_HEAD_USERNAME,
        password=SPLUNK_SEARCH_HEAD_PASSWORD,
        scheme=SPLUNK_SEARCH_HEAD_SCHEME,
    )

    if not event.adHoc:
        if hasattr(event, "ip_address"):
            event._include = 'id_orig_h="%s" OR id_resp_h="%s"' % (event.ip_address, event.ip_address)

    cirtaDT = epochToDatetime(event.cirta_id.split(".")[0])

    timedelta = (datetime.date(event._DT) - datetime.date(cirtaDT)).days

    earliest = timedelta - event._daysBefore

    latest = timedelta + 1 + event._daysAfter

    if earliest >= 0:
        earliest = "+" + str(earliest)

    if latest >= 0:
        latest = "+" + str(latest)

    query = """search eventtype="%s" earliest_time="%sd@d" latest_time="%sd@d" %s | table _raw""" % (
        sourcetype,
        earliest,
        latest,
        event._include,
    )

    log.debug('''msg="raw event query" query="%s"''' % query)

    results = sp.search(query)

    print("Done")

    if not results:
        log.warn("No %s events exist in Splunk" % sourcetype)
        return

    raw = [x["_raw"] for x in results]

    if raw:
        with open("%s.%s" % (event._baseFilePath, confVars.outputExtension), "w") as outFile:
            for row in raw:
                outFile.write(row + "\n")
        print("%s file: %s%s.%s" % (sourcetype, colors.OKGREEN, event._baseFilePath, confVars.outputExtension))

    event._splunk.push(sourcetype=confVars.splunkSourcetype, eventList=raw)
Example no. 3
    def clean(self):
        super(ProjectForm, self).clean()
        # super(NewsArticleForm, self).clean()
        # create the short_title automatically if necessary
        if not self.cleaned_data["short_title"] and self.cleaned_data.get("title"):
            if len(self.cleaned_data["title"]) > 70:
                raise forms.ValidationError("Please provide a short (less than 70 characters) version of the Title for the Short title field.")     
            else:
                self.cleaned_data["short_title"] = self.cleaned_data["title"]
        
        # misc checks
        if not self.cleaned_data["hosted_by"]:
            raise forms.ValidationError("A Host is required - please provide a Host")
            # must have body or url in order to be published
        if not self.instance and self.instance.body.cmsplugin_set.all():
        # if not self.cleaned_data["body"]:
            message = u"This will not be published until either an external URL or Plugin has been added. Perhaps you ought to do that now."
            messages.add_message(self.request, messages.WARNING, message)
        
        # copied from NewsArticleForm
        # TODO: move sticky stuff to separate mixin since it is used by both news and projects
        # sticky_until value must be greater than (later) than date
        date = datetime.date(self.cleaned_data['date'])
        self.cleaned_data['sticky_until'] = self.cleaned_data.get('sticky_until', date) 
        # if importance = 0, it's not sticky

        self.cleaned_data['sticky_until'] = self.cleaned_data['sticky_until'] or datetime.date(self.cleaned_data['date'])
        if self.cleaned_data['importance'] == 0:
            self.cleaned_data['sticky_until'] = None 
        elif self.cleaned_data['sticky_until'] < datetime.date(self.cleaned_data['date']):
            self.cleaned_data['sticky_until'] = datetime.date(self.cleaned_data['date'])
        return self.cleaned_data
Example no. 4
def fb_month_range(year, month):
    import calendar, datetime
    one_day = datetime.timedelta(days=1)
    first_weekday, length = calendar.monthrange(year, month)
    start = str(datetime.date(year,month,1))
    end = str(datetime.date(year,month,length) + one_day) # FB API uses midnight-before-this-day as cutoff for "until"
    return (start, end)
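A quick usage sketch of the helper above (the "until" value is the exclusive midnight cutoff the FB API expects):

fb_month_range(2021, 2)   # -> ('2021-02-01', '2021-03-01')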
Example no. 5
    def process_timestamp(self, elem):
        if self._skip:
            return

        timestamp = elem.text
        year = int(timestamp[:4])
        month = int(timestamp[5:7])
        day = int(timestamp[8:10])
        revision_time = date(year, month, day)

        if (self.start_date and revision_time < dt.date(self.start_date)):
            self._skip_revision = True
            return
        if (self.end_date and revision_time > dt.date(self.end_date)):
            self._skip_revision = True
            return

        self._date = (revision_time - self.s_date).days
        ## default value for self._date is a list where
        ## first element is for total revisions, the second
        ## for revisions made by bot and the last one for
        ## anonymous' revisions
        t = self._counter.get(self._date, [0, 0, 0])
        t[0] += 1  # increment total revisions
        self._counter[self._date] = t

        del revision_time, t
        self.count += 1
        if not self.count % 500000:
            self.flush()
            print 'PAGES:', self.counter_pages, 'REVS:', self.count
    def _limit_dates(self):
        '''
        INPUT: None
        OUTPUT: None

        Keeps observations within [min_date, max_date], inclusive (where a day is defined as 4 AM to 4 AM the next day).
        Does other minimal cleaning.
        '''

        for df_name in self.feature_dfs.iterkeys():
            df = self.feature_dfs[df_name]
            if df_name == 'df_BluetoothProximity':
                ''' Limits dates to relevant period; removes possibly erroneous nighttime observations'''
                df = df.rename(columns={'date': 'local_time'})
                df['local_time'] = pd.DatetimeIndex(pd.to_datetime(df['local_time']))
                df = df[df['local_time'].dt.hour >= 7] # Per Friends and Family paper (8.2.1), removes b/n midnight and 7 AM
            elif df_name == 'df_Battery':
                df = df.rename(columns={'date': 'local_time'})
            elif df_name == 'df_AppRunning':
                df = df.rename(columns={'scantime': 'local_time'})

            df['local_time'] = pd.DatetimeIndex(pd.to_datetime(df['local_time']))
            df.loc[df['local_time'].dt.hour < 4, 'local_time'] = (pd.DatetimeIndex(df[df['local_time'].dt.hour < 4]['local_time']) - \
                                                                 DateOffset(1))
            df['date'] = df['local_time'].dt.date
            df = df.drop('local_time', axis=1)
            df = df[((df['date'] >= datetime.date(pd.to_datetime(self.min_date))) & \
                     (df['date'] <= datetime.date(pd.to_datetime(self.max_date))))]
            self.feature_dfs[df_name] = df
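A minimal standalone sketch of the 4 AM day boundary used above (column name assumed): observations stamped before 4 AM are shifted back one day before the calendar date is taken.

import pandas as pd
from pandas.tseries.offsets import DateOffset

df = pd.DataFrame({'local_time': pd.to_datetime(['2014-01-02 03:30', '2014-01-02 09:00'])})
early = df['local_time'].dt.hour < 4
df.loc[early, 'local_time'] = df.loc[early, 'local_time'] - DateOffset(1)
df['date'] = df['local_time'].dt.date   # -> 2014-01-01 and 2014-01-02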
Example no. 7
  def get_duplicate_candidates(self, q=''):
    event_day = datetime.date(self.start)
    day_after_event = datetime.date(self.end) + timedelta(days=1)

    duplicate_candidates = Event.objects.filter(start__gte=event_day).filter(end__lte=day_after_event).exclude(id=self.id).filter(title__icontains=q)

    return duplicate_candidates
Example no. 8
    def add(self):
        # print(self.country_name)
        # print(self.start_date)
        # print(self.end_date)
        first_date = datetime.strptime(self.start_date, '%Y/%m/%d')
        first_date = datetime.date(first_date)
        # print(first_date)
        self.first_date2 = first_date
        #print(self.first_date2)
        #
        sec_date = datetime.strptime(self.end_date, '%Y/%m/%d')
        sec_date = datetime.date(sec_date)
        # print(sec_date)
        self.sec_date2 = sec_date
        #print(self.sec_date2)

        if self.first_date2 > self.sec_date2:
            raise Error('Your first date was after the second date')
        else:
            if self.start_date in self.locations:
                raise Error('Date already used')
            else:
                self.locations.append(self.country_name)
                self.locations.append(self.start_date)
                self.locations.append(self.end_date)

        # print(self.locations)
        return self.locations
Example no. 9
def tickets_detail():
    """券 详情页面"""
    openid = session.get("openid")
    user_agent = request.headers.get('User-Agent')
    print user_agent
    # If the request comes from the WeChat in-app browser, we need to obtain the openid
    if 'MicroMessenger' in user_agent:
        if not openid:
            code = request.args.get("code")
            if not code:
                print "not code"
                return redirect(WeixinHelper.oauth2(request.url))
            else:
                wechat_login_fun(code)

    tickets_id = request.args.get('tid', 0, type=int)
    print "ticket_id", tickets_id
    ticket = GetTicketRecord.query.get(tickets_id)
    now = datetime.date(datetime.now())
    if ticket:
        expire_date = datetime.date(ticket.create_at) + timedelta(days=ticket.discount.usable)
        isexpire = (now - expire_date).days
        print '-' * 10, isexpire
    else:
        expire_date = ""
        isexpire = True
    # guard against a missing ticket so the page can still render
    shops = ticket.discount.shops if ticket else []
    discount = ticket.discount if ticket else None
    return render_template('mobile/my_tickets_detail.html', nav=2, ticket=ticket, discount=discount,
                           shops=shops, expire_date=expire_date, isexpire=isexpire)
Example no. 10
def nop_form(request):
    context = {}
    date = None
    time = None

    if request.method == 'GET' \
            and ('date' in request.GET or 'time' in request.GET):
        form = NopForm(request.GET)

        if form.is_valid():
            date = form.cleaned_data['date']
            time = form.cleaned_data['time']
    else:
        form = NopForm(initial={'date': datetime.date(datetime.now()),
                                'time': datetime.time(datetime.now())})

    if not date:
        date = datetime.date(datetime.now())

    if not time:
        time = datetime.time(datetime.now())

    result = _bydate(date.year, date.month, date.day, time.hour, time.minute)
    context['nowplaying'] = result
    context['form'] = form
    return render_to_response('nop_form.html', context)
Example no. 11
    def __init__(self, *args, **kwargs):
        purpose = kwargs.pop('purpose', None)

        is_renewable = kwargs.pop('is_renewable', False)

        return_frequency = kwargs.pop('return_frequency', WildlifeLicence.DEFAULT_FREQUENCY)

        super(IssueLicenceForm, self).__init__(*args, **kwargs)

        if purpose is not None:
            self.fields['purpose'].initial = purpose

        if 'instance' not in kwargs:
            today_date = datetime.now()
            self.fields['issue_date'].initial = today_date.strftime(DATE_FORMAT)
            self.fields['start_date'].initial = today_date.strftime(DATE_FORMAT)

            self.fields['issue_date'].localize = False

            try:
                one_year_today = today_date.replace(year=today_date.year + 1)
            except ValueError:
                # only raised on Feb 29: add the length of the coming year instead
                one_year_today = today_date + (
                    datetime.date(today_date.year + 1, 1, 1) -
                    datetime.date(today_date.year, 1, 1))

            self.fields['end_date'].initial = one_year_today.strftime(DATE_FORMAT)

            self.fields['is_renewable'].initial = is_renewable
            self.fields['is_renewable'].widget = forms.CheckboxInput()

            self.fields['return_frequency'].initial = return_frequency
Example no. 12
    def test_to_json_date_time_fields(self):
        """Check that Date, DateTime and Time Fields return correct ISO-formatted dates from trailguide.api.transform.to_json"""
        class TestDateTime(models.Model):
            the_date = models.DateField()
            the_datetime = models.DateTimeField()
            the_time = models.TimeField()
        
        now = datetime.now()
        instance = TestDateTime(
            the_date=datetime.date(now),
            the_datetime=now,
            the_time=datetime.time(now)                  
        )
        result = to_json(instance)

        self.assertIsInstance(
            result, 
            str, 
            "trailguide.api.transform.to_json did not return a string when testing DateTime fields."
        )
        
        self.assertEqual(
            json.loads(result),
            { "id": None,
              "the_date": datetime.date(now).isoformat(),
              "the_datetime": now.isoformat(),
              "the_time": datetime.time(now).isoformat() },
            "trailguide.api.transform.to_json did not return correctly formatted ISO date strings"                 
        )
Example no. 13
def statistics_payment_fill():
    cars_count = 100
    year_b = 2000
    month_b = 10
    day_b = 1

    year_e = 2014
    month_e = 11
    day_e = 1
    for x in range(0, cars_count):
        start_time = time.mktime(datetime.date(year_b, month_b, day_b).timetuple())
        end_time = time.mktime(datetime.date(year_e, month_e, day_e).timetuple())

        date = random.randrange(int(start_time), int(end_time))
        activation_time = datetime.datetime.fromtimestamp(date)

        car_number = (random.choice(string.ascii_letters) + random.choice(string.ascii_letters) + " "+ str(random.randint(1000,9999))+ random.choice(string.ascii_letters) + random.choice(string.ascii_letters)).upper()
        cost = random.randint(10,90)
        place_id = random.randint(1,6)
        transaction = 'string'

        pricehistory_id = get_current_pricehistory_id(place_id)
        estimated_time = calculate_estimated_time(activation_time, cost, place_id)
        pay = Payment(car_number, cost, estimated_time, transaction, place_id, pricehistory_id)
        pay.activation_time = activation_time
        db_session.add(pay)
        db_session.commit()
    return "all payments ok"
Example no. 14
def get_stories(sticky=False):
    pages = []
    if sticky:
        for page in NewsPage.objects.live():
            # skip stories that are in the future. 
            if page.story_date > datetime.date(datetime.now()):
                continue
            # skip stories that do not have a "sticky until" date.
            if page.sticky_until == None:
                continue
            # skip sticky stories that have 'expired'.
            if page.sticky_until and datetime.date(datetime.now()) > page.sticky_until:
                continue
            pages.append(get_story_summary(page))
    else:
        for page in NewsPage.objects.live():
            # skip stories that are in the future. 
            if page.story_date > datetime.date(datetime.now()):
                continue
            # skip pages that are still sticky. 
            # pages that have a sticky_until set to None or a date in the past fall through.
            if page.sticky_until and datetime.date(datetime.now()) <= page.sticky_until:
                continue
            pages.append(get_story_summary(page))

    return sorted(pages, key=lambda p: p['story_date_sort'], reverse=True)
def get_invite_stats():
    import MySQLdb
    import datetime

    conn = MySQLdb.connect(
        host="internal-db.s47095.gridserver.com", user="******", passwd="Flames285", db="db47095_bumptopusers"
    )

    cursor = conn.cursor()

    cursor.execute(
        "SELECT DATE(sent_time), count(1) FROM invite_codes WHERE sent_time IS NOT NULL AND DATE(sent_time) != DATE('0000-00-00') GROUP BY 1;"
    )
    invited_users = fill_in_missing_days(cursor, datetime.date(2008, 7, 7))

    cursor.execute("SELECT COUNT(*) FROM invite_codes WHERE sent_time IS NULL;")
    num_old_invited_users = [tuple for tuple in cursor]

    invited_users = [("%04d-%02d-%02d" % (2008, 7, 7), num_old_invited_users[0][0])] + invited_users

    cursor.execute(
        "SELECT DATE(first_auth_time), count(1) FROM invite_codes WHERE first_auth_time IS NOT NULL AND DATE(first_auth_time) != DATE('0000-00-00') GROUP BY 1;"
    )
    authorized_users = fill_in_missing_days(cursor, datetime.date(2008, 7, 7))

    cursor.execute("SELECT COUNT( * ) FROM invite_codes WHERE first_auth_time IS NULL AND md5_hashes IS NOT NULL ;")
    num_old_authorized_users = [tuple for tuple in cursor]
    authorized_users = [("%04d-%02d-%02d" % (2008, 7, 7), num_old_authorized_users[0][0])] + authorized_users

    cursor.close()
    conn.close()

    return {"invites_sent": invited_users, "invites_authorized": authorized_users}
Example no. 16
 def create_invoice(self):
     # Clearly just a test -- JLS
     xero = self.xero
     invoice = {
                 u'Status': u'DRAFT',
                 u'Total': u'264.00',
                 u'CurrencyRate': u'1.000000',
                 u'Reference': u'sdfghsfgh',
                 u'Type': u'ACCREC',
                 u'CurrencyCode': u'AUD',
                 u'AmountPaid': u'0.00',
                 u'TotalTax': u'24.00',
                 u'Contact': {
                     u'Name': u'Test One'
                 },
                 u'AmountDue': u'264.00',
                 u'Date': datetime.date(2014, 7, 24),
                 u'LineAmountTypes': u'Exclusive',
                 u'LineItems': {
                     u'LineItem': {
                         u'AccountCode': u'200',
                         u'TaxAmount': u'24.00',
                         u'Description': u'fgshfsdh',
                         u'UnitAmount': u'24.00',
                         u'TaxType': u'OUTPUT',
                         u'ItemCode': u'sfghfshg',
                         u'LineAmount': u'240.00',
                         u'Quantity': u'10.0000'
                     }
                 },
                 u'SubTotal': u'240.00',
                 u'DueDate': datetime.date(2014, 7, 24)
             }
     #xero.invoices.put(invoice)
Example no. 17
    def add(self, country_name="", start_date="", end_date=""):
        # print("A date string conforms to this format: YYYY/MM/DD")
        # WHERE DATE CHECKING GOES ----> ADD EXCEPTIONS
        first_date = datetime.strptime(start_date, "%Y/%m/%d")
        first_date = datetime.date(first_date)
        # print(first_date)

        sec_date = datetime.strptime(end_date, "%Y/%m/%d")
        sec_date = datetime.date(sec_date)
        # print(sec_date)

        if first_date > sec_date:
            raise ValueError("Your first date was after the second date")
        else:
            if start_date in self.locations:
                raise Exception("Date already used")
            else:
                self.locations.append(country_name)
                self.locations.append(first_date)
                self.locations.append(sec_date)

        # This belongs in the current_country function (was "not working atm", but it works now)
        # TODO: find out how to get these variables into the current_country function
        date_string = datetime.strptime("2000/01/15", "%Y/%m/%d")
        date_string = datetime.date(date_string)
        if date_string > first_date and date_string < sec_date:
            # return country_name
            print(country_name)
        else:
            print("nope")

        return self.locations
Example no. 18
def check_latest(t, i):
    nt = datetime.date(dateutil.parser.parse(t))
    ni = datetime.date(i)
    return nt == ni
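A small usage sketch (assuming `import dateutil.parser` and `from datetime import datetime`): the function answers "do these two values fall on the same calendar day?".

check_latest('2021-05-04T12:30:00', datetime(2021, 5, 4, 8, 0))   # -> True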
Example no. 19
def get_case_info(context, chw_data, enddate, active):
    ''' Gives information about each case of a chw'''
    all_data = []
    for id, map in chw_data.items():
        form_dates = []
        mindate = datetime(2000, 1, 1)
        fttd = {'open': mindate, 'close': mindate, 'follow': mindate, 
                'referral': mindate}
        for form_id, form_info in map.items():
            form_type = CaseFormIdentifier.objects.get(form_identifier=
                                                       form_id.id).form_type
            for form in form_info:
                context['chw_name'] = form["meta_username"]
                timeend = form["meta_timeend"]
                if datetime.date(timeend) < enddate:
                    form_dates.append(timeend)
                if timeend > fttd[form_type] and enddate > datetime.date(
                                                                    timeend):
                    fttd[form_type] = timeend
        status = get_status(fttd, active, enddate)
        if not len(form_dates) == 0:
            all_data.append({'case_id': id.split("|")[1], 'total_visits': 
                             len(form_dates), 'start_date': 
                             get_first(form_dates), 'last_visit': 
                             get_last(form_dates), 'status': status})
    context['all_data'] = all_data
Example no. 20
def load_credits(request):
    if request.method == 'GET':
        return render_to_response('load_reports.html', {'tab':'pres', 'loaded':False}, context_instance=RequestContext(request))
    if request.method == 'POST':
        datafile = request.FILES.get('datafile')
        reader = unicode_csv_reader(datafile, delimiter=";")

        for row in reader:
            if len(row)>1:
                credit_data = clean_credit_row(row[:10])

                new_user_product, created = ClientCreditProduct.objects.update_or_create(
                    credit_line = credit_data[2],
                    client = credit_data[0],
                    defaults={
                        'promisory_note': credit_data[1],
                        'quota': credit_data[3],
                        'amount': credit_data[4],
                        'remaining_amount': credit_data[5],
                        'amortized_percent': credit_data[6],
                        'remaining_payments': credit_data[7],
                        'start_date': datetime.date(datetime.strptime(credit_data[8], '%d/%m/%Y')),
                        'end_date': datetime.date(datetime.strptime(credit_data[9], '%d/%m/%Y'))
                    }
                )

                new_user_product.save()
        return render_to_response('load_reports.html', {'tab':'pres', 'loaded_cre':True}, context_instance=RequestContext(request))
Example no. 21
def get_counts(chw_data, active, enddate, num_open, num_active, num_last_week,
               num_ref_late, num_reg_late, days_late_list, chw_name):
    ''' Gets the counts of different status cases and other information
    about this chw'''
    for id, map in chw_data.items():
        (chw_name, form_type_to_date, last_week) = \
            get_form_type_to_date(map, enddate)
        num_last_week += last_week
        # if the most recent open form was submitted more recently than  
        # the most recent close form then this case is open
        if form_type_to_date['open'] > form_type_to_date['close'] or \
            only_follow(form_type_to_date):
            num_open += 1
            if datetime.date(form_type_to_date['open']) >= active or \
                datetime.date(form_type_to_date['follow']) >= active:
                if referral_late(form_type_to_date, enddate, 3):
                    num_ref_late += 1
                else:
                    num_active += 1
            else:
                if referral_late(form_type_to_date, enddate, 3):
                    num_ref_late += 1
                else:
                    days_late = get_days_late(form_type_to_date, active)
                    days_late_list.append(days_late)
                    num_reg_late += 1
    return {'active': num_active, 'open': num_open, 'last_week': num_last_week,
            'reg_late': num_reg_late, 'ref_late': num_ref_late, 'days_late':
            days_late_list, 'chw_name': chw_name}
 def as400_date(dat):
     '''Accepts list of dates as strings from the AS400'''
     try:
         dat = dt.date(dt.strptime(dat[-6:], '%y%m%d'))
     except ValueError:
         dat = dt.date(dt.strptime('160101', '%y%m%d'))
     return dat
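A brief usage sketch, assuming `dt` is the datetime class (i.e. `from datetime import datetime as dt`):

as400_date('1160315')   # -> date(2016, 3, 15)
as400_date('0000000')   # unparsable -> falls back to date(2016, 1, 1)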
Example no. 23
def event_ical(request):
    event_list = Event.objects.filter(date__gte=datetime.now()-timedelta(weeks=4)).order_by('date')
    cal = vobject.iCalendar()
    cal.add('method').value = 'PUBLISH'  # IE/Outlook needs this
    cal.add('x-wr-calname').value = "Subterrarium intern"
    for event in event_list:
        url = settings.SITE_URL + reverse('event', args=[event.id])
        date = event.date - settings.EVENT_TIMEZONE.utcoffset(event.date)
        vevent = cal.add('vevent')
        if event.date.hour == 0 and event.date.minute == 0:
            vevent.add('X-FUNAMBOL-ALLDAY').value = '1'
            vevent.add('dtstart').value = datetime.date(event.date)
            vevent.add('dtend').value = datetime.date(event.date) + timedelta(days=1)
        else:
            vevent.add('dtstart').value = date
            vevent.add('dtend').value = date + timedelta(hours = 2)
        vevent.add('dtstamp').value = datetime.now()
        vevent.add('summary').value = event.title
        vevent.add('url').value = url
        vevent.add('uid').value = "%s-%s" % (event.id, settings.EVENT_REMINDER_FROM)
        vevent.add('description').value = url + "\n" + event.info
    icalstream = cal.serialize()
    response = HttpResponse(icalstream, mimetype='text/calendar; charset=utf-8')
    response['Filename'] = 'subkoordinator.ics'  # IE needs this
    response['Content-Disposition'] = 'attachment; filename=subkoordinator.ics'
    return response
Example no. 24
	def post(self):
		surveyid = int(self.request.get('surveyid'))
		survey = SurveyModel.get_by_id(surveyid)
		newsurvey_name = self.request.get('surveyname')
		raw_expiry = self.request.get('expiry')
		if raw_expiry:
			expiry = datetime.date(datetime.strptime(str(raw_expiry), '%Y-%m-%d'))
		else :
			expiry = datetime.date(datetime.strptime('2012-12-31', '%Y-%m-%d'))
		
		author = survey.author
		user = users.get_current_user()
		#check if no survey exists with same name for the author
		check = SurveyModel.gql("WHERE surveyname=:1 and author=:2 and expiry=:3", newsurvey_name, author,expiry)
		#self.response.out.write(check.count())
		if check.count() == 1:
			self.redirect("/error?code=1")
		elif author == user or isAdmin(user):
			survey.surveyname = newsurvey_name
			survey.expiry = expiry
			survey.put()
			#Updating Survey Questions
			questions = QuestionModel.gql("WHERE sid=:1 and author=:2", surveyid, author)
			for question in questions:
				question.surveyname = newsurvey_name
				question.put()
			self.redirect('/edit?' + urllib.urlencode({'id':surveyid }))
		else :
			self.redirect("/error?code=2")
Example no. 25
def user_info(request, project_id=None):
    treenode_id = int(request.POST['treenode_id'])
    ts = Treenode.objects.filter(pk=treenode_id).select_related('user', 'editor')
    if not ts:
        ts = Connector.objects.filter(pk=treenode_id).select_related('user', 'editor')
        if not ts:
            return HttpResponse(json.dumps({'error': 'Object #%s is not a treenode or a connector' % treenode_id}))
    t = ts[0]
    # Get all reviews for this treenode
    reviewers = []
    review_times = []
    for r, rt in Review.objects.filter(treenode=t) \
            .values_list('reviewer', 'review_time'):
        reviewers.append(User.objects.filter(pk=r) \
                .values('username', 'first_name', 'last_name')[0])
        review_times.append(str(datetime.date(rt)))
    # Build result
    return HttpResponse(json.dumps({'user': {'username': t.user.username,
                                             'first_name': t.user.first_name,
                                             'last_name': t.user.last_name},
                                    'creation_time': str(datetime.date(t.creation_time)),
                                    'editor': {'username': t.editor.username,
                                               'first_name': t.editor.first_name,
                                               'last_name': t.editor.last_name},
                                    'edition_time': str(datetime.date(t.edition_time)),
                                    'reviewers': reviewers,
                                    'review_times': review_times}))
Example no. 26
 def write(self, message):
     current_time = datetime.now()
     if (not self.last_date == datetime.date(current_time)):
         self.last_date = datetime.date(current_time)
         with open(self.HISTORY_FILE, 'a') as f:
             print('$', self.last_date, file=f)
     with open(self.HISTORY_FILE, 'a') as f:
         print(datetime.time(current_time).strftime("%H:%M:%S"), '- ' + message, file=f)
def as400_date(dat):
    '''Accepts list of dates as strings from the AS400'''
    try:
        dat = dt.date(dt.strptime(dat[-6:], '%y%m%d'))
    except ValueError:
        # placeholder/unparsable AS400 dates (e.g. '1090001') are treated as missing
        dat = None
    return dat
Example no. 28
 def baby_boomer_status(self):
     "Returns the person's baby-boomer status."
     import datetime
     if datetime.date(1945, 8, 1) <= self.birth_date <= datetime.date(1964, 12, 31):
         return "Baby boomer"
     if self.birth_date < datetime.date(1945, 8, 1):
         return "Pre-boomer"
     return "Post-boomer"
Example no. 29
def detail(request, kind, post_id):
    post = None
    try:
        if kind == 'a':
            post = Activity.objects.get(id=post_id)
        elif kind == 'c':
            post = Course.objects.get(id=post_id)
    except ObjectDoesNotExist:
        raise Http404("post does not exist")

    initiator = post.initiator
    ini_list = []
    for item in initiator.courses.all():
        if item != post:
            ini_list.append(item)
    for item in initiator.activities.all():
        if item != post:
            ini_list.append(item)

    if initiator == request.user:
        is_self = True
    else:
        is_self = False

    if request.user in post.joined.all():
        has_joined = True
    else:
        has_joined = False
    if request.user in post.interested.all():
        interested_post = True
    else:
        interested_post = False

    schedule = post.get_post_schedule()
    if post.limit <= post.joined.count():
        status = 'full'
    elif datetime.date(datetime.now()) <= post.deadline:
        status = 'registering'
    elif post.deadline < datetime.date(datetime.now()) < DateJudge(post).start_date:
        status = 'tobegin'
    elif DateJudge(post).start_date <= datetime.now().date() <= DateJudge(post).end_date:
        status = 'ing'
    else:
        status = 'end'


    if kind == 'a':
        post = Activity.objects.get(id=post_id)

    elif kind == 'c':
        post = Course.objects.get(id=post_id)
    image_set = post.images.all()

    return render(request,
                  'post/detail.html',
                  {'kind': kind, 'post': post, 'schedule': schedule, 'list': ini_list, 'is_self': is_self,
                   'has_joined': has_joined,
                   'interested': interested_post, 'status': status,'images':image_set})
Example no. 30
def profile(username):
    user = User.query.filter_by(username=username).first()

    if user is None:
        flash('User {} not found.'.format(username))
        return redirect(url_for('index'))

    else:
        links = Link.query.join(User, (User.id == Link.user_id)).\
            filter(User.username == user.username).\
            order_by(Link.timestamp.desc())

        daysAgo = []
        for x in xrange(8):
            daysAgo.append(
                datetime.date(datetime.utcnow() - timedelta(days=x)))

        # A list of the user's short URLs and long URLs
        listOfLinksQuery = Link.query.\
            join(User, (User.id == Link.user_id)).\
            filter(User.username == user.username).\
            order_by(Link.timestamp.desc())

        listOfKeysShortURL = [c.shorturl for c in listOfLinksQuery]
        listOfLongURL = [c.longurl for c in listOfLinksQuery]

        totalClicksPerLink = []
        for i in xrange(0, len(listOfKeysShortURL)):
            totalClicksPerLink.append(
                int(Click.query.filter(
                    Click.shorturl == listOfKeysShortURL[i]).count()))

        # A list of total clicks for each short URL
        # Broken down by each day of the week, starting with the most recent
        weeklyCounts = [[] for x in xrange(len(listOfKeysShortURL))]
        for key, value in enumerate(listOfKeysShortURL):
            for j in xrange(8):
                weeklyCounts[key].append(
                    int(Click.query.
                        filter(Click.shorturl == value).
                        filter(func.date(Click.timestamp) == daysAgo[j]).
                        count()))

        listOfFullShortURL = [str(request.url_root + 's/' + x)
                          for x in listOfKeysShortURL]
        listOfTimestamps = [datetime.date(link.timestamp) for link in links]

        masterList = zip(listOfLongURL,
                         listOfFullShortURL,
                         listOfKeysShortURL,
                         totalClicksPerLink,
                         weeklyCounts,
                         listOfTimestamps)

        return render_template("user.html",
                               title='Home',
                               user=g.user,
                               links=masterList)
Example no. 31
    def create_xml(self, cr, uid, ids, datas, context=None):
        obj_student = pooler.get_pool(cr.dbname).get('student.student')
        att_sheet_obj = pooler.get_pool(cr.dbname).get('attendance.sheet')
        if context is None:
            context = {}
        month = datetime(datas['form']['year'], datas['form']['month'], 1)
#        stu_ids = context.get('active_ids', [])
        stu_ids = datas['form']['stud_ids']
        user_xml = ['<month>%s</month>' % month2name[month.month],
                    '<year>%s</year>' % month.year]
        if stu_ids:
            for student in obj_student.read(cr, uid, stu_ids,
                                            ['name', 'standard_id']):
                days_xml = []
                user_repr = '''
                <user>
                  <name>%s</name>
                  %%s
                </user>
                ''' % (ustr(toxml(student['name'])))
                today, tomor = month, month + one_day
                while today.month == month.month:
                    day = today.day
                    attendance_sheet_domain = [('standard_id', '=',
                                                student['standard_id'][0]),
                                               ('month_id', '=', today.month)]
                    search_ids = att_sheet_obj.search(cr, uid,
                                                      attendance_sheet_domain,
                                                      context=context)
                    if not search_ids:
                        var = 'A'
                    else:
                        att_browse = att_sheet_obj.browse(cr, uid,
                                                          search_ids,
                                                          context=context)
                        for attendance_sheet_data in att_browse:
                            for line in attendance_sheet_data.attendance_ids:

                                if line.name == student['name']:

                                    # map the day of the month to the matching
                                    # attendance line field (one..ten, one_1..one_0,
                                    # two_1..two_0, three_1)
                                    day_fields = [
                                        'one', 'two', 'three', 'four', 'five',
                                        'six', 'seven', 'eight', 'nine', 'ten',
                                        'one_1', 'one_2', 'one_3', 'one_4', 'one_5',
                                        'one_6', 'one_7', 'one_8', 'one_9', 'one_0',
                                        'two_1', 'two_2', 'two_3', 'two_4', 'two_5',
                                        'two_6', 'two_7', 'two_8', 'two_9', 'two_0',
                                        'three_1',
                                    ]
                                    att = getattr(line, day_fields[day - 1])

                                    if att:
                                        var = 'P'
                                    else:
                                        var = 'A'

                    # Week xml representation
#                    wh = hour2str(wh)
                    today_xml = '<day num="%s"> \
                                 <wh>%s</wh></day>' % ((today - month).days+1,
                                                       var)
                    dy = (today - month).days+1
                    days_xml.append(today_xml)
                    today, tomor = tomor, tomor + one_day
                user_xml.append(user_repr % '\n'.join(days_xml))

        rpt_obj = pooler.get_pool(cr.dbname).get('student.student')
        rml_obj = report_sxw.rml_parse(cr, uid, rpt_obj._name, context)
        users_obj = pooler.get_pool(cr.dbname).get('res.users')
        header_xml = '''
        <header>
        <date>%s</date>
        <company>%s</company>
        </header>
        ''' % (str(rml_obj.formatLang(time.strftime("%Y-%m-%d"),
                                      date=True))+' ' +
               str(time.strftime("%H:%M")),
               users_obj.browse(cr, uid, uid).company_id.name)

        first_date = str(month)
        som = datetime.strptime(first_date, '%Y-%m-%d %H:%M:%S')
        eom = som + timedelta(int(dy)-1)
        day_diff = eom-som
        date_xml = []
        cell = 1
        date_xml.append('<days>')
        if day_diff.days >= 30:
            len_mon = lengthmonth(som.year, som.month)
            date_xml += ['<dayy number="%d" \
                          name="%s" \
                          cell="%d" \
                          />' % (x,
                                 som.replace(day=x).strftime('%a'),
                                 x-som.day+1) for x in range(som.day,
                                                             len_mon+1)]
        else:
            if day_diff.days >= (lengthmonth(som.year, som.month)-som.day):
                len_mon = lengthmonth(som.year, som.month)
                date_xml += ['<dayy number="%d" \
                              name="%s" \
                              cell="%d" \
                              />' % (x,
                                     som.replace(day=x).strftime('%a'),
                                     x-som.day+1) for x in range(som.day,
                                                                 len_mon+1)]
            else:
                date_xml += ['<dayy number="%d" \
                              name="%s" \
                              cell="%d" \
                              />' % (x,
                                     som.replace(day=x).strftime('%a'),
                                     x-som.day+1) for x in range(som.day,
                                                                 eom.day+1)]
        cell = x-som.day+1
        day_diff1 = day_diff.days-cell+1
        width_dict = {}
        month_dict = {}
        i = 1
        j = 1
        year = som.year
        month = som.month
        month_dict[j] = som.strftime('%B')
        width_dict[j] = cell

        while day_diff1 > 0:
            if month+i <= 12:
                # Not on 30 else you have problems when entering \
                # 01-01-2009 for example
                if day_diff1 > lengthmonth(year, i + month):
                    som1 = datetime.date(year, month + i, 1)
                    mon = lengthmonth(year, i+month)
                    date_xml += ['<dayy number="%d" \
                                  name="%s" \
                                  cell="%d" \
                                  />' % (x,
                                         som1.replace(day=x).strftime('%a'),
                                         cell+x) for x in range(1, mon+1)]
                    i = i+1
                    j = j+1
                    month_dict[j] = som1.strftime('%B')
                    cell = cell+x
                    width_dict[j] = x
                else:
                    som1 = datetime.date(year, month+i, 1)
                    date_xml += ['<dayy number="%d" \
                                  name="%s" \
                                  cell="%d" \
                                  />' % (x,
                                         som1.replace(day=x).strftime('%a'),
                                         cell+x) for x in range(1, eom.day+1)]
                    i = i+1
                    j = j+1
                    month_dict[j] = som1.strftime('%B')
                    cell = cell+x
                    width_dict[j] = x
                day_diff1 = day_diff1-x
            else:
                years = year+1
                year = years
                month = 0
                i = 1
                if day_diff1 >= 30:
                    som1 = datetime.date(years, i, 1)
                    mon = lengthmonth(years, i)
                    date_xml += ['<dayy number="%d" \
                                  name="%s" \
                                  cell="%d" \
                                  />' % (x,
                                         som1.replace(day=x).strftime('%a'),
                                         cell+x) for x in range(1, mon+1)]
                    i = i+1
                    j = j+1
                    month_dict[j] = som1.strftime('%B')
                    cell = cell+x
                    width_dict[j] = x
                else:
                    som1 = datetime.date(years, i, 1)
                    i = i+1
                    j = j+1
                    month_dict[j] = som1.strftime('%B')
                    date_xml += ['<dayy number="%d" \
                                  name="%s" \
                                  cell="%d" \
                                  />' % (x,
                                         som1.replace(day=x).strftime('%a'),
                                         cell+x) for x in range(1, eom.day+1)]
                    cell = cell+x
                    width_dict[j] = x
                day_diff1 = day_diff1-x
        date_xml.append('</days>')
        date_xml.append('<cols>3.5cm%s</cols>\n' % (',0.74cm' * (int(dy))))
        xml = '''<?xml version="1.0" encoding="UTF-8" ?>
        <report>
        %s
        %s
        %s
        </report>
        ''' % (header_xml, '\n'.join(user_xml), '\n'.join(date_xml))
        return xml
Example no. 32
    if this_permit in permits_part:
        this_idx = permits_part.index(this_permit)
        well_count_part[this_idx] += 1

name_list = [name_row, name_col, name]

#--now build data list
well_file_list = []
well_file_name = 'avg_ann.wel'
day_count = 0

this_year = 2011
for month in range(1, 13):
    num_days_month = calendar.monthrange(2011, month)[1]
    for day in range(1, num_days_month + 1):
        this_date = datetime.date(this_year, month, day).strftime('%B %d, %Y')
        day_count += 1
        well_array = np.zeros((nlay, nrow, ncol))
        this_well_file_list = []

        #--loop over the shp wells and fill the well_array
        for w_idx in range(len(wells)):
            this_rec = shp.record(w_idx)
            this_permit = this_rec[permit_idx]
            this_row = this_rec[row_idx]
            this_col = this_rec[col_idx]

            #--load layer portions
            this_layer_portions = []
            for p in portion_idxs:
                this_layer_portions.append(this_rec[p])
Example no. 33
    def get_txn_hist(self):
        wb = Workbook()
        procflag = False
        selection = self.var1.get()
        datest = dateen = ''
        if (self.Plan_text.get()
                == '') and (self.Txnstdate_text.get()
                            == '') and (self.Txnendate_text.get() == ''):
            tkinter.messagebox.showinfo("Error",
                                        "Please Enter Plan No or Product")
        elif (self.Plan_text.get() !=
              '') and (self.Txnstdate_text.get()
                       == '') and (self.Txnendate_text.get() == ''):
            datest = '1900-01-01'
            dateen = datetime.date(datetime.now())
            procflag = True
        elif (self.Plan_text.get() !=
              '') and (self.Txnstdate_text.get() !=
                       '') and (self.Txnendate_text.get() == ''):
            datest = dateen = self.Txnstdate_text.get()
            procflag = True
        elif (self.Plan_text.get() !=
              '') and (self.Txnstdate_text.get() !=
                       '') and (self.Txnendate_text.get() != ''):
            datest = self.Txnstdate_text.get()
            dateen = self.Txnendate_text.get()
            procflag = True
        else:
            tkinter.messagebox.showinfo("Error", "Please check Details")

        if procflag:
            if selection == 1:
                partplan = self.Plan_text.get()
                prod = ''
            else:
                partplan = ''
                prod = self.Plan_text.get()

            try:
                ws = wb.active
                ws.title = 'Client_Transaction_history'
                #            ws.path()
                column_names = [
                    "Product", "Application_number", "Plan_number",
                    "Customer_name", "Plan issue date", "Transaction Type",
                    "Description", "Source Fund Code", "Trade Date",
                    "Run Date", "Run Time", "Reversal Code", "Sequence Number",
                    "Post Number", "Base Curr Code", "Target Curr Code",
                    "Base to target Rate", "Fund Code", "Strategy Code",
                    "Life Stages Code", "Amount", "Unit Price", "No of Units",
                    "Transaction Description", "Comment", "Omni Activity Code",
                    "Source System Transaction Type ID", "Source Amount",
                    "Exchange Type Code", "Source Contribution Type",
                    "Reference Plan Number", "Request Reference",
                    "Pending Status"
                ]
                ws.append(column_names)
                sql_query = """Select PLN.PRD_CODE, PLN.APL_CODE, PLN.PLN_NUM, 
                       CONCAT(UPF.firstname,' ',UPF.first1,' ', UPF.first2, ' ', UPF.surname),
                       PLN.ISSU_DT, TP.TXN_TP_ID, TP.DSC,
                       FM.SRC_STM_FND_CODE,TRD_DT,RUN_DT,RUN_TM,RVRS_CODE,SEQ_NUM,PST_NUM,BASE_CCY_CODE,TRGT_CCY_CODE,
                       BASE_TO_TRGT_RATE,TXN.FND_CODE,TXN.IVSM_STRTG_CODE,TXN.LFE_STAGES_CODE,AMT,UNIT_PRC,
                       NUM_OF_UNITS,TXN_DSC,TXN.CMNT,OMNI_AVY_CODE,SRC_STM_TXN_TP_ID,SRC_AMT,EXG_TYPE_CODE,
                       SRC_CTB_TP_CODE,REFR_PLN_NUM,REQ_REFR,PNDG_ST
                       from ODS.TXN_HIST TXN JOIN ODS.PLN PLN ON TXN.PLN_NUM = PLN.PLN_NUM
                       JOIN MDM.TXN_TP TP ON TXN.TXN_TP_ID = TP.TXN_TP_ID
                       JOIN MDM.FND_MPNG FM ON PLN.PRD_CODE = FM.PRD_CODE AND FM.FND_CODE = TXN.FND_CODE AND
                       FM.IVSM_STRTG_CODE = TXN.IVSM_STRTG_CODE AND FM.LFE_STAGES_CODE = TXN.LFE_STAGES_CODE
                       JOIN dbo.unplatform UPF ON  PLN.PLN_NUM = UPF.planid AND PLN.APL_CODE = UPF.applicant
                       where (PLN.PLN_NUM = ? OR PLN.PRD_CODE = ?) AND TRD_DT >= ? AND TRD_DT <= ?"""
                input1 = (partplan, prod, datest, dateen)
                cursor.execute(sql_query, input1)
                result = cursor.fetchall()
                if len(result) == 0:  # this not working
                    tkinter.messagebox.showinfo(
                        "Error", "Check Input Details! No Data Found")
                else:
                    for row in result:
                        ws.append(tuple(row))

                    workbook_name = self.Plan_text.get(
                    ) + "_Transaction history"
                    wb.save(workbook_name + ".xlsx")
                    tkinter.messagebox.showinfo(
                        "Error",
                        "Transaction File Created and saved on your /Desktop")
            except pyodbc.Error as err1:
                tkinter.messagebox.showinfo(
                    "Error",
                    "There is some problem in fetching data!! Check Connection or"
                    " Input Details")
                print(err1)
Example no. 34
def daysuntil(date):
    delta = datetime.date(date) - datetime.now().date()
    return delta.days
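A small usage sketch (assuming `from datetime import datetime`; the argument's date part is compared against today):

daysuntil(datetime(2030, 1, 1))   # days remaining from today until 2030-01-01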
data['tra']['dow'] = data['tra']['visit_date'].dt.dayofweek
data['tra']['year'] = data['tra']['visit_date'].dt.year
data['tra']['month'] = data['tra']['visit_date'].dt.month
data['tra']['visit_date'] = data['tra']['visit_date'].dt.date

data['tes']['visit_date'] = data['tes']['id'].map(
    lambda x: str(x).split('_')[2])
data['tes']['air_store_id'] = data['tes']['id'].map(
    lambda x: '_'.join(x.split('_')[:2]))
data['tes']['visit_date'] = pd.to_datetime(data['tes']['visit_date'])
data['tes']['dow'] = data['tes']['visit_date'].dt.dayofweek
data['tes']['year'] = data['tes']['visit_date'].dt.year
data['tes']['month'] = data['tes']['visit_date'].dt.month
data['tes']['visit_date'] = data['tes']['visit_date'].dt.date

data['build'] = data['tra'][(data['tra']['visit_date'] < datetime.date(
    2017, 3, 12))]
data['valid'] = data['tra'][
    (data['tra']['visit_date'] >= datetime.date(2017, 3, 12))
    & (data['tra']['visit_date'] <= datetime.date(2017, 4, 19))]

unique_stores = data['valid']['air_store_id'].unique()
stores = pd.concat([
    pd.DataFrame({
        'air_store_id': unique_stores,
        'dow': [i] * len(unique_stores)
    }) for i in range(7)
],
                   axis=0,
                   ignore_index=True).reset_index(drop=True)

tmp = data['build'].groupby(['air_store_id', 'dow']).agg(
Example no. 36
    i += 1
    j += 1

# Input the start and end dates for Officers' Workshop sheet .
startD = START_DAY_OF_OFFICERS_WORKSHOP  # Start Day must be a Monday.
startM = START_MONTH_OF_OFFICERS_WORKSHOP
startY = START_YEAR_OF_OFFICERS_WORKSHOP
endD = END_DAY_OF_OFFICERS_WORKSHOP
endM = END_MONTH_OF_OFFICERS_WORKSHOP
endY = END_YEAR_OF_OFFICERS_WORKSHOP

startDate = datetime.datetime.strptime(
    str(startD) + "-" + str(startM) + "-" + str(startY), "%d-%m-%Y")
endDate = datetime.datetime.strptime(
    str(endD) + "-" + str(endM) + "-" + str(endY), "%d-%m-%Y")
weeksBetweenDates = (datetime.date(endY, endM, endD) -
                     datetime.date(startY, startM, startD)).days / 7
worksheetW.set_column(1, int(weeksBetweenDates * 7), 10)
lineIndex = 0
columnIndex = 1
endW = 0
week = 0
# Format the Officers' Workshop sheet information.
while week < weeksBetweenDates:
    nextMonth = False
    if startD + 6 > calendar.monthrange(startY, startM)[1]:
        nextMonth = True
        endW = startD + 6 - calendar.monthrange(startY, startM)[1]
    else:
        endW = startD + 6
    worksheetW.merge_range(lineIndex, columnIndex, lineIndex, columnIndex + 6,
Example no. 37
 def is_expired(self):
     return self.end_time < datetime.date(timezone.now())
Example no. 38
    parser.add_option("-i",
                      "--item_types",
                      action="store",
                      dest="item_types",
                      default='PSScene4Band',
                      help="the item types, e.g., PSScene4Band,PSOrthoTile")
    parser.add_option(
        "-a",
        "--planet_account",
        action="store",
        dest="planet_account",
        default='*****@*****.**',
        help="planet email account, e.g., [email protected]")
    parser.add_option("-p",
                      "--process_num",
                      action="store",
                      dest="process_num",
                      type=int,
                      default=10,
                      help="number of processes to download images")

    (options, args) = parser.parse_args()
    if len(sys.argv) < 2 or len(args) < 1:
        parser.print_help()
        sys.exit(2)

    basic.setlogfile('download_planet_images_%s.log' %
                     str(datetime.date(datetime.now())))

    main(options, args)
Example no. 39
import os, uuid
import matplotlib.pyplot as plt
from datetime import datetime

winOrLoss = input('Enter Win or Loss: ')

from neural_net import csPer5, DATA_DIR

for file in os.listdir(DATA_DIR):
    if file.endswith('.png'):
        os.remove(os.path.join(DATA_DIR, file))

currentDate = str(datetime.date(datetime.now()))

try:
    os.mkdir(os.path.join(DATA_DIR, currentDate))
except FileExistsError:
    pass  # the folder for today's date already exists

numGamesForCurrentDate = len(os.listdir(os.path.join(DATA_DIR,
                                                     currentDate))) + 1

x = list(csPer5.keys())
y = list(csPer5.values())
goal_y = [38, 90, 135, 180, 225, 270]

fig, ax = plt.subplots()
ax.set_title('Game ' + str(numGamesForCurrentDate) + ': ' + winOrLoss)
ax.plot(x, y, label='achieved')
ax.plot(x, goal_y, label='goal')
ax.set_yticks(goal_y + [300])
Exemplo n.º 40
0
    elif choices == 3 and a == "user":
        returned_book = input("\nEnter the book name you returned: ")
        if returned_book in issued_book and issued_book[returned_book] == 5:
            print("This book is not issued")
        elif returned_book in issued_book and issued_book[returned_book] <= 5:
            count = issued_book[returned_book] + 1
            issued_book.update({returned_book: count})

        else:
            print("Wrong Entry ...Book names are case sensitive ")
            continue
        import datetime
        year = int(input('Enter issued_date  year'))
        month = int(input('Enter issued_date  month'))
        day = int(input('Enter issued_date day'))
        issued_date1 = datetime.date(year, month, day)
        #print(issued_date1)
        today1 = datetime.date.today()
        total_days = (today1 - issued_date1).days  # full day difference, not just day-of-month
        days = total_days - 7  # user penalty starts 7 days after the issued date
        fine = 0
        if days > 5 and days <= 30:
            fine = days * 20
            print("But u are", days, "days late so u will pay..", fine)
        elif days > 30:
            fine = days * 20
            print(" You are One month late so u will pay...", fine)
        elif days < 0:
            print("enter correct date")
Exemplo n.º 41
0
#      Ref: https://realpython.com/python-datetime/
#      Ref: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
#    Satus: <runs> - <bug (false output , script does not run)> - <broken (link, module, file is missing)>
#    Satus: BUG
#   Status: def fn_timestemp( io_tag , part_1 , extension) does work. It gives me a nice file names with timestemp.
#       >N: Why do I get: ' cannot import 'datetime' from 'datetime'.
#  ------------------------------------
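# A brief aside (added for clarity, not part of the original script): the "cannot import"
# confusion above usually comes from mixing the datetime *module* with the datetime *class*
# it contains. Minimal sketch, assuming a clean interpreter session:
#
#   import datetime                            # the module: datetime.date(...), datetime.datetime.now()
#   from datetime import datetime, date, time  # the classes: datetime.now(), date(...), time(...)
#
# "from datetime import datetime" itself only fails when a local file named datetime.py
# shadows the standard-library module on the import path.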

# dates are easily constructed and formatted
from datetime import datetime, date, time
today = date.today()
print(f'With date.today I get the day: {today} !')

# now.strftime("%m-%d-%y. %d %b %Y is a %A on the %d day of %B.")
# dates support calendar arithmetic
birthday = date(1964, 11, 27)  # use the date class directly; datetime.date here is the instance method
age = today - birthday
print(age.days / 365)
print("----- " * 3)
now = datetime.now()
# current_datetime = time(now.year, now.month, now.day , now.hour, now.minute)
print(f'Current time: {now} !')
print("----- " * 3)
print(now.year)
print(now.hour)

print("----- " * 3)


def fn_timestemp(io_tag, part_1, extension):
    ''' return: string , to be used as filename
Exemplo n.º 42
0
                }
            },
        })


def document_types():
    url = _SESSION_STUFF['api_url_base'].format('documentTypes')
    r = _calcbench_session().get(url, verify=_SESSION_STUFF['ssl_verify'])
    r.raise_for_status()
    return r.json()


def html_diff(html_1, html_2):
    '''Diff two pieces of html and return a html diff'''
    return _json_POST('textDiff', {'html1': html_1, 'html2': html_2})


if __name__ == '__main__':

    import datetime
    _rig_for_testing(domain='localhost')
    docs = list(
        document_search(company_identifiers=['cpf'],
                        document_name="CommitmentAndContingencies",
                        all_history=True))
    [d.get_contents() for d in docs]
    filings(entire_universe=True,
            start_date=datetime.date(2019, 1, 1),
            end_date=datetime.date.today(),
            filing_types=[") 'select * from secfilings'"])
Exemplo n.º 43
0
                      user="******",
                      passwd="1234")
c = con.cursor()

# Read the file
# Insert the data into the Suppliers table.
file_reader = csv.reader(open(input_file, 'r'), delimiter=',')
header = next(file_reader)
for row in file_reader:
    data = []
    for column_index in range(len(header)):
        if column_index < 4:
            data.append(
                str(row[column_index]).lstrip('$').replace(' ', '').strip())
        else:
            a_date = datetime.date(
                datetime.strptime(str(row[column_index]), "%m/%d/%y"))
            # %Y stores the year as 2016, %y stores it as 16
            a_date = a_date.strftime("%y-%m-%d")
            data.append(a_date)
    print(data)
    c.execute("""INSERT INTO Suppliers VALUES(%s, %s, %s, %s, %s);""", data)
con.commit()
print("")

# Query the Suppliers table.
c.execute("SELECT * FROM Suppliers")
rows = c.fetchall()
for row in rows:
    row_list_output = []
    for column_index in range(len(row)):
        row_list_output.append(row[column_index])
Exemplo n.º 44
0
def day(request):
    with open('fitnerapp/static/api_key.txt', 'r') as f:
        key = f.read()
        YOUTUBE_DATA_API_KEY = key

    if request.method == 'GET':
        r = request.GET
        try:
            today = r['today']
            part = today[-4:]
            if part == 'left':
                today = today[:-4]
                now = datetime.strptime(today, "%Y-%m-%d") - timedelta(1)
            else:
                now = datetime.strptime(today, "%Y-%m-%d") + timedelta(1)
        except:
            now = datetime.now()

    # Total workout time
    #time_sum = Data.objects.aggregate(Sum('total_time'))
    time_filter = Data.objects.filter(
        registered_dttm__date=datetime.date(now).isoformat()).aggregate(
            Sum('total_time'))
    #t_values = time_sum.values()
    t_values = time_filter.values()

    for i in t_values:
        t_values = i

    if (t_values == None):
        t_values = 0

    t_value_sec = t_values

    hours = t_values // 3600
    t_values = t_values - hours * 3600
    mu = t_values // 60
    ss = t_values - mu * 60
    #print(hours, '시간', mu, '분', ss, '초')
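    # Equivalent decomposition using divmod (illustrative sketch, not in the original view):
    #   hours, rem = divmod(t_value_sec, 3600)
    #   mu, ss = divmod(rem, 60)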

    # Workout videos watched
    #video = Data.objects.annotate(Count('videoId'))
    video = Data.objects.filter(
        registered_dttm__date=datetime.date(now).isoformat()).annotate(
            Count('videoId'))
    v_values = list(video.values_list('videoId'))
    video_cnt = len(v_values)
    #print(video_cnt)

    # Workout amount compared to the previous day
    yesterday_filter = Data.objects.filter(registered_dttm__date=datetime.date(
        now - timedelta(1)).isoformat()).aggregate(Sum('total_time'))
    yesterday_values = yesterday_filter.values()

    for i in yesterday_values:
        yesterday_values = i

    if yesterday_values == None:
        yesterday_values = 0

    gap_time = t_value_sec - yesterday_values
    gap_time_signed = gap_time

    gap_time = abs(gap_time)
    hours_gap = gap_time // 3600
    gap_time = gap_time - hours_gap * 3600
    mu_gap = gap_time // 60
    ss_gap = gap_time - mu_gap * 60

    if hours_gap == 0 and mu_gap == 0:
        if gap_time_signed < 0:
            gap_time = '-' + str(gap_time) + '초'
        else:
            gap_time = '+' + str(gap_time) + '초'
    elif hours_gap == 0 and mu_gap != 0:
        if gap_time_signed < 0:
            gap_time = '-' + str(mu_gap) + '분'
        else:
            gap_time = '+' + str(mu_gap) + '분'
    elif hours_gap != 0:
        if gap_time_signed < 0:
            gap_time = '-' + str(hours_gap) + '시간'
        else:
            gap_time = '+' + str(hours_gap) + '시간'

    # Graph (expressed as workout time)
    today_data = Data.objects.filter(
        registered_dttm__date=datetime.date(now).isoformat()).all()
    today_data_values = today_data.values()

    graph_data = {}
    graph_data_list = []
    for i in today_data_values:
        a = datetime.time(i["registered_dttm"]).hour
        graph_data_list.append({str(a) + "시": i["total_time"]})

    tmp_total_graph_data = []
    try:
        sum_graph_data = Counter(graph_data_list[0])
        total_graph_data = []
        for i in range(1, len(graph_data_list)):
            sum_graph_data = sum_graph_data + Counter(graph_data_list[i])
        sum_graph_data_dict = dict(sum_graph_data)
        for j in range(len(sum_graph_data_dict)):
            keys = list(sum_graph_data_dict.keys())
            values = list(sum_graph_data_dict.values())
            total_graph_data.append({'x': keys[j], 'y': values[j]})
    except IndexError:
        total_graph_data = []

    # Similarity per workout video
    results = Data.objects.filter(registered_dttm__date=datetime.date(
        now).isoformat()).order_by('-registered_dttm')

    result_values = list(results.values())

    video_ids = []
    for i in result_values:
        video_ids.append(i['videoId'])

    # Modal window thumbnail and video title
    video_url = 'https://youtube.googleapis.com/youtube/v3/videos'
    video_params = {
        'key': YOUTUBE_DATA_API_KEY,
        'part': 'snippet',
        'id': video_ids,
    }

    video_r = requests.get(video_url, params=video_params)

    try:
        video_results = video_r.json()['items']
        index = 0
        for i in result_values:
            i['channelTitle'] = video_results[index]['snippet']['channelTitle']
            i['video_title'] = video_results[index]['snippet']['title']
            i['video_thumbnail'] = video_results[index]['snippet'][
                'thumbnails']['high']['url']
            index += 1
    except:
        api_key(video_r, YOUTUBE_DATA_API_KEY)
        video_results = []
    if now.weekday() == 0:
        weekday = '월'
    elif now.weekday() == 1:
        weekday = '화'
    elif now.weekday() == 2:
        weekday = '수'
    elif now.weekday() == 3:
        weekday = '목'
    elif now.weekday() == 4:
        weekday = '금'
    elif now.weekday() == 5:
        weekday = '토'
    elif now.weekday() == 6:
        weekday = '일'

    context = {
        'today': str(now.strftime("%Y-%m-%d")),
        'month': now.month,
        'day': now.day,
        'weekday': weekday,
        'hours': hours,
        'mu': mu,
        'ss': ss,
        'video_cnt': video_cnt,
        'gap_time': gap_time,
        'graph_data': total_graph_data,
        'results': result_values
    }

    return render(request, 'day.html', context)
Exemplo n.º 45
0
def PlaceOrder(customerName, employee, server, store):
    #formats all the ids
    if customerName != "NULL":
        customerName = customerName[1:]
    if store != "NULL":
        store = "'" + store + "'"
    if employee != "NULL":
        employee = employee[1:]
    cart = list()
    cartIds = list()
    cost = 0
    while 1:
        print("1. Browse Inventory")
        print("2. Search Inventory")
        print("3. View Cart")
        print("4. Check Out")
        print("5. Go Back")
        userInp = input()
        try:
            userInp = int(userInp)
        except:
            print("Please enter a valid option")
            continue
        if userInp == 1:
            print("Browsing inventory")
            #grabs item table
            row = server.command("SELECT  * FROM item")
            number = 0
            processed = list()
            #This formats and stores the table
            for x in row:
                temp = str(x).split(",")
                tempId = temp[0][4:-2]
                tempDescrip = temp[2][2:-1]
                tempQuan = temp[3][:-1]
                tempPrice = temp[1][10:-2]
                temp = [tempId, tempPrice, tempDescrip, tempQuan]
                processed.append(temp)
                print(f"{number}) {tempDescrip} - {tempQuan} -- ${tempPrice}")
                number = number + 1
            #This grabs all the sets
            row = server.command("SELECT * FROM full_set")
            #This next loop searches and collects the prices of the sets
            holder = list()
            for x in row:
                temp = str(x).split(",")
                temp[0] = temp[0][4:-2]
                temp[1] = temp[1][2:-2]
                holder.append(temp)
            for x in holder:
                #Gets all the item ids in the sets
                row = server.command("SELECT * FROM set_item WHERE set_id='" +
                                     x[0] + "';")
                tempIdAndCount = list()
                for y in row:
                    tempIdAndCount.append((str(y[1][2:-1]), y[2]))
                total = 0
                for y in tempIdAndCount:
                    t1, t2 = y
                    #Gets all the prices of the ids
                    row = server.command(
                        "SELECT item_price FROM item WHERE item_id='" + t1 +
                        "';")
                    t3 = float()
                    for z in row:
                        t3 = str(z)
                        t3 = t3[10:-4]
                        t3 = float(t3)
                    total = total + (t3 * t2)
                    total = round(total, 2)
                print(f"({number}) - {x[1]} -- ${total}")
                tempAll = [x[0], total, x[1], 2]
                processed.append(tempAll)
                number = number + 1
            print("-------------------------------------")
            Item = input(
                "Do you want to add one of items to the order(Enter y for yes or n for no): "
            )
            #This lets you add items to your cart from the browse menu
            while Item != 'n':
                tempItem = input("Enter the Item number you want to add: ")
                try:
                    tempItem = int(tempItem)
                except:
                    print("Please enter a valid option")
                    continue
                #stores the id, description and total price
                if tempItem < number:
                    temp = processed[tempItem]
                    if int(temp[3]) > 0:
                        cost = cost + float(temp[1])
                        cost = round(cost, 2)
                        cartIds.append(temp[0])
                        cart.append(temp[2])
                        print(f"({temp[2]} - {temp[3]} -- ${cost}")
                        print("Item Added to Cart")
                    else:
                        print("Sorry the Item is out of stock")
                        print("-------------------------------------")
                Item = input(
                    "Do you want to add another item to the order(Enter y for yes or n for no): "
                )

        #This is used to search for a specific item by item description
        elif userInp == 2:
            print("Search Inv")
            Item = input(
                "Enter the description of the Item you are looking for: ")
            print("item number, Price, Description, Amount in Stock")
            #tries to match the description
            StringCommand = "SELECT * FROM item WHERE item_description=" + "'" + str(
                Item) + "'" + ";"
            row = server.command(StringCommand)
            if row == None:
                print("could not find item")
            #If it finds the item it asks if you want to add it to the cart
            else:
                for x in row:
                    holder = x
                    temp = str(x).split(",")
                    print(temp[1:])
                tempInput = input(
                    "Would you like to add this item to your order(y for yes, n for no)?: "
                )
                #This adds the item
                if tempInput == 'y':
                    temp = str(holder).split(",")
                    cost = cost + float(temp[1][10:-2])
                    cost = round(cost, 2)
                    cartIds.append(temp[0])
                    cart.append(temp[2])
        #This shows the current cart
        elif userInp == 3:
            print("-------------------------------------")
            print(cart)
            print("-------------------------------------")
            print("Total Cost : " + str(cost))
        #This is paying for the cart
        elif userInp == 4:
            #stops if the cart is empty
            if cart == []:
                print("Cart is empty")
            else:
                if customerName == 'NULL' and employee == 'NULL':
                    print("Order has been placed!")
                else:
                    while 1:
                        #Select the payment method
                        print("1: Master Card")
                        print("2: Visa")
                        if employee != 'NULL':
                            print("3: Cash")
                        print("4: Go Back")
                        menu = input()
                        try:
                            menu = int(menu)
                        except:
                            print("Please enter a valid option")
                            continue
                        card = ""
                        #Ask for the credit card
                        if menu == 1 or menu == 2:
                            if menu == 2:
                                card = "'visa'"
                            if menu == 1:
                                card = "'mc'"
                            payment = input(
                                "Please enter a valid credit card number: ")
                            #payment="1111111111111111111111111111111111"
                            while len(str(payment)) < 16:
                                print("number was invalid")
                                payment = input(
                                    "Please enter a valid credit card number: "
                                )
                        if menu == 3:
                            card = "'cash'"
                        if menu == 4:
                            break
                        #Adds the order to the order table
                        StringCommand = "INSERT INTO orders(customer_id, employee_id, store_id, order_date, order_price,payment_type, delivery_date, active) VALUES("
                        nowDate = str(
                            datetime.date(datetime.now()) + timedelta(days=10))
                        StringCommand = StringCommand + customerName + " ," + employee + "," + "(SELECT store_id FROM store WHERE store_name =" + store + ") , '" + str(
                            datetime.date(datetime.now())) + "' , " + str(
                                cost
                            ) + ", " + card + ", '" + nowDate + "', 'yes');"
                        # print(StringCommand)
                        server.command(str(StringCommand))
                        server.command("COMMIT TRANSACTION")
                        addRevenue(cost, server, store)
                        if employee != "NULL":
                            addSales(cost, server, employee)
                        print("Have Fun!")
                        break

        elif userInp == 5:
            break
        else:
            print("Not an option!")
print("output #46: yesterday:{0!s}".format(yesterday))

eight_hours = timedelta(hours=-8)
print("output #47: {0!s} {1!s} ".format(eight_hours.days, eight_hours.seconds))
#With timedelta, the value in parentheses is normalized into days, seconds, and microseconds; the seconds here come from 24hr * 3,600s - 8hr * 3,600s
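# Worked example of that normalization (added for clarity): timedelta(hours=-8)
# is stored as days=-1, seconds=57600, since -8h == -1 day + 16h and 16 * 3600 == 57600.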

date_diff = today - yesterday
print("output #48: {0!s}".format(date_diff))  #計算出來的結果是用datetime呈現
print("output #49: {0!s}".format(str(date_diff).split()[0]))
#convert date_diff to a string and take index [0] with split; date_diff holds "1 day, 0:00:00"

##use strftime to build a string in a specific format from a date object
print("output #50: {0!s}".format(today.strftime('%m/%d/%Y')))
print("output #51: {0!s}".format(today.strftime('%b %d, %Y')))

##use strptime to build datetime objects from strings in a specific format
#generate strings that represent the date
date1 = today.strftime('%m%d%Y')
date2 = today.strftime('%b %d, %Y')
date3 = today.strftime('%Y-%m-%d')
date4 = today.strftime('%B %d, %Y')

#two datetime objects, two date objects
###the difference between strf and strp in datetime: strftime formats a date into a string, strptime converts a string into a datetime
print("output #54: {0!s}".format(datetime.strptime(date1, '%m%d%Y')))

###take only the date part
print("output #56: {0!s}".format(
    datetime.date(datetime.strptime(date3, '%Y-%m-%d'))))

#=== end of date arithmetic ===
Exemplo n.º 47
0
def quarter(listt, dateB, dateF):
    year = dateB[0:4]
    month = dateB[5:7]
    day = dateB[8:10]
    yearE = dateF[0:4]
    monthE = dateF[5:7]
    dayE = dateF[8:10]
    listt.insert(
        7,
        datetime.datetime.strptime(day + "-" + month + "-" + year, "%d-%m-%Y"))
    listt.insert(
        11,
        datetime.datetime.strptime(dayE + "-" + monthE + "-" + yearE,
                                   "%d-%m-%Y"))

    q = None
    if datetime.date(2018, 7, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2018, 11, 1):
        q = "Q1"
    elif datetime.date(2018, 11, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2019, 2, 1):
        q = "Q2"
    elif datetime.date(2019, 2, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2019, 5, 1):
        q = "Q3"
    elif datetime.date(2019, 5, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2019, 8, 1):
        q = "Q4"
    elif datetime.date(2019, 8, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2019, 11, 1):
        q = "Q5"
    # later quarters extend the same three-month pattern into 2020 and 2021
    elif datetime.date(2019, 11, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2020, 2, 1):
        q = "Q6"
    elif datetime.date(2020, 2, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2020, 5, 1):
        q = "Q7"
    elif datetime.date(2020, 5, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2020, 8, 1):
        q = "Q8"
    elif datetime.date(2020, 8, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2020, 11, 1):
        q = "Q9"
    elif datetime.date(2020, 11, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2021, 2, 1):
        q = "Q10"
    elif datetime.date(2021, 2, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2021, 5, 1):
        q = "Q11"
    elif datetime.date(2021, 5, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2021, 8, 1):
        q = "Q12"
    elif datetime.date(2021, 8, 1) <= datetime.date(
            int(year), int(month), int(day)) < datetime.date(2021, 11, 1):
        q = "Q13"
    if q is not None:
        listt.pop(2)
        listt.insert(2, q)
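

# A compact alternative sketch (added for illustration, not part of the original example):
# the quarter label can be derived arithmetically from the months elapsed since the
# start of Q2 (2018-11-01). This sketch assumes uniform 3-month quarters after the
# longer first quarter, and takes a datetime.date as input.
def quarter_label(d, first_start_year=2018, first_start_month=11):
    """Return 'Q1', 'Q2', ... for the date d, assuming 3-month quarters from 2018-11-01."""
    months_elapsed = (d.year - first_start_year) * 12 + (d.month - first_start_month)
    if months_elapsed < 0:
        return "Q1"  # everything before 2018-11-01 falls in the long first quarter
    return "Q" + str(2 + months_elapsed // 3)
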
Exemplo n.º 48
0
def build_model():

    #main input is the length of the amino acid in the protein sequence (700,)
    main_input = Input(shape=(700, ), dtype='float32', name='main_input')

    #Embedding Layer used as input to the neural network
    embed = Embedding(output_dim=21, input_dim=21,
                      input_length=700)(main_input)

    #secondary input is the protein profile features
    auxiliary_input = Input(shape=(700, 21), name='aux_input')
    #auxiliary_input = Masking(mask_value=0)(auxiliary_input)

    #get shape of input layers
    print("Protein Sequence shape: ", main_input.get_shape())
    print("Protein Profile shape: ", auxiliary_input.get_shape())

    #concatenate 2 input layers
    concat = Concatenate(axis=-1)([embed, auxiliary_input])

    ######## Recurrent Bi-Directional Long-Short-Term-Memory Layers ########
    lstm_f1 = Bidirectional(
        LSTM(400,
             return_sequences=True,
             activation='tanh',
             recurrent_activation='sigmoid',
             dropout=0.5,
             recurrent_dropout=0.5))(conv_features)

    lstm_f2 = Bidirectional(
        LSTM(300,
             return_sequences=True,
             activation='tanh',
             recurrent_activation='sigmoid',
             dropout=0.5,
             recurrent_dropout=0.5))(lstm_f1)

    #concatenate LSTM with convolutional layers
    concat_features = Concatenate(axis=-1)([lstm_f1, lstm_f2, conv_features])
    concat_features = Dropout(0.4)(concat_features)

    #Dense Fully-Connected DNN layers
    dense_1 = Dense(300, activation='relu')(conv_features)
    dense_1_dropout = Dropout(dense_dropout)(dense_1)
    dense_2 = Dense(100, activation='relu')(dense_1_dropout)
    dense_2_dropout = Dropout(dense_dropout)(dense_2)
    dense_3 = Dense(50, activation='relu')(dense_2_dropout)
    dense_3_dropout = Dropout(dense_dropout)(dense_3)
    dense_4 = Dense(16, activation='relu')(dense_3_dropout)
    dense_4_dropout = Dropout(dense_dropout)(dense_4)

    #Final Dense layer with 8 nodes for the 8 output classifications
    main_output = Dense(8, activation='softmax',
                        name='main_output')(dense_4_dropout)

    #create model from inputs and outputs
    model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output])

    #use Adam optimizer
    adam = Adam(lr=0.0003)
    #Adam is fast, but tends to over-fit
    #SGD is slow but gives great results; sometimes RMSProp works best; SWA can easily improve quality; AdaTune
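    # Illustrative alternative (not in the original model): swapping in SGD with momentum,
    # assuming the standard Keras optimizer API:
    #   from tensorflow.keras.optimizers import SGD
    #   sgd = SGD(learning_rate=0.01, momentum=0.9)
    #   model.compile(optimizer=sgd, ...)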

    #compile model using adam optimizer and the cateogorical crossentropy loss function
    model.compile(optimizer=adam,
                  loss={'main_output': 'categorical_crossentropy'},
                  metrics=[
                      'accuracy',
                      MeanSquaredError(),
                      FalseNegatives(),
                      FalsePositives(),
                      TrueNegatives(),
                      TruePositives(),
                      MeanAbsoluteError(),
                      Recall(),
                      Precision()
                  ])
    model.summary()

    #set earlyStopping and checkpoint callback
    earlyStopping = EarlyStopping(monitor='val_loss',
                                  patience=5,
                                  verbose=1,
                                  mode='min')
    checkpoint_path = "/blstm_3x1Dconv_dnn_" + str(
        datetime.date(datetime.now())) + ".h5"
    checkpointer = ModelCheckpoint(filepath=checkpoint_path,
                                   verbose=1,
                                   save_best_only=True,
                                   monitor='val_acc',
                                   mode='max')

    return model
Exemplo n.º 49
0
def covid19(request):
    # In order to split the chart by chamber and to track party totals,
    # we need to pass some additional information into the template.
    # Hopefully it remains more efficient to pass only info for legislators
    # listed in the HTML table than for all currently serving members,
    # but we'll also pass current member totals so we can compute the
    # current working membership of each chamber.
    import datetime
    from person.models import PersonRole, RoleType

    # Scan the template for the <table>s that hold information about
    # legislators.
    legislator_data = {}
    with open('templates/website/covid19.html') as f:
        for line in f:
            m = re.search(
                r"<td>(\d+/\d+/\d+)</td>.*href=\"https://www.govtrack.us/congress/members/\S+/(\d+)",
                line)
            if m:
                # For each table line with a date and legislator id, record
                # in legislator_data.
                datestr, id = m.groups()
                id = int(id)
                date = datetime.date(int("20" + datestr.split("/")[2]),
                                     int(datestr.split("/")[0]),
                                     int(datestr.split("/")[1]))
                legislator_data[str(id) + "__" + datestr] = { # key must match how client-side script does a lookup
                    "id": id,
                    "date": date,
                }

    # Fetch all of the PersonRoles that cover the date range of the records.
    current_members = list(
        PersonRole.objects.filter(
            enddate__gte=min(d['date'] for d in legislator_data.values()),
            startdate__lte=max(d['date'] for d in legislator_data.values()),
        ).select_related("person"))

    # Find the PersonRole for each record.
    for data in legislator_data.values():
        for pr in current_members:  # hard to make more efficient because of date check
            if pr.person.id == data[
                    'id'] and pr.startdate <= data['date'] <= pr.enddate:
                break
        else:
            raise Exception("Row with unmatched role: " + repr(data))

        # Store data to pass to the template.
        data.update({
            "chamber":
            "senate" if pr.role_type == RoleType.senator else "house",
            "is_voting":
            not pr.is_territory,  # doesn't affect total for quorum
            "party":
            pr.get_party_on_date(data['date']),
        })

    # Remove date because it is not JSON serializable.
    for data in legislator_data.values():
        del data['date']

    # To show the current party breakdown of each chamber, count up the total membership.
    # We'll subtract quarantined members on the client side.
    current_party_totals = {}
    for pr in current_members:
        if pr.is_territory: continue
        chamber = "senate" if pr.role_type == RoleType.senator else "house"
        party = pr.caucus or pr.party
        current_party_totals.setdefault(chamber, {})
        current_party_totals[chamber].setdefault(party, {})
        current_party_totals[chamber][party][
            "count"] = current_party_totals[chamber][party].get("count", 0) + 1
        if pr.caucus:
            current_party_totals[chamber][party]["has_independent"] = True
    for chamber in current_party_totals:  # list the majority party first
        current_party_totals[chamber] = sorted(
            current_party_totals[chamber].items(),
            key=lambda p: -p[1]["count"])

    return {
        "legislator_data": legislator_data,
        "current_party_totals": current_party_totals,
    }
Exemplo n.º 50
0
def get_date(zone, offset=0):
    """Get date based on timezone and offset of days."""
    return datetime.date(datetime.fromtimestamp(
        time.time() + 86400 * offset, tz=zone))
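

# Hypothetical usage sketch (everything beyond get_date is an assumption, not from the original):
#   from zoneinfo import ZoneInfo
#   get_date(ZoneInfo("UTC"))            # today's date in UTC
#   get_date(ZoneInfo("UTC"), offset=1)  # tomorrow's date in UTC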
Exemplo n.º 51
0
def main(args):

    #setting parsed input arguments
    job_dir = str(args.job_dir)
    all_data = float(args.alldata)
    batch_size = int(args.batch_size)
    epochs = int(args.epochs)
    logs_path = str(args.logs_dir)

    print("Logs Path: ", logs_path)
    print('Job Logs: ', job_dir)

    # if the all_data argument is not between 0 and 1, fall back to the default value of 0.5
    if not 0 < all_data <= 1:
        all_data = 0.5

    print('Running model using {}% of data'.format(int(all_data * 100)))
    train_hot, trainpssm, trainlabel, val_hot, valpssm, vallabel = load_cul6133_filted(
        all_data)
    test_hot, testpssm, testlabel = load_cb513(all_data)

    #build model
    print('Building 3x1Dconv BLSTM model')
    model = build_model()

    #initialise model callbacks
    tensorboard = tf.keras.callbacks.TensorBoard(log_dir=logs_path,
                                                 histogram_freq=0,
                                                 write_graph=True,
                                                 write_images=True)
    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        filepath="blstm_3conv_checkpoint/",
        verbose=1,
        save_best_only=True,
        monitor='val_acc',
        mode='max')

    # with tf.device('/gpu:0'): #use for training with GPU on TF
    print('Fitting model...')
    history = model.fit({
        'main_input': train_hot,
        'aux_input': trainpssm
    }, {'main_output': trainlabel},
                        validation_data=({
                            'main_input': val_hot,
                            'aux_input': valpssm
                        }, {
                            'main_output': vallabel
                        }),
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=1,
                        callbacks=[tensorboard, checkpoint],
                        shuffle=True)

    print('Evaluating model')
    score = model.evaluate({
        'main_input': test_hot,
        'aux_input': testpssm
    }, {'main_output': testlabel},
                           verbose=1,
                           batch_size=1)
    # eval_score = score[1]

    #initialise TensorBoard summary variables
    loss_summary = tf.summary.scalar(name='Loss Summary', data=score[0])
    accuracy_summary = tf.summary.scalar(name='Accuracy Summary',
                                         data=score[1])

    print('Model Loss : ', score[0])
    print('Model Accuracy : ', score[1])

    model_blob_path = 'models/model_blstm_3x1Dconv_' +'epochs_' + str(args.epochs) +'_'+ 'batch_size_' + str(args.batch_size) + '_' + str(datetime.date(datetime.now())) + \
        '_' + str((datetime.now().strftime('%H:%M')))

    model_save_path = 'model_blstm_3x1Dconv_' +'epochs_' + str(args.epochs) +'_'+ 'batch_size_' + str(args.batch_size) + '_' + str(datetime.date(datetime.now())) + \
        '_' + str((datetime.now().strftime('%H:%M')))+ '_accuracy-'+ str(score[1]) \
        +'_loss-' + str(score[0]) + '.h5'

    #create directory in bucket for new model - name it the model name, store model
    upload_history(history, model_save_path, score)
    upload_model(model, args, model_save_path, model_blob_path)
    plot_history(history.history,
                 show_histograms=True,
                 show_boxplots=True,
                 show_kde=True)
Exemplo n.º 52
0
import math
import pymysql as mysql
from math import radians, sin, cos, acos, asin, pi, sqrt
from datetime import datetime, timedelta, date
past = datetime.now() - timedelta(days=60)
past = datetime.date(past)
yesterday = datetime.now() - timedelta(days=1)
yesterday = datetime.date(yesterday)
from threading import Thread
from time import sleep
from datetime import datetime, timedelta
import pdfCreatorPropiedades as pdfC
import uf
import numpy as np
from sklearn import datasets, linear_model

uf1 = uf.getUf()


def estaciones():
    mariadb_connection = mysql.connect(user='******',
                                       password='******',
                                       host='127.0.0.1',
                                       database='metro')
    cur = mariadb_connection.cursor()
    sql = "SELECT * FROM estaciones"
    cur.execute(sql)
    tupla = cur.fetchall()
    return tupla

Exemplo n.º 53
0
 def set_default_window_values(self):
     self.ui.name_field.set_text("John Doe")
     self.ui.date_field.set_text(datetime.date(datetime.now()))
     self.ui.status_field.set_text(check_status("John Doe"))
Exemplo n.º 54
0
def month(request):
    with open('fitnerapp/static/api_key.txt', 'r') as f:
        key = f.read()
        YOUTUBE_DATA_API_KEY = key

    def IsLeapYear(year):
        if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
            return True
        else:
            return False

    MonthDayCountList = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

    def GetMonthLastDate(sourceDate):
        dayCount = MonthDayCountList[sourceDate.month - 1]
        if sourceDate.month == 2:
            if IsLeapYear(sourceDate.year):
                dayCount += 1
        targetDate = datetime(sourceDate.year, sourceDate.month, dayCount)
        return targetDate
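    # Worked example (added for clarity): GetMonthLastDate(datetime(2020, 2, 15))
    # returns datetime(2020, 2, 29), since 2020 is a leap year.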

    if request.method == 'GET':
        r = request.GET
        try:
            today = r['today']
            part = today[-4:]
            if part == 'left':
                today = today[:-4]
                now = datetime.strptime(today, "%Y-%m-%d") - timedelta(1)
                first_day = now.replace(day=1)
                now = first_day - timedelta(days=1)
            else:
                last_day = GetMonthLastDate(
                    datetime.strptime(today, "%Y-%m-%d"))
                now = last_day + timedelta(1)
        except:
            now = datetime.now()

    # Total workout time
    #time_sum = Data.objects.aggregate(Sum('total_time'))
    time_filter = Data.objects.filter(
        registered_dttm__month=now.month).aggregate(Sum('total_time'))

    #t_values = time_sum.values()
    t_values = time_filter.values()

    for i in t_values:
        t_values = i

    if (t_values == None):
        t_values = 0

    t_value_sec = t_values

    hours = t_values // 3600
    t_values = t_values - hours * 3600
    mu = t_values // 60
    ss = t_values - mu * 60
    #print(hours, '시간', mu, '분', ss, '초')

    # Workout videos watched
    #video = Data.objects.annotate(Count('videoId'))
    video = Data.objects.filter(registered_dttm__month=now.month).annotate(
        Count('videoId'))
    v_values = list(video.values_list('videoId'))
    video_cnt = len(v_values)
    #print(video_cnt)

    # Workout amount compared to the previous month
    yesterday_filter = Data.objects.filter(registered_dttm__month=now.month -
                                           1).aggregate(Sum('total_time'))
    yesterday_values = yesterday_filter.values()

    for i in yesterday_values:
        yesterday_values = i

    if yesterday_values == None:
        yesterday_values = 0

    gap_time = t_value_sec - yesterday_values
    gap_time_signed = gap_time

    gap_time = abs(gap_time)
    hours_gap = gap_time // 3600
    gap_time = gap_time - hours_gap * 3600
    mu_gap = gap_time // 60
    ss_gap = gap_time - mu_gap * 60

    if hours_gap == 0 and mu_gap == 0:
        if gap_time_signed < 0:
            gap_time = '-' + str(gap_time) + '초'
        else:
            gap_time = '+' + str(gap_time) + '초'
    elif hours_gap == 0 and mu_gap != 0:
        if gap_time_signed < 0:
            gap_time = '-' + str(mu_gap) + '분'
        else:
            gap_time = '+' + str(mu_gap) + '분'
    elif hours_gap != 0:
        if gap_time_signed < 0:
            gap_time = '-' + str(hours_gap) + '시간'
        else:
            gap_time = '+' + str(hours_gap) + '시간'

    # Graph (expressed as workout time)
    #t_day = Data.objects.values('total_time')
    # Get the last day of each month
    def IsLeapYear(year):
        if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
            return True
        else:
            return False

    MonthDayCountList = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

    def GetMonthLastDate(sourceDate):
        dayCount = MonthDayCountList[sourceDate.month - 1]
        if sourceDate.month == 2:
            if IsLeapYear(sourceDate.year):
                dayCount += 1
        targetDate = datetime(sourceDate.year, sourceDate.month, dayCount)
        return targetDate

    # Full range of dates
    first_day = now.replace(day=1)
    last_day = GetMonthLastDate(now)
    month_label = []
    dt_index = pandas.date_range(start=datetime.date(first_day),
                                 end=datetime.date(last_day))
    for time in dt_index:
        month_label.append(time.strftime("%m-%d"))

    month_data = Data.objects.filter(registered_dttm__month=now.month).all()
    month_data_values = month_data.values()

    tmp_graph_data_list = []
    for i in month_data_values:
        tmp_graph_data_list.append(
            {i['registered_dttm'].strftime("%Y-%m-%d"): i['total_time']})

    try:
        sum_graph_data = Counter(tmp_graph_data_list[0])
        total_graph_data = []
        for i in range(1, len(tmp_graph_data_list)):
            sum_graph_data = sum_graph_data + Counter(tmp_graph_data_list[i])
        sum_graph_data_dict = dict(sum_graph_data)
        for j in range(len(sum_graph_data_dict)):
            keys = list(sum_graph_data_dict.keys())
            values = list(sum_graph_data_dict.values())
            total_graph_data.append({'x': keys[j][5:], 'y': values[j]})
    except IndexError:
        total_graph_data = []

    # Similarity per workout video
    results = Data.objects.filter(
        registered_dttm__month=now.month).order_by('-registered_dttm')

    result_values = list(results.values())
    video_ids = []
    for i in result_values:
        video_ids.append(i['videoId'])

    # Modal window thumbnail and video title
    video_url = 'https://youtube.googleapis.com/youtube/v3/videos'
    video_params = {
        'key': YOUTUBE_DATA_API_KEY,
        'part': 'snippet',
        'id': video_ids,
    }
    video_r = requests.get(video_url, params=video_params)
    try:
        video_results = video_r.json()['items']
        index = 0
        for i in result_values:
            i['channelTitle'] = video_results[index]['snippet']['channelTitle']
            i['video_title'] = video_results[index]['snippet']['title']
            i['video_thumbnail'] = video_results[index]['snippet'][
                'thumbnails']['high']['url']
            index += 1
    except:
        api_key(video_r, YOUTUBE_DATA_API_KEY)
        video_results = []

    month = now.strftime('%Y. %m')

    context = {
        'today': str(now.strftime("%Y-%m-%d")),
        'month': month,
        'hours': hours,
        'mu': mu,
        'ss': ss,
        'video_cnt': video_cnt,
        'gap_time': gap_time,
        'month_label': month_label,
        'graph_data': total_graph_data,
        'results': result_values
    }

    return render(request, 'month.html', context)
Exemplo n.º 55
0
    def get_fsb(self):
        wb = Workbook()
        procflag = False
        #        selection = self.var1.get()
        datest = dateen = ''
        if (self.Plan_text.get()
                == '') and (self.Txnstdate_text.get()
                            == '') and (self.Txnendate_text.get() == ''):
            tkinter.messagebox.showinfo("Error", "Please Enter Plan No")
        elif (self.Plan_text.get() !=
              '') and (self.Txnstdate_text.get()
                       == '') and (self.Txnendate_text.get() == ''):
            if len(self.Plan_text.get()) < 10:
                tkinter.messagebox.showinfo(
                    "Error",
                    "Please Check Input Details and Enter Plan No./Date")
            else:
                #                datest = '1900-01-01'
                datest = datetime.date(datetime.now() - timedelta(1))
                dateen = datetime.date(datetime.now())
                procflag = True
        elif (self.Plan_text.get() !=
              '') and (self.Txnstdate_text.get() !=
                       '') and (self.Txnendate_text.get() == ''):
            if len(self.Plan_text.get()) < 10:
                tkinter.messagebox.showinfo(
                    "Error",
                    "Please Check Input Details and Enter Plan No./Date")
            else:
                datest = dateen = self.Txnstdate_text.get()
                procflag = True
        elif (self.Plan_text.get() !=
              '') and (self.Txnstdate_text.get() !=
                       '') and (self.Txnendate_text.get() != ''):
            if len(self.Plan_text.get()) < 10:
                tkinter.messagebox.showinfo(
                    "Error",
                    "Please Check Input Details and Enter Plan No./Date")
            else:
                datest = self.Txnstdate_text.get()
                dateen = self.Txnendate_text.get()
                procflag = True
        else:
            tkinter.messagebox.showinfo("Error", "Please check Details")

        if procflag:
            try:
                ws = wb.active
                ws.title = 'Client_Fund_Source_Balance'
                column_names = [
                    "Product", "Plan Name", "Plan No", "Omni Participant Id",
                    "Client Name", "Source Name", "ISIN", "Strategy Code",
                    "Investment Code", "No of Units", "Fund Curr",
                    "Unit Price", "Price NAV Date", "Fund Value",
                    "Un-invested Cash", "Pending Credits", "Pending Debits",
                    "Advisor", "Load Date"
                ]
                sql_query = """Select * from dbo.fundsrcbal
                               where partplanid = ?  AND loaddate >= ? AND loaddate <= ?"""
                input1 = (self.Plan_text.get(), datest, dateen)
                cursor.execute(sql_query, input1)
                result = cursor.fetchall()
                #            ws.path()
                if len(result) == 0:  # this is not working
                    tkinter.messagebox.showinfo(
                        "Error", "Check Input Details! No Data Found")
                else:
                    ws.append(column_names)
                    for row in result:
                        ws.append(tuple(row))

                    workbook_name = self.Plan_text.get() + "_FundSource"
                    wb.save(workbook_name + ".xlsx")
                    tkinter.messagebox.showinfo(
                        "Error", "FSB File Created and saved on your /Desktop")
            except pyodbc.Error as err1:
                tkinter.messagebox.showinfo(
                    "Error",
                    "There is some problem in fetching data!! Check Connection or "
                    "Input Details")
                print(err1)
Exemplo n.º 56
0
def week(request):
    with open('fitnerapp/static/api_key.txt', 'r') as f:
        key = f.read()
        YOUTUBE_DATA_API_KEY = key

    if request.method == 'GET':
        r = request.GET

        try:
            today = r['today']
            part = today[-4:]
            if part == 'left':
                today = today[:-4]
                now = datetime.strptime(today, "%Y-%m-%d") - timedelta(7)
            else:
                now = datetime.strptime(today, "%Y-%m-%d") + timedelta(7)
        except:
            now = datetime.now()

    def AddDays(sourceDate, count):
        targetDate = sourceDate + timedelta(days=count)
        return targetDate

    def GetWeekFirstDate(sourceDate):
        temporaryDate = datetime(sourceDate.year, sourceDate.month,
                                 sourceDate.day)
        weekDayCount = temporaryDate.weekday()
        targetDate = AddDays(temporaryDate, -weekDayCount)
        return targetDate
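    # Worked example (added for clarity): GetWeekFirstDate(datetime(2021, 6, 10)) returns
    # datetime(2021, 6, 7), the Monday of that week (June 10, 2021 is a Thursday).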

    start_date = GetWeekFirstDate(datetime.date(now))

    # Total workout time
    #time_sum = Data.objects.aggregate(Sum('total_time'))
    time_filter = Data.objects.filter(registered_dttm__range=[
        datetime.date(start_date),
        datetime.date(start_date) + timedelta(6)
    ]).aggregate(Sum('total_time'))

    #t_values = time_sum.values()
    t_values = time_filter.values()

    for i in t_values:
        t_values = i

    if (t_values == None):
        t_values = 0

    t_value_sec = t_values

    hours = t_values // 3600
    t_values = t_values - hours * 3600
    mu = t_values // 60
    ss = t_values - mu * 60
    #print(hours, '시간', mu, '분', ss, '초')

    # Workout videos watched
    #video = Data.objects.annotate(Count('videoId'))
    video = Data.objects.filter(
        registered_dttm__range=[start_date, start_date +
                                timedelta(6)]).annotate(Count('videoId'))
    v_values = list(video.values_list('videoId'))
    video_cnt = len(v_values)
    #print(video_cnt)

    # Workout amount compared to the previous week
    lastWeek_start_date = GetWeekFirstDate(
        datetime.date(start_date - timedelta(1)))
    lastWeek_filter = Data.objects.filter(registered_dttm__range=[
        lastWeek_start_date,
        datetime.date(lastWeek_start_date) + timedelta(6)
    ]).aggregate(Sum('total_time'))
    lastWeek_values = lastWeek_filter.values()

    for i in lastWeek_values:
        lastWeek_values = i

    if lastWeek_values == None:
        lastWeek_values = 0

    gap_time = t_value_sec - lastWeek_values
    gap_time_signed = gap_time

    gap_time = abs(gap_time)
    hours_gap = gap_time // 3600
    gap_time = gap_time - hours_gap * 3600
    mu_gap = gap_time // 60
    ss_gap = gap_time - mu_gap * 60

    if hours_gap == 0 and mu_gap == 0:
        if gap_time_signed < 0:
            gap_time = '-' + str(gap_time) + '초'
        else:
            gap_time = '+' + str(gap_time) + '초'
    elif hours_gap == 0 and mu_gap != 0:
        if gap_time_signed < 0:
            gap_time = '-' + str(mu_gap) + '분'
        else:
            gap_time = '+' + str(mu_gap) + '분'
    elif hours_gap != 0:
        if gap_time_signed < 0:
            gap_time = '-' + str(hours_gap) + '시간'
        else:
            gap_time = '+' + str(hours_gap) + '시간'

    # Graph (expressed as workout time)
    #t_day = Data.objects.values('total_time')
    week_filter = Data.objects.filter(registered_dttm__range=[
        datetime.date(start_date),
        datetime.date(start_date) + timedelta(6)
    ])
    week_data_values = week_filter.values()

    #print(week_data_values)
    tmp_week_graph = []
    for i in week_data_values:
        a = datetime.date(i["registered_dttm"]).weekday()
        if a == 0:
            tmp_week_graph.append({'월': i["total_time"]})
        elif a == 1:
            tmp_week_graph.append({'화': i["total_time"]})
        elif a == 2:
            tmp_week_graph.append({'수': i["total_time"]})
        elif a == 3:
            tmp_week_graph.append({'목': i["total_time"]})
        elif a == 4:
            tmp_week_graph.append({'금': i["total_time"]})
        elif a == 5:
            tmp_week_graph.append({'토': i["total_time"]})
        elif a == 6:
            tmp_week_graph.append({'일': i["total_time"]})

    # for i in tmp_week_graph:
    #         print(i)

    tmp_total_graph_data = []
    try:
        sum_graph_data = Counter(tmp_week_graph[0])
        total_graph_data = []
        for i in range(1, len(tmp_week_graph)):
            sum_graph_data = sum_graph_data + Counter(tmp_week_graph[i])
        sum_graph_data_dict = dict(sum_graph_data)
        for j in range(len(sum_graph_data_dict)):
            keys = list(sum_graph_data_dict.keys())
            values = list(sum_graph_data_dict.values())
            total_graph_data.append({'x': keys[j], 'y': values[j]})
    except IndexError:
        total_graph_data = []

    # Similarity per workout video
    results = Data.objects.filter(registered_dttm__range=[
        datetime.date(start_date),
        datetime.date(start_date) + timedelta(6)
    ]).order_by('-registered_dttm')

    result_values = list(results.values())
    video_ids = []
    for i in result_values:
        video_ids.append(i['videoId'])

    # Modal window thumbnail and video title
    video_url = 'https://youtube.googleapis.com/youtube/v3/videos'
    video_params = {
        'key': YOUTUBE_DATA_API_KEY,
        'part': 'snippet',
        'id': video_ids,
    }
    video_r = requests.get(video_url, params=video_params)
    try:

        video_results = video_r.json()['items']
        index = 0
        for i in result_values:
            i['channelTitle'] = video_results[index]['snippet']['channelTitle']
            i['video_title'] = video_results[index]['snippet']['title']
            i['video_thumbnail'] = video_results[index]['snippet'][
                'thumbnails']['high']['url']
            index += 1
    except:
        api_key(video_r, YOUTUBE_DATA_API_KEY)
        video_results = []

    start_week = now - timedelta(days=now.weekday())
    end_week = start_week + timedelta(days=6)

    start_week = start_week.strftime("%Y. %m. %d")
    end_week = end_week.strftime("%Y. %m. %d")
    term = str(start_week) + ' ~ ' + str(end_week)

    context = {
        'today': str(now.strftime("%Y-%m-%d")),
        'term': term,
        'hours': hours,
        'mu': mu,
        'ss': ss,
        'video_cnt': video_cnt,
        'gap_time': gap_time,
        'graph_data': total_graph_data,
        'results': result_values
    }

    return render(request, 'week.html', context)
Exemplo n.º 57
0
    def per_workflow(self, trans, **kwd):
        message = ''
        PageSpec = namedtuple('PageSpec',
                              ['entries', 'offset', 'page', 'pages_found'])

        specs = sorter('workflow_name', kwd)
        sort_id = specs.sort_id
        order = specs.order
        arrow = specs.arrow
        _order = specs.exc_order
        time_period = kwd.get('spark_time')
        time_period, _time_period = get_spark_time(time_period)
        spark_limit = 30
        offset = 0
        limit = 10

        if "entries" in kwd:
            entries = int(kwd.get('entries'))
        else:
            entries = 10
        limit = entries * 4

        if "offset" in kwd:
            offset = int(kwd.get('offset'))
        else:
            offset = 0

        if "page" in kwd:
            page = int(kwd.get('page'))
        else:
            page = 1

        # In case we don't know which is the monitor user we will query for all jobs

        q = sa.select(
            (model.Workflow.table.c.id.label('workflow_id'),
             sa.func.min(model.Workflow.table.c.name).label('workflow_name'),
             sa.func.count(
                 model.WorkflowInvocation.table.c.id).label('total_runs')),
            from_obj=[model.Workflow.table, model.WorkflowInvocation.table],
            whereclause=sa.and_(model.WorkflowInvocation.table.c.workflow_id ==
                                model.Workflow.table.c.id),
            group_by=[model.Workflow.table.c.id],
            order_by=[_order],
            offset=offset,
            limit=limit)

        all_runs_per_workflow = sa.select(
            (model.Workflow.table.c.id.label('workflow_id'),
             model.Workflow.table.c.name.label('workflow_name'),
             self.select_day(
                 model.WorkflowInvocation.table.c.create_time).label('date')),
            from_obj=[model.Workflow.table, model.WorkflowInvocation.table],
            whereclause=sa.and_(model.WorkflowInvocation.table.c.workflow_id ==
                                model.Workflow.table.c.id))

        currday = date.today()
        trends = dict()
        for run in trans.sa_session.execute(all_runs_per_workflow):
            curr_tool = re.sub(r'\W+', '', str(run.workflow_id))
            try:
                day = currday - run.date
            except TypeError:
                day = currday - datetime.date(run.date)

            day = day.days
            container = floor(day / _time_period)
            container = int(container)
            try:
                if container < spark_limit:
                    trends[curr_tool][container] += 1
            except KeyError:
                trends[curr_tool] = [0] * spark_limit
                if container < spark_limit:
                    trends[curr_tool][container] += 1

        runs = []
        for row in trans.sa_session.execute(q):
            runs.append((row.workflow_name, row.total_runs, row.workflow_id))

        pages_found = ceil(len(runs) / float(entries))
        page_specs = PageSpec(entries, offset, page, pages_found)

        return trans.fill_template(
            '/webapps/reports/workflows_per_workflow.mako',
            order=order,
            arrow=arrow,
            sort_id=sort_id,
            spark_limit=spark_limit,
            time_period=time_period,
            trends=trends,
            runs=runs,
            message=message,
            page_specs=page_specs)
Exemplo n.º 58
0
#!/usr/bin/env python3
import glob
from reportlab.platypus import Paragraph, Spacer, Table
from reportlab.platypus import SimpleDocTemplate
from reportlab.lib.styles import getSampleStyleSheet
from datetime import datetime


def generate_report(attachment, title, paragraph):
    report = SimpleDocTemplate(attachment)
    styles = getSampleStyleSheet()
    report_title = Paragraph(title, styles["h1"])
    report_table = Table(data=paragraph)
    report.build([report_title, report_table])


if __name__ == "__main__":
    file = "processed.pdf"
    my_title = "Processed Update on {}".format(datetime.date(datetime.now()))
    all_items = []
    for txt_file in glob.glob("supplier-data/descriptions/*.txt"):
        all_items.append(" ")
        description = {}
        with open(txt_file, 'r') as f:
            lines = f.readlines()
        description["name:"] = lines[0].strip()
        description["weight:"] = lines[1].strip()
        all_items.append(description)
    print(all_items)
    generate_report(file, my_title, all_items)
Exemplo n.º 59
0
    def per_user(self, trans, **kwd):
        message = ''
        PageSpec = namedtuple('PageSpec',
                              ['entries', 'offset', 'page', 'pages_found'])

        specs = sorter('user_email', kwd)
        sort_id = specs.sort_id
        order = specs.order
        arrow = specs.arrow
        _order = specs.exc_order
        time_period = kwd.get('spark_time')
        time_period, _time_period = get_spark_time(time_period)
        spark_limit = 30
        offset = 0
        limit = 10

        if "entries" in kwd:
            entries = int(kwd.get('entries'))
        else:
            entries = 10
        limit = entries * 4

        if "offset" in kwd:
            offset = int(kwd.get('offset'))
        else:
            offset = 0

        if "page" in kwd:
            page = int(kwd.get('page'))
        else:
            page = 1

        workflows = []
        q = sa.select(
            (model.User.table.c.email.label('user_email'),
             sa.func.count(
                 model.StoredWorkflow.table.c.id).label('total_workflows')),
            from_obj=[
                sa.outerjoin(model.StoredWorkflow.table, model.User.table)
            ],
            group_by=['user_email'],
            order_by=[_order],
            offset=offset,
            limit=limit)

        all_workflows_per_user = sa.select(
            (model.User.table.c.email.label('user_email'),
             self.select_day(
                 model.StoredWorkflow.table.c.create_time).label('date'),
             model.StoredWorkflow.table.c.id),
            from_obj=[
                sa.outerjoin(model.StoredWorkflow.table, model.User.table)
            ])
        currday = datetime.today()
        trends = dict()
        for workflow in trans.sa_session.execute(all_workflows_per_user):
            curr_user = re.sub(r'\W+', '', workflow.user_email)
            try:
                day = currday - workflow.date
            except TypeError:
                day = datetime.date(currday) - datetime.date(workflow.date)

            day = day.days
            container = floor(day / _time_period)
            container = int(container)
            try:
                if container < spark_limit:
                    trends[curr_user][container] += 1
            except KeyError:
                trends[curr_user] = [0] * spark_limit
                if container < spark_limit:
                    trends[curr_user][container] += 1

        for row in trans.sa_session.execute(q):
            workflows.append((row.user_email, row.total_workflows))

        pages_found = ceil(len(workflows) / float(entries))
        page_specs = PageSpec(entries, offset, page, pages_found)

        return trans.fill_template('/webapps/reports/workflows_per_user.mako',
                                   order=order,
                                   arrow=arrow,
                                   sort_id=sort_id,
                                   spark_limit=spark_limit,
                                   trends=trends,
                                   time_period=time_period,
                                   workflows=workflows,
                                   message=message,
                                   page_specs=page_specs)
Exemplo n.º 60
0
from datetime import datetime


for j in range(28):
    d = j+1
    print(d,end=':')
    count = 0
    for i in range(12):
        m = i+1
        date=datetime.date(datetime(year=2022, month=m, day=d))
        if date.isoweekday() in [1, 2, 3, 4, 5]:
            count+=1
    print(count)
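
# Equivalent one-liner for a single day-of-month d (illustrative sketch, not in the original):
#   from datetime import date
#   weekday_count = sum(date(2022, m, d).weekday() < 5 for m in range(1, 13))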