def cli():
    """Command-line entry point: bisect Mozilla nightly builds.

    Parses the good/bad date range plus app/addons/profile/repo/bits
    options, builds a NightlyRunner, and runs Bisector.bisect() over
    the date range.  (Python 2 syntax: uses the print statement.)
    """
    parser = OptionParser()
    parser.add_option("-b", "--bad", dest="bad_date",help="first known bad nightly build, default is today",
                      metavar="YYYY-MM-DD", default=str(datetime.date.today()))
    parser.add_option("-g", "--good", dest="good_date",help="last known good nightly build",
                      metavar="YYYY-MM-DD", default=None)
    parser.add_option("-e", "--addons", dest="addons",help="list of addons to install", metavar="PATH1,PATH2", default="")
    parser.add_option("-p", "--profile", dest="profile", help="profile to use with nightlies", metavar="PATH")
    parser.add_option("-a", "--args", dest="cmdargs", help="command-line arguments to pass to the application",
                      metavar="ARG1,ARG2", default="")
    parser.add_option("-n", "--app", dest="app", help="application name (firefox, fennec or thunderbird)",
                      metavar="[firefox|fennec|thunderbird]", default="firefox")
    parser.add_option("-r", "--repo", dest="repo_name", help="repository name on ftp.mozilla.org",
                      metavar="[tracemonkey|mozilla-1.9.2]", default=None)
    parser.add_option("--bits", dest="bits", help="force 32 or 64 bit version (only applies to x86_64 boxes)",
                      choices=("32","64"), default=mozinfo.bits)
    parser.add_option("--persist", dest="persist", help="the directory in which files are to persist ie. /Users/someuser/Documents")
    (options, args) = parser.parse_args()

    # Normalise the --bits string choice via the project helper.
    options.bits = parseBits(options.bits)

    # Comma-separated option strings -> lists.
    addons = strsplit(options.addons, ",")
    cmdargs = strsplit(options.cmdargs, ",")

    # Fall back to an early date so the bisection window is non-empty.
    if not options.good_date:
        options.good_date = "2009-01-01"
        print "No 'good' date specified, using " + options.good_date

    runner = NightlyRunner(appname=options.app, addons=addons, repo_name=options.repo_name,
                           profile=options.profile, cmdargs=cmdargs, bits=options.bits,
                           persist=options.persist)
    bisector = Bisector(runner, appname=options.app)
    bisector.bisect(get_date(options.good_date), get_date(options.bad_date))
Exemple #2
0
def fe7():
    """Feature engineering for the '分支机构信息' (branch office) table.

    Builds per-company aggregates of branch-status dummies, branch
    open/close dates and their difference, then returns
    (prefix, train_df, test_df) de-duplicated by company name.
    """
    prefix = '分支机构信息'
    df, train_num = utils.get_df(prefix, ['企业名称', '分支机构状态', '分支成立时间', '分支死亡时间'])
    # Binarise the branch status, then one-hot encode it.
    df[prefix + '_分支机构状态'] = df[prefix + '_分支机构状态'].apply(lambda x: 1 if 1 == x else 0)
    df = pd.get_dummies(df, prefix=['dummy_' + prefix + '_分支机构状态'], columns=[prefix + '_分支机构状态'])
    # Parse open/close dates; '1899/12/30' looks like a spreadsheet epoch
    # placeholder and is treated as missing — TODO confirm against the data.
    df[prefix + '_分支成立时间'] = df[prefix + '_分支成立时间'].astype('str').apply(
        lambda x: utils.get_date(x) if x != 'nan' and x != '1899/12/30' else np.nan)
    df[prefix + '_分支死亡时间'] = df[prefix + '_分支死亡时间'].astype('str').apply(
        lambda x: utils.get_date(x) if x != 'nan' else np.nan)
    # Open-date minus close-date, in whatever units utils.get_date yields.
    df[prefix + '_分支成立时间_分支死亡时间_diff']\
        = df[prefix + '_分支成立时间'] - df[prefix + '_分支死亡时间']
    raw_features = df.columns.values[1:]
    group = df.groupby('企业名称', as_index=False)
    # Per-company aggregates of each dummy column.
    for f in raw_features:
        if 'dummy' in f:
            df = pd.merge(df, utils.get_agg(group, f, ['sum', 'mean']), on='企业名称', how='left')
    df = pd.merge(df, group['企业名称'].agg({prefix + '_count': 'count'}), on='企业名称', how='left')
    df = pd.merge(df, utils.get_agg(group, prefix + '_分支成立时间', ['max', 'min', 'mean', 'diff_mean']),
                  on='企业名称', how='left')
    df = pd.merge(df, utils.get_agg(group, prefix + '_分支死亡时间', ['max', 'min', 'mean', 'diff_mean']),
                  on='企业名称', how='left')
    df = pd.merge(df, utils.get_agg(group, prefix + '_分支成立时间_分支死亡时间_diff', ['max', 'min', 'mean']),
                  on='企业名称', how='left')
    # Drop the raw per-row columns, keeping only the per-company aggregates.
    for f in raw_features:
        del df[f]
    # Rows are stacked train-first; split back, then keep one row per company.
    train_df, test_df = df[:train_num], df[train_num:]
    train_df.drop_duplicates('企业名称', inplace=True)
    test_df.drop_duplicates('企业名称', inplace=True)
    return prefix, train_df, test_df
Exemple #3
0
  def generate_plot(self, filter_label):
    """Plot binned tweet counts over time, one curve per feature of the
    statistics entry named by filter_label.

    Raises Exception when filter_label is not present in the source stats.
    NOTE(review): `self.steps/20` is integer division only on Python 2 (or
    with integer-coercing steps) — confirm the intended Python version.
    """
    # Bin every source tweet's creation time into self.steps time bins.
    timestamps = [get_timestamp(t['created_at']) for t in self.source.iter_tweets()]
    bins = self.get_time_bins(timestamps)
    feature = self.source.get_stats(include_tweets=True)
    filter_stats = feature.get(filter_label, None)
    if not filter_stats:
      raise Exception("Could not get statistics for filter %s. Wrong filter label?" % filter_label)

    for k, feat in enumerate(filter_stats):    
      # Gather the different timestamps
      ttweets = filter_stats[feat]
      tally = [0] * self.steps

      timestamps = [float(get_timestamp(t['created_at'])) for t in ttweets]
      # Do some sort of histograms with it
      for ts in timestamps:
        j = self.get_bin(ts, bins)#index = int(ts/step)
        tally[j] = tally[j] + 1
      #tally, bins = np.histogram(timestamps, bins=steps)

      #bins = .5*(bins[1:]+bins[:-1])+starttime
      plt.plot(bins, tally, color=self.colors[k])
      dates = ["%s:%s" %(get_date(a).hour, get_date(a).minute) for j,a in enumerate(bins) if j%(self.steps/20) == 0]
      # Zero-pad single-digit minutes (e.g. "9:5" -> "9:05").
      for i, date in enumerate(dates):
        split = date.split(':')
        if len(split[1]) == 1:
          dates[i] = split[0] + ':0' + split[1]
      plt.ylabel('Number of Tweets')
      plt.xticks([b for j,b in enumerate(bins) if j%(self.steps/20)==0], dates, rotation=90)

    plt.legend(filter_stats.keys(),loc=1)
Exemple #4
0
def cli():
    """Command-line entry point: bisect Mozilla nightly builds.

    Variant without --bits/--persist; supports firefox, mobile and
    thunderbird.  (Python 2 syntax: uses the print statement.)
    """
    parser = OptionParser()
    parser.add_option("-b", "--bad", dest="bad_date",help="first known bad nightly build, default is today",
                      metavar="YYYY-MM-DD", default=str(datetime.date.today()))
    parser.add_option("-g", "--good", dest="good_date",help="last known good nightly build",
                      metavar="YYYY-MM-DD", default=None)
    parser.add_option("-e", "--addons", dest="addons",help="list of addons to install", metavar="PATH1,PATH2", default="")
    parser.add_option("-p", "--profile", dest="profile", help="profile to use with nightlies", metavar="PATH")
    parser.add_option("-a", "--args", dest="cmdargs", help="command-line arguments to pass to the application",
                      metavar="ARG1,ARG2", default="")
    parser.add_option("-n", "--app", dest="app", help="application name (firefox, mobile or thunderbird)",
                      metavar="[firefox|mobile|thunderbird]", default="firefox")
    parser.add_option("-r", "--repo", dest="repo_name", help="repository name on ftp.mozilla.org",
                      metavar="[tracemonkey|mozilla-1.9.2]", default=None)
    (options, args) = parser.parse_args()

    # Comma-separated option strings -> lists.
    addons = strsplit(options.addons, ",")
    cmdargs = strsplit(options.cmdargs, ",")

    # Fall back to an early date so the bisection window is non-empty.
    if not options.good_date:
        options.good_date = "2009-01-01"
        print "No 'good' date specified, using " + options.good_date

    runner = NightlyRunner(appname=options.app, addons=addons, repo_name=options.repo_name,
                           profile=options.profile, cmdargs=cmdargs)
    bisector = Bisector(runner, appname=options.app)
    bisector.bisect(get_date(options.good_date), get_date(options.bad_date))
Exemple #5
0
def check_dormancy(next_awaken=0,
                   dormancy_s_str='01:00:00',
                   dormancy_e_str='05:00:00'):
    """Sleep until the next wake-up time, honouring a nightly dormancy window.

    If the current time falls between dormancy_s_str and dormancy_e_str
    (today, local time), today's pending tasks are purged and the loop
    sleeps until the window ends; otherwise it sleeps until next_awaken.

    :param next_awaken: target wake-up time, in milliseconds since epoch
    :param dormancy_s_str: window start, "HH:MM:SS"
    :param dormancy_e_str: window end, "HH:MM:SS"
    :return: always 0
    """
    func_name = '[mainloop.check_dormancy]'
    # unit is millisecond
    now_time = int(time.time() * 1000)

    delay_time = 0
    # Today's window boundaries as epoch milliseconds.
    today_s_timestamp = int(
        time.mktime(
            time.strptime(
                utils.get_date(format='%Y%m%d ') + ' ' + dormancy_s_str,
                "%Y%m%d %H:%M:%S"))) * 1000
    today_e_timestamp = int(
        time.mktime(
            time.strptime(
                utils.get_date(format='%Y%m%d ') + ' ' + dormancy_e_str,
                "%Y%m%d %H:%M:%S"))) * 1000
    if now_time >= today_s_timestamp and now_time <= today_e_timestamp:
        # Inside the dormancy window: drop today's tasks and sleep it out.
        delay_time = (today_e_timestamp - now_time) / 1000
        sqlact.del_todaytask()
    else:
        delay_time = int(next_awaken - now_time) / 1000
    # Bug fix: a next_awaken in the past made delay_time negative and
    # time.sleep() raises ValueError on negative arguments — clamp to 0.
    delay_time = max(delay_time, 0)
    debug_p(func_name, 'delay=', delay_time)
    time.sleep(delay_time + 1)
    return 0
Exemple #6
0
    def date_data(self, req, data):
        """Fill `data` with date-picker values and a from/to date range.

        The range comes from the request args when present, otherwise
        defaults to the last 7 days; also builds the previous-week URL.
        """
        now = datetime.datetime.now()
        # Picker choices: days 1-31, months 1-12, last 10 years.
        data['days'] = range(1, 32)
        data['months'] = [ (i, calendar.month_name[i]) for i in range(1,13) ]        
        data['years'] = range(now.year, now.year - 10, -1)
        # From-date: taken from the request if supplied, else one week ago.
        if 'from_year' in req.args:
            from_date = get_date(req.args['from_year'], 
                                 req.args.get('from_month'),
                                 req.args.get('from_day'))

        else:
            from_date = datetime.datetime(now.year, now.month, now.day)
            from_date = from_date - datetime.timedelta(days=7) # 1 week ago, by default
        # To-date: from the request (end of that day) or right now.
        if 'to_year' in req.args:
            to_date = get_date(req.args['to_year'], 
                                 req.args.get('to_month'),
                                 req.args.get('to_day'),
                                 end_of_day=True)
        else:
            to_date = now
        
        data['from_date'] = from_date
        data['to_date'] = to_date
        data['prev_week'] = from_date - datetime.timedelta(days=7)
        # Build the "previous week" navigation link: shift the whole
        # window back by one week.
        args = dict(req.args)
        args['from_year'] = data['prev_week'].year
        args['from_month'] = data['prev_week'].month
        args['from_day'] = data['prev_week'].day
        args['to_year'] = from_date.year
        args['to_month'] = from_date.month
        args['to_day'] = from_date.day

        data['prev_url'] = req.href('/hours/user', **args)
    def search_by_date_range(cls):
        """Prompt for a start and end date and list tasks in that range.

        Re-prompts until the query returns at least one task, then returns
        a dict with the matching tasks and the chosen dates.
        """
        utils.clear_screen()
        print("Please enter start and end dates to search for a date range:\n")
        total = 0
        while total == 0:
            start_date = None
            end_date = None
            while not start_date:
                print("Start Date:")
                start_date = utils.get_date()
            while not end_date:
                print("End Date:")
                end_date = utils.get_date()

            tasks = Task.select().where((Task.date > start_date)
                                        & (Task.date < end_date)).order_by(
                                            Task.date.desc())
            total = len(tasks)
            # Bug fix: the failure message used to print unconditionally,
            # even when the query returned results.
            if total == 0:
                print("No results found... Please retry.")
        else:
            return {
                'tasks': tasks,
                'start_date': start_date,
                'end_date': end_date,
            }
Exemple #8
0
def fe4():
    """Feature engineering for the '双公示-法人行政许可信息' (administrative
    licence) table: per-company date aggregates and licence duration.

    Returns (prefix, train_df, test_df), de-duplicated by company name.
    """
    prefix = '双公示-法人行政许可信息'
    df, train_num = utils.get_df(prefix, ['企业名称', '许可决定日期', '许可截止期'])
    # Parse decision / expiry dates ('nan' strings -> missing).
    df[prefix + '_许可决定日期'] = df[prefix + '_许可决定日期'].astype('str').apply(
        lambda x: utils.get_date(x) if x != 'nan' else np.nan)
    df[prefix + '_许可截止期'] = df[prefix + '_许可截止期'].astype('str').apply(
        lambda x: utils.get_date(x) if x != 'nan' else np.nan)
    # Licence duration proxy: decision date minus expiry date.
    df[prefix +
       '_许可决定日期_许可截止期_diff'] = df[prefix + '_许可决定日期'] - df[prefix + '_许可截止期']
    group = df.groupby('企业名称', as_index=False)
    df = pd.merge(df,
                  utils.get_agg(group, prefix + '_许可决定日期',
                                ['max', 'min', 'mean', 'diff_mean']),
                  on='企业名称',
                  how='left')
    df = pd.merge(df,
                  utils.get_agg(group, prefix + '_许可截止期',
                                ['max', 'min', 'mean']),
                  on='企业名称',
                  how='left')
    df = pd.merge(df,
                  utils.get_agg(group, prefix + '_许可决定日期_许可截止期_diff',
                                ['max', 'min', 'mean']),
                  on='企业名称',
                  how='left')
    # Drop the raw per-row columns, keeping only the aggregates.
    for f in ['许可决定日期', '许可截止期', '许可决定日期_许可截止期_diff']:
        del df[prefix + '_' + f]
    # Split train/test and keep one row per company.
    train_df, test_df = df[:train_num], df[train_num:]
    train_df.drop_duplicates('企业名称', inplace=True)
    test_df.drop_duplicates('企业名称', inplace=True)
    return prefix, train_df, test_df
Exemple #9
0
    def date_data(self, req, data):
        """Fill `data` with date-picker values and a from/to date range.

        Defaults to the last 7 days when the request carries no range,
        and builds the previous-week navigation URL.
        NOTE(review): enumerate(calendar.month_name) also yields index 0
        with an empty month name — confirm the template tolerates it.
        """
        now = datetime.datetime.now()
        # Picker choices: days 1-31, (index, name) months, last 10 years.
        data['days'] = range(1, 32)
        data['months'] = list(enumerate(calendar.month_name))
        data['years'] = range(now.year, now.year - 10, -1)
        # From-date: from the request if supplied, else one week ago.
        if 'from_year' in req.args:
            from_date = get_date(req.args['from_year'],
                                 req.args.get('from_month'),
                                 req.args.get('from_day'))

        else:
            from_date = datetime.datetime(now.year, now.month, now.day)
            from_date = from_date - datetime.timedelta(days=7)
        # To-date: from the request (end of that day) or right now.
        if 'to_year' in req.args:
            to_date = get_date(req.args['to_year'],
                               req.args.get('to_month'),
                               req.args.get('to_day'),
                               end_of_day=True)
        else:
            to_date = now

        data['from_date'] = from_date
        data['to_date'] = to_date
        data['prev_week'] = from_date - datetime.timedelta(days=7)
        # Build the "previous week" link: shift the window back one week.
        args = dict(req.args)
        args['from_year'] = data['prev_week'].year
        args['from_month'] = data['prev_week'].month
        args['from_day'] = data['prev_week'].day
        args['to_year'] = from_date.year
        args['to_month'] = from_date.month
        args['to_day'] = from_date.day

        data['prev_url'] = req.href('/hours/user', **args)
Exemple #10
0
def cli():
    """Command-line entry point: bisect Mozilla nightly builds.

    Same as the other date-range variant: parses options, builds a
    NightlyRunner and bisects between the good and bad dates.
    (Python 2 syntax: uses the print statement.)
    """
    parser = OptionParser()
    parser.add_option("-b", "--bad", dest="bad_date",help="first known bad nightly build, default is today",
                      metavar="YYYY-MM-DD", default=str(datetime.date.today()))
    parser.add_option("-g", "--good", dest="good_date",help="last known good nightly build",
                      metavar="YYYY-MM-DD", default=None)
    parser.add_option("-e", "--addons", dest="addons",help="list of addons to install", metavar="PATH1,PATH2", default="")
    parser.add_option("-p", "--profile", dest="profile", help="profile to use with nightlies", metavar="PATH")
    parser.add_option("-a", "--args", dest="cmdargs", help="command-line arguments to pass to the application",
                      metavar="ARG1,ARG2", default="")
    parser.add_option("-n", "--app", dest="app", help="application name (firefox, mobile or thunderbird)",
                      metavar="[firefox|mobile|thunderbird]", default="firefox")
    parser.add_option("-r", "--repo", dest="repo_name", help="repository name on ftp.mozilla.org",
                      metavar="[tracemonkey|mozilla-1.9.2]", default=None)
    (options, args) = parser.parse_args()

    # Comma-separated option strings -> lists.
    addons = strsplit(options.addons, ",")
    cmdargs = strsplit(options.cmdargs, ",")

    # Fall back to an early date so the bisection window is non-empty.
    if not options.good_date:
        options.good_date = "2009-01-01"
        print "No 'good' date specified, using " + options.good_date

    runner = NightlyRunner(appname=options.app, addons=addons, repo_name=options.repo_name,
                           profile=options.profile, cmdargs=cmdargs)
    bisector = Bisector(runner, appname=options.app)
    bisector.bisect(get_date(options.good_date), get_date(options.bad_date))
Exemple #11
0
    def process_scheduling_update(self, new_scheduling):
        """Apply an update to an existing scheduling and return a JSON reply.

        Looks the scheduling up by identifier; on a miss, replies with a
        failure message, otherwise replaces the stored entry with the
        merged data and replies with a success message.
        """
        identifier = new_scheduling['identifier']
        index, schedules = self.get_scheduling(identifier)

        if index == -1:
            # Nothing to update: it never existed or was already processed.
            return json.dumps({
                "data":
                "update failed, the scheduling does not exist or has already been processed",
                "type": "scheduling_reply",
                "gathered_at": get_date(),
                "identifier": identifier
            })

        configs = get_configs()
        scheduling = schedules[index]

        # Replace the stored entry: drop the old one, merge in the new
        # data, then re-create it so it is persisted again.
        must_be_stored = True
        self.delete_scheduling(identifier)
        scheduling.update(new_scheduling['data'])
        self.process_scheduling_create(scheduling, configs['devices'],
                                       must_be_stored)

        return json.dumps({
            "data": "successful update",
            "type": "scheduling_reply",
            "gathered_at": get_date(),
            "identifier": scheduling['identifier']
        })
Exemple #12
0
def cli():
    """Command-line entry point: bisect Mozilla builds by nightly date
    range or by inbound revision range.

    With --inbound, both --good-rev and --bad-rev are required and the
    bisection walks inbound builds; otherwise it bisects nightlies
    between the good and bad dates.  (Python 2 syntax: print statement.)
    """
    parser = OptionParser()
    parser.add_option("-b", "--bad", dest="bad_date",help="first known bad nightly build, default is today",
                      metavar="YYYY-MM-DD", default=str(datetime.date.today()))
    parser.add_option("-g", "--good", dest="good_date",help="last known good nightly build",
                      metavar="YYYY-MM-DD", default=None)
    parser.add_option("-e", "--addons", dest="addons",help="list of addons to install", metavar="PATH1,PATH2", default="")
    parser.add_option("-p", "--profile", dest="profile", help="profile to use with nightlies", metavar="PATH")
    parser.add_option("-a", "--args", dest="cmdargs", help="command-line arguments to pass to the application",
                      metavar="ARG1,ARG2", default="")
    parser.add_option("-n", "--app", dest="app", help="application name (firefox, fennec or thunderbird)",
                      metavar="[firefox|fennec|thunderbird]", default="firefox")
    parser.add_option("-r", "--repo", dest="repo_name", help="repository name on ftp.mozilla.org",
                      metavar="[tracemonkey|mozilla-1.9.2]", default=None)
    parser.add_option("--bits", dest="bits", help="force 32 or 64 bit version (only applies to x86_64 boxes)",
                      choices=("32","64"), default=mozinfo.bits)
    parser.add_option("--persist", dest="persist", help="the directory in which files are to persist ie. /Users/someuser/Documents")
    parser.add_option("--inbound", action="store_true", dest="inbound", help="use inbound instead of nightlies (use --good-rev and --bad-rev options")
    parser.add_option("--bad-rev", dest="firstBadRevision",help="first known bad revision (use with --inbound)")
    parser.add_option("--good-rev", dest="lastGoodRevision",help="last known good revision (use with --inbound)")

    (options, args) = parser.parse_args()

    # Normalise the --bits string choice via the project helper.
    options.bits = parseBits(options.bits)

    # Comma-separated option strings -> lists.
    addons = strsplit(options.addons, ",")
    cmdargs = strsplit(options.cmdargs, ",")

    # Inbound builds exist only for firefox and fennec.
    inboundRunner = None
    if options.app == "firefox" or options.app == "fennec":
        inboundRunner = InboundRunner(appname=options.app,
                                      addons=addons,
                                      repo_name=options.repo_name,
                                      profile=options.profile,
                                      cmdargs=cmdargs, bits=options.bits,
                                      persist=options.persist)

    if options.inbound:
        # Revision-range bisection: both endpoints are mandatory.
        if not options.lastGoodRevision or not options.firstBadRevision:
            print "If bisecting inbound, both --good-rev and --bad-rev " \
                " must be set"
            sys.exit(1)
        bisector = Bisector(None, inboundRunner, appname=options.app,
                            lastGoodRevision=options.lastGoodRevision,
                            firstBadRevision=options.firstBadRevision)
        bisector.bisect_inbound()
    else:
        # Date-range bisection over nightlies.
        if not options.good_date:
            options.good_date = "2009-01-01"
            print "No 'good' date specified, using " + options.good_date
        nightlyRunner = NightlyRunner(appname=options.app, addons=addons,
                                      repo_name=options.repo_name,
                                      profile=options.profile, cmdargs=cmdargs,
                                      bits=options.bits,
                                      persist=options.persist)
        bisector = Bisector(nightlyRunner, inboundRunner, appname=options.app)
        bisector.bisect_nightlies(get_date(options.good_date),
                                  get_date(options.bad_date))
Exemple #13
0
def validate_reservation(slots, slot_details):
    """Validate dinner-reservation slots (city, cuisine, date, time, phone).

    Returns a validation-result dict from utils.build_validation_result
    for the first invalid slot, or {'isValid': True} when all pass.
    """
    city = utils.try_ex(lambda: slots['City'])
    cuisine = utils.try_ex(lambda: slots['Cuisine'])
    dinner_date = utils.try_ex(lambda: slots['Date'])
    dinner_time = utils.try_ex(lambda: slots['Time'])
    phone = utils.try_ex(lambda: slots['Phone'])

    if city and not isvalid_city(city):
        # NOTE: .format(city) is a no-op here — the message has no
        # placeholder; kept as-is to preserve the user-facing text.
        return utils.build_validation_result(
            False, 'City', 'We currently only support new york. '.format(city))

    if cuisine and not isvalid_cuisine(cuisine):
        return utils.build_validation_result(
            False, 'Cuisine',
            'We currently do not support {} as a valid cuisine.  Can you try a different cuisine?'
            .format(cuisine))

    if dinner_date:
        if not isvalid_date(dinner_date):
            return utils.build_validation_result(
                False, 'Date', 'That is an invalid date. '
                'On what date would you like to have dinner?')
        if utils.get_date(dinner_date) < utils.get_today():
            return utils.build_validation_result(
                False, 'Date',
                'Reservations must be scheduled at least one day in advance. '
                'Can you try a different date?')

    if dinner_time:
        # Bug fix: guard on dinner_date — the original called
        # utils.get_date(dinner_date) here even when no date was elicited
        # yet, crashing on None.
        if dinner_date and utils.get_date(dinner_date) == utils.get_today(
        ) and not in_at_least_one_hour(dinner_time):
            return utils.build_validation_result(
                False, 'Time',
                'Reservations must be scheduled at least one hour in advance. '
                'Can you try a later hour?')
    else:
        # No parsed time: fall back to the raw slot resolutions to
        # disambiguate AM/PM (midnight is accepted as-is).
        if slot_details and 'Time' in slot_details.keys():
            resolutions = slot_details['Time']['resolutions']
            if len(resolutions) > 0:
                first_resolution = resolutions[0]['value']
                if first_resolution == "00:00":
                    slots['Time'] = "00:00"
                else:
                    return utils.build_validation_result(
                        False, 'Time',
                        'Please let me know whether it is {}AM or {}PM. '.
                        format(first_resolution, first_resolution))

    if phone and not isvalid_phone(phone):
        return utils.build_validation_result(
            False, 'Phone',
            'The number you provided is invalid for a US phone. '
            'Could you say your phone number again?')

    return {'isValid': True}
Exemple #14
0
  def generate_plot(self):
    """Plot a single curve of binned tweet counts over time.

    Bins every source tweet's timestamp into self.steps bins and labels
    every third bin with its hour:minute (minutes not zero-padded).
    """
    timestamps = [get_timestamp(t['created_at']) for t in self.source.iter_tweets()]
    bins = self.get_time_bins(timestamps)
    tally = [0] * self.steps
    # Histogram: count tweets falling into each bin.
    for tstamp in timestamps:
      j = self.get_bin(tstamp, bins)
      tally[j] = tally[j] + 1

    plt.plot(bins, tally)
    dates = ["%s:%s" %(get_date(a).hour, get_date(a).minute) for i,a in enumerate(bins) if i%3 == 0]
    plt.xticks([b for i, b in enumerate(bins) if i%3 ==0], dates, rotation=0)
Exemple #15
0
def fe1():
    """Feature engineering for the company base-info / executives /
    investors table ('企业基本信息&高管信息&投资信息').

    Normalises registered capital to a single currency, parses founding
    and approval dates, derives per-person and per-investor company
    counts, cross-feature ratios, and label-encodes categorical columns
    (fitting the encoding on the train split only).  Returns
    (prefix, train_df, test_df), de-duplicated by company name.
    """
    prefix = '企业基本信息&高管信息&投资信息'
    df, train_num = utils.get_df(prefix, ['企业名称', '注册资金', '注册资本(金)币种名称', '企业(机构)类型名称',
                                          '行业门类代码', '成立日期', '核准日期', '住所所在地省份', '姓名',
                                          '法定代表人标志', '首席代表标志', '职务', '投资人', '出资比例'])
    # Investment ratios > 1 look like percentages; rescale them.
    df[prefix + '_出资比例'] = df[prefix + '_出资比例'].apply(lambda x: x if x <= 1 else x / 100)
    # Convert registered capital into a common currency via exch_rate.
    df[prefix + '_注册资金'] = df[[prefix + '_注册资金', prefix + '_注册资本(金)币种名称']].apply(
        lambda x: x[prefix + '_注册资金'] if x[prefix + '_注册资本(金)币种名称'] not in utils.exch_rate.keys()
        else x[prefix + '_注册资金'] * utils.exch_rate[x[prefix + '_注册资本(金)币种名称']], axis=1).fillna(0)
    df[prefix + '_注册资金_binning'] = df[prefix + '_注册资金'].apply(
        lambda x: utils.binning(x, [300, 500, 1000, 3000, 6000]))
    # Parse founding / approval dates; founding-minus-approval gap.
    df[prefix + '_成立日期'] = df[prefix + '_成立日期'].astype('str').apply(
        lambda x: utils.get_date(x[:10]) if x != 'nan' else np.nan)
    df[prefix + '_核准日期'] = df[prefix + '_核准日期'].apply(lambda x: utils.get_date(x[:10]))
    df[prefix + '_成立日期_核准日期_diff'] = df[prefix + '_成立日期'] - df[prefix + '_核准日期']
    # Titles held by the legal representative / chief representative.
    df[prefix + '_法定代表人职务'] = df[[prefix + '_法定代表人标志', prefix + '_职务']].apply(
        lambda x: x[prefix + '_职务'] if x[prefix + '_法定代表人标志'] == '是' else np.nan, axis=1)
    df[prefix + '_首席代表职务'] = df[[prefix + '_首席代表标志', prefix + '_职务']].apply(
        lambda x: x[prefix + '_职务'] if x[prefix + '_首席代表标志'] == '是' else np.nan, axis=1)
    # How many distinct companies each person / investor appears in.
    df = pd.merge(df, df.dropna(subset=[prefix + '_姓名']).groupby(
        prefix + '_姓名', as_index=False)['企业名称'].agg({prefix + '_姓名_企业名称_nunique': 'nunique'}),
                  on=prefix + '_姓名', how='left')
    df = pd.merge(df, df.dropna(subset=[prefix + '_投资人']).groupby(
        prefix + '_投资人', as_index=False)['企业名称'].agg({prefix + '_投资人_企业名称_nunique': 'nunique'}),
                  on=prefix + '_投资人', how='left')
    group = df.groupby('企业名称', as_index=False)
    df = pd.merge(df, utils.get_agg(group, prefix + '_姓名', ['nunique']), on='企业名称', how='left')
    df = pd.merge(df, utils.get_agg(group, prefix + '_投资人', ['nunique']), on='企业名称', how='left')
    df = pd.merge(df, utils.get_agg(group, prefix + '_姓名_企业名称_nunique', ['max', 'mean', 'sum']),
                  on='企业名称', how='left')
    df = pd.merge(df, utils.get_agg(group, prefix + '_投资人_企业名称_nunique', ['max', 'mean', 'sum']),
                  on='企业名称', how='left')
    df = pd.merge(df, utils.get_agg(group, prefix + '_出资比例', ['max', 'min', 'mean']), on='企业名称', how='left')
    # Cross-feature ratio features over categorical pairs.
    f_pairs = [
        [prefix + '_住所所在地省份', prefix + '_企业(机构)类型名称'],
        [prefix + '_住所所在地省份', prefix + '_行业门类代码'],
        [prefix + '_注册资金_binning', prefix + '_企业(机构)类型名称'],
        [prefix + '_注册资金_binning', prefix + '_行业门类代码'],
        [prefix + '_企业(机构)类型名称', prefix + '_行业门类代码']
    ]
    df = utils.get_ratio(df, f_pairs)
    # Drop intermediate per-row columns.
    for f in ['注册资本(金)币种名称', '姓名', '法定代表人标志', '首席代表标志', '职务', '投资人', '出资比例',
              '姓名_企业名称_nunique', '投资人_企业名称_nunique']:
        del df[prefix + '_' + f]
    train_df, test_df = df[:train_num], df[train_num:]
    train_df.drop_duplicates('企业名称', inplace=True)
    train_df.dropna(subset=[prefix + '_成立日期'], inplace=True)
    test_df.drop_duplicates('企业名称', inplace=True)
    # Label-encode categoricals with a mapping fitted on the train split;
    # unseen test categories map to -1.
    for f in ['法定代表人职务', '首席代表职务', '企业(机构)类型名称', '行业门类代码', '住所所在地省份']:
        label_dict = dict(zip(train_df[prefix + '_' + f].unique(), range(train_df[prefix + '_' + f].nunique())))
        train_df[prefix + '_' + f] = train_df[prefix + '_' + f].map(label_dict).fillna(-1).astype('int16')
        test_df[prefix + '_' + f] = test_df[prefix + '_' + f].map(label_dict).fillna(-1).astype('int16')
    return prefix, train_df, test_df
Exemple #16
0
 def search_by_date_range(self):
     """Prompt for a start and an end date, then return the tasks whose
     date falls in [start, end)."""
     utils.clear_screen()
     print("Please enter start and end dates to search for a date range:\n")
     print("Start date: ")
     start_date = utils.get_date()
     print("End date: ")
     end_date = utils.get_date()
     # Keep tasks whose parsed date lies inside the half-open range.
     matches = [
         task for task in self.tasks
         if start_date <= utils.convert_string_to_date(task.date) < end_date
     ]
     return matches
Exemple #17
0
def get_stock_data(csv_data):
    """Build a StockPrice from one CSV row: [name, date, price]."""
    name = csv_data[0].strip()
    raw_date = csv_data[1].strip()
    raw_price = csv_data[2].strip()
    return StockPrice(
        stock_name=name,
        price=float(raw_price),
        date=get_date(raw_date),
    )
Exemple #18
0
    def __init__(self, title, lang, num_pages, total_time):
        """Prepare the skeleton of a Daisy 2.02 NCC (ncc.html) document.

        start_content holds the XHTML prologue and <head> metadata,
        content collects the body entries, end_content closes the file.
        """
        header_lines = [
            u'<?xml version="1.0" encoding="utf-8"?>',
            u'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "xhtml1-transitional.dtd">',
            u'<html xmlns="http://www.w3.org/1999/xhtml">',
            u'<head>',
            u'<title>{}</title>'.format(title),
            u'<meta name="dc:title" content="{}"/>'.format(title),
            u'<meta name="dc:format" content="Daisy 2.02"/>',
            u'<meta name="dc:identifier" content="CVIT_DTB"/>',
            u'<meta name="dc:publisher" content="TTSDaisy_v4"/>',
            u'<meta name="dc:date" content="{}" scheme="yyyy-mm-dd"/>'.format(get_date()),
            u'<meta name="dc:language" content="{}" scheme="ISO 639"/>'.format(lang),
            u'<meta name="ncc:charset" content="utf-8"/>',
            u'<meta name="ncc:footnotes" content="0"/>',
            u'<meta name="ncc:pageFront" content="0"/>',
            u'<meta name="ncc:pageNormal" content="{}"/>'.format(num_pages),
            u'<meta name="ncc:pageSpecial" content="0"/>',
            u'<meta name="ncc:prodNotes" content="0"/>',
            u'<meta name="ncc:sidebars" content="0"/>',
            u'<meta name="ncc:setInfo" content="1 of 1"/>',
            u'<meta name="ncc:tocItems" content="{}"/>'.format(num_pages),
            u'<meta name="ncc:totalTime" content="{}" scheme="hh:mm:ss"/>'.format(get_time_label(total_time, False)),
            u'<meta name="ncc:files" content="{}"/>'.format(3*num_pages + 1),
            u'<meta name="ncc:generator" content="TTSDaisy_v4"/>',
            u'<meta name="ncc:narrator" content="espeak"/>',
            u'<meta http-equiv="Content-type" content="text/html; charset=utf-8"/>',
            u'</head>',
            u'<body>',
        ]
        # Every line (including the last) is newline-terminated, exactly
        # as in the original concatenated literal.
        self.start_content = u'\n'.join(header_lines) + u'\n'

        self.content = []

        self.end_content = u'</body>\n</html>\n'
Exemple #19
0
 def __init__(self,data):
     """Initialise a daily attack-statistics record from one event dict.

     Expects `data` to carry 'identifier' and 'timestamp' keys; the
     timestamp is split into a date and an hour via the utils helpers.
     """
     self._type          = "attack.daily.stats"
     self._identifier    = data["identifier"]
     self._date          = utils.get_date(data['timestamp'])
     self._counts        = 1                        # events counted so far
     self._hours         = utils.get_hour(data["timestamp"])
     self._hourly        = {self._hours: 1}         # per-hour event counts
def count_speeches(documents, all_names):
    """Count total and keyword-matching speeches per (date, governor).

    :param documents: iterable of (author, raw_date, body) triples
    :param all_names: governor names to match against the author field
    :return: (total_speeches, keyword_speeches, all_dates) where the
        first two map (date, gov) -> count and all_dates is sorted/unique.

    Relies on module-level ``granularity`` and ``keywords``.
    """
    # Removed: an unused month_translation dict that was defined here
    # but never referenced.
    total_speeches = defaultdict(int)
    keyword_speeches = defaultdict(int)
    all_dates = []

    for document in documents:
        name = document[0]  # By:
        date = get_date(document[1], granularity)  # Date:
        all_dates.append(date)

        body = document[2]
        for gov in all_names:
            if gov in name:  # often there are multiple authors listed
                total_speeches[(date, gov)] += 1
                for keyword in keywords:
                    if keyword in body:
                        keyword_speeches[(date, gov)] += 1
                        break  # any one keyword is enough

    all_dates = sorted(set(all_dates))
    return total_speeches, keyword_speeches, all_dates
Exemple #21
0
def shorten():
    """Create and persist a shortcode for a URL posted as JSON.

    Expects a POST body with a "url" key and an optional "shortcode";
    generates a random 6-character code when none is given.  Returns a
    201 response carrying the shortcode, or an error response.
    """
    shortcode = ""

    if request.method == 'POST':
        received = request.get_json(force=True)

        url = received["url"] if received["url"] else ""

        # Reject empty/invalid URLs before touching the database.
        if len(url) < 2 or utils.check_url(url) == False:
            return no_url()

        conn = utils.create_connection("test.db")

        check = utils.check_entry(url, conn)
        db_url = check[1] if check else False

        if db_url and db_url == url:
            conn.close()
            return already_used()

        try:
            shortcode = received["shortcode"]
        except KeyError:
            logging.warn("No shortcode provided, generating one...")
            shortcode = utils.make_key(6)

        if utils.check_shortcode(shortcode) == False:
            conn.close()
            return invalid_code()

        # Bug fix: these lines were outside the POST branch, so any
        # non-POST request raised NameError on `url`/`conn`.
        _date = utils.get_date()
        utils.new_entry(url, shortcode, _date, _date, conn)
        conn.close()
        return flask.make_response(shortcode, 201)
Exemple #22
0
def web_shorten(url):
    """Shorten a URL submitted via the web UI.

    Returns the generated shortcode, or an error response when the URL
    is invalid or already shortened.
    """
    url = url.strip()

    # Reject empty/invalid input before touching the database.
    if len(url) < 2 or utils.check_url(url) == False:
        return no_url()

    conn = utils.create_connection("test.db")

    existing = utils.check_entry(url, conn)
    stored_url = existing[1] if existing else False

    if stored_url and stored_url == url:
        conn.close()
        return already_used()

    code = utils.make_key(6)
    timestamp = utils.get_date()

    utils.new_entry(url, code, timestamp, timestamp, conn)
    conn.close()

    return code
Exemple #23
0
 def delete_container_ndays_ago(token, project_id, delete_days_ago=7):
     """Delete "kolla"-prefixed containers (and their objects) whose
     name-encoded creation date is more than n days old."""
     # List every container visible to this project.
     containers = OSSOperate.list_containers(token, project_id)
     if not containers:
         return
     # Timestamp of n days ago (seconds since epoch).
     time_days_ago = int(time.time()) - (delete_days_ago * 86400)
     # Date of n days ago, e.g. 20171115
     date_days_ago = get_date(time_days_ago)
     for container in containers:
         container_name = container.get("name")
         if container_name.startswith("kolla"):
             # Creation date is encoded in the name after the "kolla" prefix.
             container_date = int(container_name[5:])
             # Older than n days: delete its objects first, then the container.
             if int(date_days_ago) > container_date:
                 objs = OSSOperate.list_container_objs(
                     token, project_id, container_name)
                 for obj in objs:
                     obj_file_name = obj.get("name")
                     OSSOperate.delete_container_objs(
                         token, project_id, container_name, obj_file_name)
                 # Remove the now-empty container.
                 OSSOperate.delete_container(token, project_id,
                                             container_name)
Exemple #24
0
def fe6():
    """Build features from the '许可资质年检信息' (annual inspection) table.

    Returns:
        (prefix, train_df, test_df): the table prefix plus per-company
        train/test frames with dummy aggregates and date statistics.
    """
    prefix = '许可资质年检信息'
    df, train_num = utils.get_df(prefix, ['企业名称', '年检结果', '年检事项名称', '年检日期'])
    # Normalise the inspection result to two categories.
    df[prefix + '_年检结果'] = df[prefix + '_年检结果'].astype('str').apply(
        lambda x: '合格' if '合格' in x else '通过')
    # Keep the two meaningful inspection-item values; bucket the rest.
    df[prefix + '_年检事项名称'] = df[prefix + '_年检事项名称'].astype('str').apply(
        lambda x: x if '单位年检' == x or '对道路' in x else '其他')
    df = pd.get_dummies(df, prefix=['dummy_' + prefix + '_年检事项名称', 'dummy_' + prefix + '_年检结果'],
                        columns=[prefix + '_年检事项名称', prefix + '_年检结果'])
    # The catch-all bucket is redundant once the other dummies are encoded.
    del df['dummy_' + prefix + '_年检事项名称_其他']
    df[prefix + '_年检日期'] = df[prefix + '_年检日期'].astype('str').apply(
        lambda x: utils.get_date(x) if x != 'nan' else np.nan)
    raw_features = df.columns.values[1:]
    group = df.groupby('企业名称', as_index=False)
    for f in raw_features:
        if 'dummy' in f:
            df = pd.merge(df, utils.get_agg(group, f, ['sum', 'mean']), on='企业名称', how='left')
    df = pd.merge(df, group['企业名称'].agg({prefix + '_count': 'count'}), on='企业名称', how='left')
    df = pd.merge(df, utils.get_agg(group, prefix + '_年检日期', ['max', 'min', 'mean', 'diff_mean']),
                  on='企业名称', how='left')
    for f in raw_features:
        del df[f]
    # BUG FIX: copy the slices so the in-place drop_duplicates below does not
    # operate on views of `df` (SettingWithCopyWarning / silent no-op risk).
    train_df, test_df = df[:train_num].copy(), df[train_num:].copy()
    train_df.drop_duplicates('企业名称', inplace=True)
    test_df.drop_duplicates('企业名称', inplace=True)
    return prefix, train_df, test_df
Exemple #25
0
def cli(args=sys.argv[1:]):

    parser = OptionParser()
    parser.add_option("--timestamp", dest="timestamp", help="timestamp of "
                      "inbound build")
    parser.add_option("-a", "--addons", dest="addons",
                      help="list of addons to install",
                      metavar="PATH1,PATH2")
    parser.add_option("-p", "--profile", dest="profile", help="path to profile to user", metavar="PATH")
    parser.add_option("--bits", dest="bits", help="force 32 or 64 bit version (only applies to x86_64 boxes)",
                      choices=("32","64"), default=mozinfo.bits)
    parser.add_option("--persist", dest="persist", help="the directory in which files are to persist ie. /Users/someuser/Documents")

    options, args = parser.parse_args(args)
    if not options.timestamp:
        print "timestamp must be specified"
        sys.exit(1)
    options.bits = parseBits(options.bits)
    # XXX https://github.com/mozilla/mozregression/issues/50
    addons = strsplit(options.addons or "", ",")
    runner = InboundRunner(addons=addons, profile=options.profile,
                           bits=options.bits, persist=options.persist)
    runner.start(get_date(options.date))
    try:
        runner.wait()
    except KeyboardInterrupt:
        runner.stop()
Exemple #26
0
def data_load(args):
    """Load train/test feature dicts (from the pickle cache, or rebuilt from
    raw data when ``args.raw``) and return (x_train, y_train, x_test, y_test).
    """
    train_file_path = 'data/feature_dict_train.pkl'
    test_file_path = 'data/feature_dict_test.pkl'
    # BUG FIX: Featurelize was only created inside the `args.raw` branch, so
    # the cache-loading path crashed with NameError at input_data_prepare.
    featurelize = Featurelize(args.feature)
    if not args.raw:
        # Load previously prepared feature dicts; `with` closes the files
        # (the old code leaked the handles).
        with open(train_file_path, 'rb') as f:
            feature_dict_train = pickle.load(f)
        with open(test_file_path, 'rb') as f:
            feature_dict_test = pickle.load(f)
    else:
        # Determine the training date: explicit --date, else derived.
        if args.date:
            train_date = args.date
        else:
            train_date = get_date(1)
        print('train_date is {}'.format(train_date))
        data = Data(train_date, args.gap, args.sample, args.reserve)
        df, test_date = data.load()
        # The most recent date becomes the test split.
        train = df[df['tdate'] != test_date]
        test = df[df['tdate'] == test_date]
        feature_dict_train = featurelize.features_dict_prepare(train)
        feature_dict_test = featurelize.features_dict_prepare(test)
        # Cache the prepared feature dicts for later runs.
        with open(train_file_path, 'wb') as f:
            pickle.dump(feature_dict_train, f)
        with open(test_file_path, 'wb') as f:
            pickle.dump(feature_dict_test, f)
    x_train, y_train = featurelize.input_data_prepare(feature_dict_train)
    x_test, y_test = featurelize.input_data_prepare(feature_dict_test)
    return x_train, y_train, x_test, y_test
Exemple #27
0
    def is_scheduled_for_today(self) -> bool:
        """Check if game is scheduled for today.

        The NHL API returns a week's worth of games; the scoreboard shows
        only today's, so this filters on the clock/status strings and
        normalises ``game_clock`` for display.

        Returns:
            bool: True if the game is scheduled for (or live) today.
        """
        today = utils.get_date(0)

        # Scheduled for today: normalise the clock text.
        if today.upper() in self.game_clock or 'TODAY' in self.game_clock:
            self.game_clock = 'TODAY'
            return True
        # About to start counts as today too.
        if 'PRE GAME' in self.game_clock:
            self.game_clock = 'PRE-GAME'
            return True
        # A game already in progress is also today's game.
        return 'LIVE' in self.game_status
Exemple #28
0
    def _fill_info_snippet_pa(self, text, ners, is_RE):
        """Return [locations, dates, person names] extracted from *text*.

        NOTE(review): ``ners`` and ``is_RE`` are currently unused; the
        commented-out RE filter below is what would consume them.
        """
        # utils.get_date returns a list of dates found in the text.
        dates = utils.get_date(text, False)

        # Person names via the dedicated extractor.
        extractor = utils.person_extractors(text)
        persons_names = extractor.get_person_names()

        # ner_org = ners[0] # organisation information (a list)
        # ner_gpe = ners[1] # geographical position information (a list)
        # ner_person_name = ners[2]
        # ner_date = ners[3]

        # Organisations/universities via gazetteer lookup (Jorge's work).
        location = []
        for item in list(set(list_organization(text))):
            location.append(item)

        # location.append(str(ner_gpe[0]))
        # location.append(str(ner_org[0]))
        # RE check (disabled):
        # if is_RE:
        #    filtered_ne = set()
        #    for item in location:
        #        if re_organization(item.title()) is not None:
        #            filtered_ne.add(item)
        #    location = list(filtered_ne)

        return [list(set(location)), dates, persons_names]  # , str(ner_date[0])
 def edit_task(task):
     """Prompt the user for new values, update *task* in place and save it.

     Prints a banner with the current title, reads the replacement fields
     interactively, then persists the task to the database.
     """
     print("*" * 25)
     print("Editing task:")
     print("*" * 25)
     print(task.title)
     print("*" * 25)
     print("New Values:")
     print("*" * 25)
     # Request updated values of the task from the user.
     title = input("Title of the task: ")
     # BUG FIX: this line was corrupted in the source
     # (`input("Username: "******"Date of the task")` is invalid syntax --
     # a redaction artifact). Reconstructed as a username prompt followed
     # by the date banner.
     username = input("Username: ")
     print("Date of the task")
     date = utils.get_date()
     time_spent = utils.get_time_spent()
     notes = input("Notes (Optional, you can leave this empty): ")
     task.title = title
     task.username = username
     task.date = date
     task.time_spent = time_spent
     task.notes = notes
     # Persist the changes.
     task.save()
     print("Task updated")
Exemple #30
0
    def process_operation(self, data, device):
        """Execute an operation on *device*.

        Returns a JSON error reply when the device's driver is not enabled;
        otherwise returns None because the driver callback replies itself.
        """
        if device['status'] != True:
            # Driver disabled: hand back an error reply for the caller.
            return json.dumps({
                "uuid": device['uuid'],
                "data": "driver_not_enabled",
                "type": "operation_reply",
                "gathered_at": get_date(),
                "identifier": data['identifier']
            })

        pin = device['pin'] if 'pin' in device else None
        write = data['write'] if 'write' in data else None

        # The callback sends the operation_reply on its own.
        cb = self.callback(device['driver'],
                           "operation_reply",
                           device['uuid'],
                           pin,
                           write,
                           identifier=data['identifier'],
                           scheduling=False)
        cb()
        return None
Exemple #31
0
def change_command(user):
    """Update one worksheet row belonging to *user*.

    With ``-r <row>`` a specific row is edited (only if owned by the user);
    otherwise the user's most recent row is used.  ``-d``, ``-m`` and ``-t``
    replace the date, message and description respectively; omitted fields
    keep their stored values.
    """
    ws = get_worksheet()
    stored_row = ""
    try:
        if "-r" in sys.argv:
            row_number = int(get_argument(sys.argv, "-r"))
            # Row 1 is the header, so user rows start at 2.
            if row_number and row_number >= 2:
                selected_row = ws.row_values(row_number)
                # Users may only edit rows they own (column 0 = username).
                if user == selected_row[0]:
                    stored_row = selected_row
                else:
                    write_error("You can't edit others information")
                    exit()
            else:
                write_error("Invalid row")
                exit()
        else:
            # No row given: fall back to the user's last row.
            row_number = get_users_last_row(ws, user)
            stored_row = ws.row_values(row_number)

        display_loading_message("Updating info", "Information updated")
        # Replace only the fields whose flag was passed.
        new_date = get_date() if "-d" in sys.argv else stored_row[1]
        new_message = get_argument(
            sys.argv, "-m") if "-m" in sys.argv else stored_row[2]
        # The description column may be absent from short rows.
        new_description = get_argument(sys.argv, "-t") if "-t" in sys.argv else stored_row[3] \
            if len(stored_row) >= 4 else ""

        stored_row = [stored_row[0], new_date, new_message, new_description]
        update_row(ws, stored_row, row_number)

        hide_loading_message_with_error(False)
    except Exception:
        # NOTE(review): any failure (bad arguments, API error) is reported
        # as a generic loading error and the process exits.
        hide_loading_message_with_error(True)
        exit()
def make_productdetails_request(product_and_version, sleep, retry, callback):
    """Query productdetails to get release date.

    Retries up to ``retry`` times, sleeping ``sleep`` seconds whenever the
    server sends a ``Backoff`` header.  Returns a timezone-aware datetime,
    today's date when the version has no entry yet, or None on error or
    when all retries are exhausted.

    NOTE(review): ``callback`` is unused here -- confirm whether callers
    still depend on passing it.
    """

    for _ in range(retry):
        r = requests.get(PRODUCT_DETAILS_URL)
        if 'Backoff' in r.headers:
            # Server requested a backoff; wait before retrying.
            time.sleep(sleep)
        else:
            release_date_str = None
            try:
                release_date_str = (
                    r.json())['releases'][product_and_version]['date']
            except KeyError as e:
                # ['releases'][product_and_version]['date'] was not found.
                # Should be due to the version not being released yet.
                return utils.get_date('today')
            except BaseException as e:
                logger.error('productdetails query failed')
                logger.error(e, exc_info=True)
                return None
            # The release time is not publicly available. Set it to 6am PDT when
            # releases are often done.
            release_date = datetime.datetime.strptime(release_date_str,
                                                      '%Y-%m-%d')
            release_time = datetime.time(13)
            release_datetime = datetime.datetime.combine(
                release_date, release_time)
            release_datetime = pytz.utc.localize(release_datetime)
            return release_datetime

    logger.error(
        'Too many attempts in make_productdetails_request(retry={})'.format(
            retry))

    return None
Exemple #33
0
def updateStocks():
    """Refresh the cached market data when the stored date is stale.

    NOTE(review): create_times()[5] looks like the latest market time from
    otterapi -- confirm its exact meaning.
    """
    date=utils.get_date()
    #print "here"
    # date == 0 presumably means "never updated"; otherwise update only
    # when the stored date predates create_times()[5].
    if (date == 0 or date < otterapi.create_times()[5]):
        print [date,otterapi.create_times()[5]]
        utils.update_date(otterapi.create_times()[5])
        utils.update_market()
Exemple #34
0
 def process_scheduling_delete(self, scheduling):
     """Delete a scheduled job; return a JSON reply describing the outcome."""
     identifier = scheduling['identifier']
     if self.delete_scheduling(identifier):
         message = "successful delete"
     else:
         message = "delete failed, the scheduling does not exist or has already been processed"
     return json.dumps({
         "data": message,
         "type": "scheduling_reply",
         "gathered_at": get_date(),
         "identifier": identifier
     })
Exemple #35
0
def cli(args=sys.argv[1:]):
    """moznightly command line entry point"""
    # Build the command-line option parser.
    parser = OptionParser()
    parser.add_option("-d", "--date", dest="date", help="date of the nightly",
                      metavar="YYYY-MM-DD", default=str(datetime.date.today()))
    parser.add_option("-a", "--addons", dest="addons",
                      help="list of addons to install",
                      metavar="PATH1,PATH2")
    parser.add_option("-p", "--profile", dest="profile", help="path to profile to user", metavar="PATH")
    parser.add_option("-n", "--app", dest="app", help="application name",
                      type="choice",
                      metavar="[%s]" % "|".join(NightlyRunner.apps.keys()),
                      choices=NightlyRunner.apps.keys(),
                      default="firefox")
    parser.add_option("-r", "--repo", dest="repo_name", help="repository name on ftp.mozilla.org",
                      metavar="[tracemonkey|mozilla-1.9.2]", default=None)
    options, args = parser.parse_args(args)

    # XXX https://github.com/mozilla/mozregression/issues/50
    addon_paths = strsplit(options.addons or "", ",")

    # Launch the nightly and block until it exits (Ctrl-C stops it).
    runner = NightlyRunner(appname=options.app, addons=addon_paths,
                           profile=options.profile, repo_name=options.repo_name)
    runner.start(get_date(options.date))
    try:
        runner.wait()
    except KeyboardInterrupt:
        runner.stop()
Exemple #36
0
    def _fill_info_snippet_pa_new_idea(self, action, text, ners, is_RE):
        """Return [universities, years, persons] extracted from *text*.

        ``action`` is a 7-slot indicator vector selecting which extracted
        fields are kept (mapping in the table below); ``None`` keeps both
        universities and years.  ``ners`` and ``is_RE`` are currently
        unused -- the commented-out code is what would consume them.
        """
        sent_uni = []
        sent_year = []
        sent_person = []

        #actions[x, x, x, x, x, x, x]
        # if [1, x, x, x, x, x, x] -> keep uni
        # if [x, 1, x, x, x, x, x] -> NO uni
        # if [x, x, 1, x, x, x, x] -> keep years
        # if [x, x, x, 1, x, x, x] -> NO years
        # if [x, x, x, x, 1, x, x] -> Keep both
        # if [x, x, x, x, x, 1, x] -> NOne
        # if [x, x, x, x, x, x, 1] -> change query

        # utils.get_date returns a list of dates found in the text.
        date = utils.get_date(text, False)
        location = []

        #ner_org = ners[0] # organisation information (a list)
        #ner_gpe = ners[1] # geographical position information (a list)
        #ner_person_name = ners[2]
        #ner_date = ners[3]
        """use gazettees from Jorge work"""
        Gazettee_university = list(set(list_organization(text)))
        for item in Gazettee_university:
            location.append(item)

        # Person entities; `nlp` is presumably a spaCy pipeline (entities
        # expose .label_) -- TODO confirm.
        doc = nlp(text)
        people = [ee for ee in doc.ents if ee.label_ == 'PERSON']

        #location.append(str(ner_gpe[0]))
        #location.append(str(ner_org[0]))
        """use RE check"""
        #if is_RE:
        #    filtered_ne = set()
        #    for item in location:
        #        if re_organization(item.title()) is not None:
        #            filtered_ne.add(item)

        #    location = list(filtered_ne)

        "if uni is selected in action"
        if action is None:
            # Default when no action is provided: keep universities + years.
            sent_uni = list(set(location))
            sent_year = date
        else:
            if action[0]:
                sent_uni = list(set(location))
            "if years are selected in action"
            if action[2]:
                sent_year = date
            if action[4]:
                sent_uni = list(set(location))
                sent_year = date

        sent_person = list(set(people))

        return [sent_uni, sent_year, sent_person]
Exemple #37
0
def fe9():
    """Build features from the '资质登记(变更)信息' (qualification registry) table.

    Returns:
        (prefix, train_df, test_df): the table prefix plus per-company
        train/test frames with dummy aggregates and date statistics.
    """
    def date_proc(x1, x2):
        # Prefer the effective date; fall back to the recognition date.
        if 'nan' == x1 and 'nan' == x2:
            return np.nan
        if x1 != 'nan':
            return utils.get_date(x1)
        return utils.get_date(x2)

    def cert_name(x):
        # Bucket qualification names into three categories.
        if '高新技术' in x:
            return '高新技术'
        if '建筑施工' in x:
            return '建筑施工'
        return '其他'

    prefix = '资质登记(变更)信息'
    df, train_num = utils.get_df(prefix,
                                 ['企业名称', '资质名称', '资质生效期', '资质截止期', '认定日期'])
    df[prefix + '_资质名称'] = df[prefix + '_资质名称'].astype('str').apply(cert_name)
    df = pd.get_dummies(df,
                        prefix=['dummy_' + prefix + '_资质名称'],
                        columns=[prefix + '_资质名称'])
    df[prefix + '_资质生效期'] = df[[
        prefix + '_资质生效期', prefix + '_认定日期'
    ]].astype('str').apply(
        lambda x: date_proc(x[prefix + '_资质生效期'], x[prefix + '_认定日期']), axis=1)
    # Year '1950' looks like a placeholder; treat it as missing.
    df[prefix + '_资质截止期'] = df[prefix + '_资质截止期'].astype('str').apply(
        lambda x: utils.get_date(x) if x != 'nan' and x[:4] != '1950' else np.nan)
    df[prefix + '_资质生效期_资质截止期_diff'] = (
        df[prefix + '_资质生效期'] - df[prefix + '_资质截止期'])
    raw_features = df.columns.values[1:]
    group = df.groupby('企业名称', as_index=False)
    for f in raw_features:
        if 'dummy' in f:
            df = pd.merge(df,
                          utils.get_agg(group, f, ['sum', 'mean']),
                          on='企业名称',
                          how='left')
    df = pd.merge(df,
                  utils.get_agg(group, prefix + '_资质生效期',
                                ['max', 'min', 'mean']),
                  on='企业名称',
                  how='left')
    df = pd.merge(df,
                  utils.get_agg(group, prefix + '_资质截止期', ['min']),
                  on='企业名称',
                  how='left')
    df = pd.merge(df,
                  utils.get_agg(group, prefix + '_资质生效期_资质截止期_diff', ['min']),
                  on='企业名称',
                  how='left')
    for f in raw_features:
        del df[f]
    # BUG FIX: copy the slices so the in-place drop_duplicates below does not
    # operate on views of `df` (SettingWithCopyWarning / silent no-op risk).
    train_df, test_df = df[:train_num].copy(), df[train_num:].copy()
    train_df.drop_duplicates('企业名称', inplace=True)
    test_df.drop_duplicates('企业名称', inplace=True)
    return prefix, train_df, test_df
Exemple #38
0
	def test_get_date(self):
		"""get_date() must yield an 8-character, dot-separated date string."""
		date = get_date()
		# Overall length first, then the three two-digit groups.
		self.assertEqual(len(date), 8)
		parts_of_date = date.split(".")
		self.assertEqual(len(parts_of_date), 3)
		for part in parts_of_date:
			self.assertTrue(part.isdigit() and len(part) == 2)
Exemple #39
0
 def process_scheduling_read(self, scheduling):
     """Return a JSON reply listing every known schedule."""
     reply = {
         "data": self.get_schedules(),
         "type": "scheduling_reply",
         "gathered_at": get_date(),
         "identifier": scheduling['identifier']
     }
     return json.dumps(reply)
Exemple #40
0
def setting():
    """Render the user-settings page, or redirect anonymous users to '/'."""
    if is_login():
        # Selectable birthday ranges: 1900..current year, all months/days.
        dates = {
            'year': [y for y in range(1900, 1+get_date(time())['year'])],
            'month': [m for m in range(1, 13)],
            'day': [d for d in range(1, 32)]}
        birthday = db_users.Users().get_birthday_dict(db_users.Users().get_uid_by_username(session['username']))
        departments = db_department.Department().get_all_department()
        user = db_users.Users().get_user(session['username'])
        return render_template('login/user_setting.html',
                               user=user,
                               departments=departments,
                               dates=dates,
                               birthday=birthday)
    return redirect('/')
Exemple #41
0
def cli():
    """Command line entry point: download and launch a nightly build."""
    parser = OptionParser()
    parser.add_option("-d", "--date", dest="date", help="date of the nightly",
                      metavar="YYYY-MM-DD", default=str(datetime.date.today()))
    parser.add_option("-a", "--addons", dest="addons", help="list of addons to install",
                      metavar="PATH1,PATH2", default="")
    parser.add_option("-p", "--profile", dest="profile", help="path to profile to user", metavar="PATH")
    parser.add_option("-n", "--app", dest="app", help="application name (firefox or thunderbird)",
                      metavar="[firefox|thunderbird]", default="firefox")
    parser.add_option("-r", "--repo", dest="repo_name", help="repository name on ftp.mozilla.org",
                      metavar="[tracemonkey|mozilla-1.9.2]", default=None)
    options, args = parser.parse_args()

    # NOTE(review): unlike the other cli variants, this one does not wait
    # for the runner to exit.
    addon_list = strsplit(options.addons, ",")
    runner = NightlyRunner(appname=options.app, addons=addon_list,
                           profile=options.profile, repo_name=options.repo_name)
    runner.start(get_date(options.date))
Exemple #42
0
def general_user_detail(uid):
    """Render the admin page for editing the general user with id *uid*."""
    from utils import get_date
    import time
    import whuDa.model.department as db_department

    user = db_users.Users().get_user_by_id(uid)
    current_year = get_date(time.time())['year']
    # Selectable birthday ranges: 1900..current year, all months/days.
    dates = {
        'year': list(range(1900, current_year + 1)),
        'month': list(range(1, 13)),
        'day': list(range(1, 32))}
    birthday = db_users.Users().get_birthday_dict(uid)
    departments = db_department.Department().get_all_department()
    return render_template('admin/update_general_user.html',
                           user=user,
                           dates=dates,
                           birthday=birthday,
                           departments=departments)
Exemple #43
0
    def _get_stat(self):
        """Build the 31-day stats text: one 'date calories' line per entry."""
        lines = []

        for row in calc.get_stat_data(self.user_id, days=31):
            date = row.get('date')
            calories = row.get('calories')

            pieces = []
            if date is not None:
                pieces.append(u"{} ".format(u.get_date(date)))
            if calories is not None:
                pieces.append(u"{0} ккал".format(calories))

            # Skip rows that produced no text at all.
            entry = u"".join(pieces)
            if entry:
                lines.append(entry)

        return u"\n".join(lines)
Exemple #44
0
def cli(args=sys.argv[1:]):
    """moznightly command line entry point"""
    # Build the command-line option parser.
    parser = OptionParser()
    parser.add_option("-d", "--date", dest="date", help="date of the nightly",
                      metavar="YYYY-MM-DD", default=str(datetime.date.today()))
    parser.add_option("-a", "--addons", dest="addons",
                      help="list of addons to install",
                      metavar="PATH1,PATH2")
    parser.add_option("-p", "--profile", dest="profile", help="path to profile to user", metavar="PATH")
    parser.add_option("-n", "--app", dest="app", help="application name",
                      type="choice",
                      metavar="[%s]" % "|".join(NightlyRunner.apps.keys()),
                      choices=NightlyRunner.apps.keys(),
                      default="firefox")
    parser.add_option("-r", "--repo", dest="repo_name", help="repository name on ftp.mozilla.org",
                      metavar="[tracemonkey|mozilla-1.9.2]", default=None)
    parser.add_option("--bits", dest="bits", help="force 32 or 64 bit version (only applies to x86_64 boxes)",
                      choices=("32","64"), default=mozinfo.bits)
    parser.add_option("--persist", dest="persist", help="the directory in which files are to persist ie. /Users/someuser/Documents")
    options, args = parser.parse_args(args)

    options.bits = parseBits(options.bits)

    # XXX https://github.com/mozilla/mozregression/issues/50
    addon_paths = strsplit(options.addons or "", ",")

    # Launch the nightly and block until it exits (Ctrl-C stops it).
    runner = NightlyRunner(appname=options.app, addons=addon_paths,
                           profile=options.profile, repo_name=options.repo_name, bits=options.bits,
                           persist=options.persist)
    runner.start(get_date(options.date))
    try:
        runner.wait()
    except KeyboardInterrupt:
        runner.stop()
Exemple #45
0
 def ensure_datetime(self, arg):
   """Coerce a timestamp string to a datetime; pass datetimes through."""
   if isinstance(arg, str):
     return get_date(arg)
   if isinstance(arg, datetime):
     return arg
   raise TypeError("Argument to DateFilter should be a timestamp string or a datetime object")
Exemple #46
0
    def display_html(self, req, query):
        """returns the HTML according to a query for /hours view"""
        db = self.env.get_db_cnx()

        # The most recent query is stored in the user session;
        orig_list = None
        orig_time = datetime.now(utc)
        query_time = int(req.session.get('query_time', 0))
        query_time = datetime.fromtimestamp(query_time, utc)
        query_constraints = unicode(query.constraints)
        if query_constraints != req.session.get('query_constraints') \
                or query_time < orig_time - timedelta(hours=1):
            tickets = query.execute(req, db)
            # New or outdated query, (re-)initialize session vars
            req.session['query_constraints'] = query_constraints
            req.session['query_tickets'] = ' '.join([str(t['id'])
                                                     for t in tickets])
        else:
            orig_list = [int(id) for id
                         in req.session.get('query_tickets', '').split()]
            tickets = query.execute(req, db, orig_list)
            orig_time = query_time

        context = Context.from_request(req, 'query')
        ticket_data = query.template_data(context, tickets, orig_list, orig_time, req)

        # For clients without JavaScript, we add a new constraint here if
        # requested
        constraints = ticket_data['constraints']
        if 'add' in req.args:
            field = req.args.get('add_filter')
            if field:
                constraint = constraints.setdefault(field, {})
                constraint.setdefault('values', []).append('')
                # FIXME: '' not always correct (e.g. checkboxes)

        req.session['query_href'] = query.get_href(context.href)
        req.session['query_time'] = to_timestamp(orig_time)
        req.session['query_tickets'] = ' '.join([str(t['id'])
                                                 for t in tickets])

        # data dictionary for genshi
        data = {}

        # get data for saved queries
        query_id = req.args.get('query_id')
        if query_id:
            try:
                query_id = int(query_id)
            except ValueError:
                add_warning(req, "query_id should be an integer, you put '%s'" % query_id)
                query_id = None
        if query_id:
            data['query_id'] = query_id
            query_data = self.get_query(query_id)

            data['query_title'] = query_data['title']
            data['query_description'] = query_data['description']

        data.setdefault('report', None)
        data.setdefault('description', None)

        data['all_columns'] = query.get_all_columns() + self.get_columns()
        # Don't allow the user to remove the id column        
        data['all_columns'].remove('id')
        data['all_textareas'] = query.get_all_textareas()

        # need to re-get the cols because query will remove our fields
        cols = req.args.get('col')
        if isinstance(cols, basestring):
            cols = [cols]
        if not cols:
            cols = query.get_columns() + self.get_default_columns()
        data['col'] = cols

        now = datetime.now()
        # get the date range for the query
        if 'from_year' in req.args:
            from_date = get_date(req.args['from_year'], 
                                 req.args.get('from_month'),
                                 req.args.get('from_day'))

        else:
            from_date = datetime(now.year, now.month, now.day)
            from_date = from_date - timedelta(days=7) # 1 week ago, by default

        if 'to_year' in req.args:
            to_date = get_date(req.args['to_year'], 
                               req.args.get('to_month'),
                               req.args.get('to_day'),
                               end_of_day=True)
        else:
            to_date = now
        
        data['prev_week'] = from_date - timedelta(days=7)
        data['months'] = [ (i, calendar.month_name[i]) for i in range(1,13) ]        
        data['years'] = range(now.year, now.year - 10, -1)
        data['days'] = range(1, 32)
        data['users'] = get_all_users(self.env)
        data['cur_worker_filter'] = req.args.get('worker_filter', '*any')

        data['from_date'] = from_date
        data['to_date'] = to_date

        ticket_ids = [t['id'] for t in tickets]

        # generate data for ticket_times
        time_records = self.get_ticket_hours(ticket_ids, from_date=from_date, to_date=to_date, worker_filter=data['cur_worker_filter'])

        data['query'] = ticket_data['query']
        data['context'] = ticket_data['context']
        data['row'] = ticket_data['row'] 
        if 'comments' in req.args.get('row', []):
            data['row'].append('comments')
        data['constraints'] = ticket_data['constraints']

        our_labels = dict([(f['name'], f['label']) for f in self.fields])
        labels = ticket_data['labels']
        labels.update(our_labels)
        data['labels'] = labels

        order = req.args.get('order')
        desc = bool(req.args.get('desc'))
        data['order'] = order
        data['desc'] = desc

        headers = [{'name': col, 
                    'label' : labels.get(col),
                    'href': self.get_href(query, req.args,
                                          context.href, 
                                          order=col,
                                          desc=(col == order and not desc)
                                          )
                    } for col in cols]

        data['headers'] = headers

        data['fields'] = ticket_data['fields']
        data['modes'] = ticket_data['modes']


        # group time records
        time_records_by_ticket = {}
        for record in time_records:
            id = record['ticket']
            if id not in time_records_by_ticket:
                time_records_by_ticket[id] = []

            time_records_by_ticket[id].append(record)

        data['extra_group_fields'] = dict(ticket = dict(name='ticket', type='select', label='Ticket'),
                                          worker = dict(name='worker', type='select', label='Worker'))

        num_items = 0
        data['groups'] = []

        # merge ticket data into ticket_time records
        for key, tickets in ticket_data['groups']:
            ticket_times = []
            total_time = 0
            total_estimated_time = 0
            for ticket in tickets:
                records = time_records_by_ticket.get(ticket['id'], [])
                [rec.update(ticket) for rec in records]
                ticket_times += records

            # sort ticket_times, if needed
            if order in our_labels:                
                ticket_times.sort(key=lambda x: x[order], reverse=desc)
            data['groups'].append((key, ticket_times))
            num_items += len(ticket_times)


        data['double_count_warning'] = ''

        # group by ticket id or other time_ticket fields if necessary
        if req.args.get('group') in data['extra_group_fields']:
            query.group = req.args.get('group')
            if not query.group == "id":
                data['double_count_warning'] = "Warning: estimated hours may be counted more than once if a ticket appears in multiple groups"

            tickets = data['groups'][0][1]
            groups = {}
            for time_rec in tickets:
                key = time_rec[query.group]
                if not key in groups:
                    groups[key] = []
                groups[key].append(time_rec)
            data['groups'] = sorted(groups.items())

        total_times = dict((k, self.format_hours(sum(rec['seconds_worked'] for rec in v))) for k, v in data['groups'])
        total_estimated_times = {}
        for key, records in data['groups']:
            seen_tickets = set()
            est = 0
            for record in records:
                # do not double-count tickets
                id = record['ticket']
                if id in seen_tickets:
                    continue
                seen_tickets.add(id)
                estimatedhours = record.get('estimatedhours') or 0
                try:
                    estimatedhours = float(estimatedhours)
                except ValueError:
                    estimatedhours = 0
                est +=  estimatedhours * 3600
            total_estimated_times[key] = self.format_hours(est)

        data['total_times'] = total_times
        data['total_estimated_times'] = total_estimated_times

        # format records
        for record in time_records:
            if 'seconds_worked' in record:
                record['seconds_worked'] = self.format_hours(record['seconds_worked']) # XXX misleading name
            if 'time_started' in record:
                record['time_started'] = self.format_date(record['time_started'])
            if 'time_submitted' in record:
                record['time_submitted'] = self.format_date(record['time_submitted'])
            

        data['query'].num_items = num_items
        data['labels'] = ticket_data['labels']
        data['labels'].update(labels)
        data['can_add_hours'] = req.perm.has_permission('TICKET_ADD_HOURS')

        data['multiproject'] = self.env.is_component_enabled(MultiprojectHours)

        from web_ui import TracUserHours
        data['user_hours'] = self.env.is_component_enabled(TracUserHours)

        # return the rss, if requested
        if req.args.get('format') == 'rss':
            return self.queryhours2rss(req, data)

        # return the csv, if requested
        if req.args.get('format') == 'csv':
            self.queryhours2csv(req, data)

        # add rss link
        rss_href = req.href(req.path_info, format='rss')
        add_link(req, 'alternate', rss_href, _('RSS Feed'),
                 'application/rss+xml', 'rss')

        # add csv link
        add_link(req, 'alternate', req.href(req.path_info, format='csv', **req.args), 'CSV', 'text/csv', 'csv')
                
        # add navigation of weeks
        prev_args = dict(req.args)        
        next_args = dict(req.args)
                
        prev_args['from_year'] = (from_date - timedelta(days=7)).year
        prev_args['from_month'] = (from_date - timedelta(days=7)).month
        prev_args['from_day'] = (from_date - timedelta(days=7)).day
        prev_args['to_year'] = from_date.year
        prev_args['to_month'] = from_date.month
        prev_args['to_day'] = from_date.day        
        
        next_args['from_year'] = to_date.year
        next_args['from_month'] = to_date.month
        next_args['from_day'] = to_date.day
        next_args['to_year'] = (to_date + timedelta(days=7)).year
        next_args['to_month'] = (to_date + timedelta(days=7)).month
        next_args['to_day'] = (to_date + timedelta(days=7)).day
        
        add_link(req, 'prev', self.get_href(query, prev_args, context.href), _('Prev Week'))
        add_link(req, 'next', self.get_href(query, next_args, context.href), _('Next Week'))                                            
        prevnext_nav(req, _('Prev Week'), _('Next Week'))
        
        add_ctxtnav(req, 'Cross-Project Hours', req.href.hours('multiproject'))
        add_ctxtnav(req, 'Hours by User', req.href.hours('user', from_day=from_date.day, 
                                                                 from_month=from_date.month, 
                                                                 from_year=from_date.year, 
                                                                 to_day=to_date.year, 
                                                                 to_month=to_date.month, 
                                                                 to_year=to_date.year))
        add_ctxtnav(req, 'Saved Queries', req.href.hours('query/list'))
        
        add_stylesheet(req, 'common/css/report.css')
        add_script(req, 'common/js/query.js')
        
        return ('hours_timeline.html', data, 'text/html')
Exemple #47
0
 def _last_update(self):
     """Return the "last updated" status string for the current UTC moment."""
     now_utc = datetime.datetime.utcnow()
     formatted_date = u.get_date(now_utc)
     formatted_time = u.get_time(now_utc)
     # NOTE: the leading word is a user-facing Russian label ("Updated:").
     return u"Обновлено: {time} {date}".format(time=formatted_time, date=formatted_date)
Exemple #48
0
 def clock(self, screen):
     """Draw the current time (large) and date (small) onto *screen*."""
     # Each entry: (font size, text producer, top-left blit position).
     layout = (
         (65, get_time, (80, 10)),
         (35, get_date, (70, 80)),
     )
     for size, text_source, position in layout:
         self.set_font(size)
         rendered = self.font.render(text_source(), True, WHITE)
         screen.blit(rendered, position)
Exemple #49
0
    def display_html(self, req, query):
        """Render the /hours view for *query*.

        Executes the ticket query (reusing the session-cached result when the
        constraints are unchanged and less than an hour old), attaches hour
        records to each ticket, aggregates worked and estimated hours per
        group, and returns the ``(template, data, content_type)`` tuple for
        Genshi — or an RSS/CSV response when ``format=rss``/``format=csv`` is
        requested.
        """
        db = self.env.get_db_cnx()

        # The most recent query is stored in the user session; reuse its
        # ticket list when the constraints match and it is under an hour old.
        orig_list = None
        orig_time = datetime.now(utc)
        query_time = int(req.session.get("query_time", 0))
        query_time = datetime.fromtimestamp(query_time, utc)
        query_constraints = unicode(query.constraints)
        if query_constraints != req.session.get("query_constraints") or query_time < orig_time - timedelta(hours=1):
            tickets = query.execute(req, db)
            # New or outdated query, (re-)initialize session vars
            req.session["query_constraints"] = query_constraints
            req.session["query_tickets"] = " ".join([str(t["id"]) for t in tickets])
        else:
            orig_list = [int(tid) for tid in req.session.get("query_tickets", "").split()]
            tickets = query.execute(req, db, orig_list)
            orig_time = query_time

        context = Context.from_request(req, "query")
        ticket_data = query.template_data(context, tickets, orig_list, orig_time, req)

        # For clients without JavaScript, we add a new constraint here if
        # requested
        constraints = ticket_data["clauses"][0]
        if "add" in req.args:
            field = req.args.get("add_filter")
            if field:
                constraint = constraints.setdefault(field, {})
                constraint.setdefault("values", []).append("")
                # FIXME: '' not always correct (e.g. checkboxes)

        req.session["query_href"] = query.get_href(context.href)
        req.session["query_time"] = to_timestamp(orig_time)
        req.session["query_tickets"] = " ".join([str(t["id"]) for t in tickets])

        # data dictionary for genshi
        data = {}

        # get data for saved queries
        query_id = req.args.get("query_id")
        if query_id:
            try:
                query_id = int(query_id)
            except ValueError:
                add_warning(req, "query_id should be an integer, you put '%s'" % query_id)
                query_id = None
        if query_id:
            data["query_id"] = query_id
            query_data = self.get_query(query_id)

            data["query_title"] = query_data["title"]
            data["query_description"] = query_data["description"]

        data.setdefault("report", None)
        data.setdefault("description", None)

        data["all_columns"] = query.get_all_columns() + self.get_columns()
        # Don't allow the user to remove the id column
        data["all_columns"].remove("id")
        data["all_textareas"] = query.get_all_textareas()

        # need to re-get the cols because query will remove our fields
        cols = req.args.get("col")
        if isinstance(cols, basestring):
            cols = [cols]
        if not cols:
            cols = query.get_columns() + self.get_default_columns()
        data["col"] = cols

        now = datetime.now()
        # get the date range for the query; defaults to the trailing week
        if "from_year" in req.args:
            from_date = get_date(req.args["from_year"], req.args.get("from_month"), req.args.get("from_day"))

        else:
            from_date = datetime(now.year, now.month, now.day)
            from_date = from_date - timedelta(days=7)  # 1 week ago, by default

        if "to_year" in req.args:
            to_date = get_date(req.args["to_year"], req.args.get("to_month"), req.args.get("to_day"), end_of_day=True)
        else:
            to_date = now

        data["prev_week"] = from_date - timedelta(days=7)
        data["months"] = list(enumerate(calendar.month_name))
        data["years"] = range(now.year, now.year - 10, -1)
        data["days"] = range(1, 32)
        data["users"] = get_all_users(self.env)
        data["cur_worker_filter"] = req.args.get("worker_filter", "*any")

        data["from_date"] = from_date
        data["to_date"] = to_date

        ticket_ids = [t["id"] for t in tickets]

        # generate data for ticket_times
        time_records = self.get_ticket_hours(
            ticket_ids, from_date=from_date, to_date=to_date, worker_filter=data["cur_worker_filter"]
        )

        data["query"] = ticket_data["query"]
        data["context"] = ticket_data["context"]
        data["row"] = ticket_data["row"]
        if "comments" in req.args.get("row", []):
            data["row"].append("comments")
        data["constraints"] = ticket_data["clauses"]

        our_labels = dict([(f["name"], f["label"]) for f in self.fields])
        labels = TicketSystem(self.env).get_ticket_field_labels()
        labels.update(our_labels)
        data["labels"] = labels

        order = req.args.get("order")
        desc = bool(req.args.get("desc"))
        data["order"] = order
        data["desc"] = desc

        # column headers link back to this view, toggling sort direction
        headers = [
            {
                "name": col,
                "label": labels.get(col),
                "href": self.get_href(query, req.args, context.href, order=col, desc=(col == order and not desc)),
            }
            for col in cols
        ]

        data["headers"] = headers

        data["fields"] = ticket_data["fields"]
        data["modes"] = ticket_data["modes"]

        # group time records by ticket id
        time_records_by_ticket = {}
        for record in time_records:
            ticket_id = record["ticket"]
            if ticket_id not in time_records_by_ticket:
                time_records_by_ticket[ticket_id] = []

            time_records_by_ticket[ticket_id].append(record)

        data["extra_group_fields"] = dict(
            ticket=dict(name="ticket", type="select", label="Ticket"),
            worker=dict(name="worker", type="select", label="Worker"),
        )

        num_items = 0
        data["groups"] = []

        # merge ticket data into ticket_time records
        for key, tickets in ticket_data["groups"]:
            ticket_times = []
            total_time = 0
            total_estimated_time = 0
            for ticket in tickets:
                records = time_records_by_ticket.get(ticket["id"], [])
                # fold the ticket's fields into each of its hour records
                for rec in records:
                    rec.update(ticket)
                ticket_times += records

            # sort ticket_times, if needed
            if order in our_labels:
                ticket_times.sort(key=lambda x: x[order], reverse=desc)
            data["groups"].append((key, ticket_times))
            num_items += len(ticket_times)

        data["double_count_warning"] = ""

        # group by ticket id or other time_ticket fields if necessary
        if req.args.get("group") in data["extra_group_fields"]:
            query.group = req.args.get("group")
            if not query.group == "id":
                data[
                    "double_count_warning"
                ] = "Warning: estimated hours may be counted more than once if a ticket appears in multiple groups"

            tickets = data["groups"][0][1]
            groups = {}
            for time_rec in tickets:
                key = time_rec[query.group]
                if key not in groups:
                    groups[key] = []
                groups[key].append(time_rec)
            data["groups"] = sorted(groups.items())

        # per-group totals: worked seconds summed directly, estimated hours
        # counted at most once per distinct ticket within the group
        total_times = dict((k, self.format_hours(sum(rec["seconds_worked"] for rec in v))) for k, v in data["groups"])
        total_estimated_times = {}
        for key, records in data["groups"]:
            seen_tickets = set()
            est = 0
            for record in records:
                # do not double-count tickets
                ticket_id = record["ticket"]
                if ticket_id in seen_tickets:
                    continue
                seen_tickets.add(ticket_id)
                estimatedhours = record.get("estimatedhours") or 0
                try:
                    estimatedhours = float(estimatedhours)
                except ValueError:
                    estimatedhours = 0
                est += estimatedhours * 3600
            total_estimated_times[key] = self.format_hours(est)

        data["total_times"] = total_times
        data["total_estimated_times"] = total_estimated_times

        # format records (must come after totals, which need raw seconds)
        for record in time_records:
            if "seconds_worked" in record:
                record["seconds_worked"] = self.format_hours(record["seconds_worked"])  # XXX misleading name
            if "time_started" in record:
                record["time_started"] = self.format_date(record["time_started"])
            if "time_submitted" in record:
                record["time_submitted"] = self.format_date(record["time_submitted"])

        data["query"].num_items = num_items
        # NOTE(review): this overwrites the merged labels assigned above with
        # the unmerged ticket-field labels, then re-applies the merge — the
        # net effect matches `labels`; kept as-is to preserve behavior.
        data["labels"] = TicketSystem(self.env).get_ticket_field_labels()
        data["labels"].update(labels)
        data["can_add_hours"] = req.perm.has_permission("TICKET_ADD_HOURS")

        data["multiproject"] = self.env.is_component_enabled(MultiprojectHours)

        # local import avoids a circular import at module load time
        from web_ui import TracUserHours

        data["user_hours"] = self.env.is_component_enabled(TracUserHours)

        # return the rss, if requested
        if req.args.get("format") == "rss":
            return self.queryhours2rss(req, data)

        # return the csv, if requested
        # NOTE(review): no `return` here, unlike the rss branch — presumably
        # queryhours2csv terminates the request itself; confirm.
        if req.args.get("format") == "csv":
            self.queryhours2csv(req, data)

        # add rss link
        rss_href = req.href(req.path_info, format="rss")
        add_link(req, "alternate", rss_href, _("RSS Feed"), "application/rss+xml", "rss")

        # add csv link
        add_link(req, "alternate", req.href(req.path_info, format="csv", **req.args), "CSV", "text/csv", "csv")

        # add navigation of weeks: prev shifts the window one week back,
        # next shifts it one week forward
        prev_args = dict(req.args)
        next_args = dict(req.args)

        prev_args["from_year"] = (from_date - timedelta(days=7)).year
        prev_args["from_month"] = (from_date - timedelta(days=7)).month
        prev_args["from_day"] = (from_date - timedelta(days=7)).day
        prev_args["to_year"] = from_date.year
        prev_args["to_month"] = from_date.month
        prev_args["to_day"] = from_date.day

        next_args["from_year"] = to_date.year
        next_args["from_month"] = to_date.month
        next_args["from_day"] = to_date.day
        next_args["to_year"] = (to_date + timedelta(days=7)).year
        next_args["to_month"] = (to_date + timedelta(days=7)).month
        next_args["to_day"] = (to_date + timedelta(days=7)).day

        add_link(req, "prev", self.get_href(query, prev_args, context.href), _("Prev Week"))
        add_link(req, "next", self.get_href(query, next_args, context.href), _("Next Week"))
        prevnext_nav(req, _("Prev Week"), _("Next Week"))

        add_ctxtnav(req, "Cross-Project Hours", req.href.hours("multiproject"))
        add_ctxtnav(
            req,
            "Hours by User",
            req.href.hours(
                "user",
                from_day=from_date.day,
                from_month=from_date.month,
                from_year=from_date.year,
                to_day=to_date.day,  # bug fix: was to_date.year
                to_month=to_date.month,
                to_year=to_date.year,
            ),
        )
        add_ctxtnav(req, "Saved Queries", req.href.hours("query/list"))

        add_stylesheet(req, "common/css/report.css")
        add_script(req, "common/js/query.js")

        return ("hours_timeline.html", data, "text/html")