Example #1
def do_activity(activity_3_times=constant.activity3_treasure_hunt_times,
                my_coin=0,
                coin=45000 * 10,
                people_list=None,
                confession_statement_list=None,
                is_run_list=None):
    if is_run_list is None:
        is_run_list = [True, True, False, True]
    while len(is_run_list) < 4:
        is_run_list.append(False)
    print(is_run_list)
    if is_run_list[0]:
        do_activity_1()
    # Time retrieval; later this will be driven by the source code or a mathematical relation
    time_day = utils.get_time(2)
    time_m = utils.get_time(1)
    if time_m == 8:
        if is_run_list[1] and 14 <= time_day <= 22:
            do_activity_2(type=1)  # market ranking type, not implemented yet
        if is_run_list[2] and 22 <= time_day <= 27:
            print('ac3')  # treasure hunt
            do_activity_3(times=activity_3_times, coin=coin, my_coin=my_coin)
        if is_run_list[3] and 25 <= time_day:
            # confession event
            do_activity_4(people_list=people_list,
                          confession_statement_list=confession_statement_list)
    if time_m == 9:
        if is_run_list[1] and 2 <= time_day <= 6:
            do_activity_2(type=2)  # market lottery type
        if is_run_list[2] and 7 <= time_day <= 13:
            do_activity_open_redbag()
        if is_run_list[3] and 14 <= time_day <= 20:
            do_activity_3(times=activity_3_times, coin=coin, my_coin=my_coin)
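Every example on this page leans on a project-local get_time helper whose contract differs from repo to repo. In the snippet above, utils.get_time(1) is compared against a month and utils.get_time(2) against a day of month, so a minimal sketch consistent with those call sites (the flag meanings are an assumption read off the call sites) is:

import datetime

def get_time(flag):
    # assumed contract: 1 -> current month, 2 -> current day of month
    now = datetime.datetime.now()
    if flag == 1:
        return now.month
    if flag == 2:
        return now.day
    return now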
Example #2
def extract_coverage(file, save_top_duplicated_entries, n=10):
    print utils.get_time()
    print 'loading the data from file...'
    covs=[]
    lengths=[]
    lines=[]
    with open(file,'r') as f:
        data = f.readline()
        while data:
            entries = data.split()
            if len(entries) < 4:
                data = f.readline()  # advance before skipping, otherwise the loop never terminates
                continue
            start = long(entries[1])
            end = long(entries[2])
            length = end - start
            coverage = int(entries[3])
            covs.append(coverage)
            lengths.append(length)
            lines.append(data)
            data = f.readline()

    if save_top_duplicated_entries:
        z = zip(lengths,lines)
        sorted_z = sorted(z, key=lambda x: x[0])
        sorted_z=sorted_z[::-1]
        print [i for i,_ in sorted_z[:n]]
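The reader above expects whitespace-delimited, BED-like records where the second and third fields are start/end coordinates and the fourth is an integer coverage. A quick Python 2 illustration with a hypothetical record:

line = 'chr1 1000 1420 7'  # hypothetical input record
fields = line.split()
print long(fields[2]) - long(fields[1]), int(fields[3])  # prints: 420 7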
Example #3
    def receive_data(self):
        """

        :return:
        """
        bd_addr = "00:18:E4:35:03:3A"
        port = 1
        sock = BluetoothSocket(RFCOMM)
        sock.connect((bd_addr, port))
        self.display_info('Sistema listo', 1)
        try:
            while True:
                data = sock.recv(10)
                data = str(data, 'utf-8')
                print(data)

                if '1' in data:
                    actual_time = get_time()
                    self.mylcd.lcd_clear()
                    msg = 'Ping en {}'.format(actual_time)
                    send_time(self.db, self.user_id)
                    self.display_info('Sistema listo', 1)
                    self.display_info(msg, 2)
                if '2' in data:
                    actual_time = get_time()
                    self.mylcd.lcd_clear()
                    msg = 'ALARMA {}'.format(actual_time)
                    send_time(self.db, self.user_id)
                    self.display_info('Sistema listo', 1)
                    self.display_info(msg, 2)
                sleep(2.0)
        finally:
            sock.close()  # previously unreachable after the bare infinite loop
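Presumed imports for this snippet: BluetoothSocket and RFCOMM come from PyBluez, sleep from the standard library; get_time, send_time, and display_info are helpers from the surrounding project.

from bluetooth import BluetoothSocket, RFCOMM
from time import sleep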
Example #4
    def save_state(self,
                   conf,
                   accuracy,
                   to_save_folder=False,
                   extra=None,
                   model_only=False):
        if to_save_folder:
            save_path = conf.save_path
        else:
            save_path = conf.model_path

        os.makedirs(str(save_path), exist_ok=True)  # create the actual save folder (was a hard-coded 'work_space/models')
        torch.save(
            self.model.state_dict(),
            str(save_path) +
            ('/model_{}_accuracy:{:.3f}_step:{}_{}.pth'.format(
                get_time(), accuracy, self.step, extra)))
        if not model_only:
            torch.save(
                self.head.state_dict(),
                str(save_path) +
                ('/head_{}_accuracy:{:.3f}_step:{}_{}.pth'.format(
                    get_time(), accuracy, self.step, extra)))
            torch.save(
                self.optimizer.state_dict(),
                str(save_path) +
                ('/optimizer_{}_accuracy:{:.3f}_step:{}_{}.pth'.format(
                    get_time(), accuracy, self.step, extra)))
            if conf.discriminator:
                torch.save(
                    self.growup.state_dict(),
                    str(save_path) +
                    ('/growup_{}_accuracy:{:.3f}_step:{}_{}.pth'.format(
                        get_time(), accuracy, self.step, extra)))
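Since get_time() is spliced directly into checkpoint filenames here, it presumably returns a filesystem-safe timestamp string; a sketch of that assumption (the ':' characters already in the name pattern would still fail on Windows):

from datetime import datetime

def get_time():
    return datetime.now().strftime('%Y-%m-%d-%H-%M-%S')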
Example #5
 def process_item(self, item, spider):
     type_ = item['type']
     year = item['year']
     year = re.sub(r'\s', '', year)  # strip any whitespace
     share_code = item['code']
     revenue = item['revenue']
     net_profit = item['net_profit']
     un_net_profit = item['un_net_profit']
     aps = item['asset_per_share']
     avps = item['asset_value_per_share']
     afps = item['accu_fund_per_share']
     rpps = item['retained_profit_per_share']
     cfps = item['cash_flow_per_share']
     roe = item['return_on_equity']
     roed = item['return_on_equity_diminish']
     alr = item['asset_liability_ratio']
     npcg = item['net_profit_compared_growth']
     rcg = item['revenue_compared_growth']
     revenue_YOY = item['revenue_YOY']
     net_profit_YOY = item['net_profit_YOY']
     un_net_profit_YOY = item['un_net_profit_YOY']
     apsy = item['asset_per_share_YOY']
     avpsy = item['asset_value_per_share_YOY']
     afpsy = item['accu_fund_per_share_YOY']
     rppsy = item['retained_profit_per_share_YOY']
     cfpsy = item['cash_flow_per_share_YOY']
     roey = item['return_on_equity_YOY']
     roedy = item['return_on_equity_diminish_YOY']
     alry = item['asset_liability_ratio_YOY']
     npcgy = item['net_profit_compared_growth_YOY']
     rcgy = item['revenue_compared_growth_YOY']
     create_time = get_time().split(" ")[0]
     update_time = get_time().split(" ")[0]
     column = [
         'revenue', 'net_profit', 'un_net_profit', 'asset_per_share',
         'asset_value_per_share', 'accu_fund_per_share',
         'retained_profit_per_share', 'cash_flow_per_share',
         'return_on_equity', 'return_on_equity_diminish',
         'asset_liability_ratio', 'net_profit_compared_growth',
         'revenue_compared_growth', 'revenue_YOY', 'net_profit_YOY',
         'un_net_profit_YOY', 'asset_per_share_YOY',
         'asset_value_per_share_YOY', 'accu_fund_per_share_YOY',
         'retained_profit_per_share_YOY', 'cash_flow_per_share_YOY',
         'return_on_equity_YOY', 'return_on_equity_diminish_YOY',
         'asset_liability_ratio_YOY', 'net_profit_compared_growth_YOY',
         'revenue_compared_growth_YOY', 'type', 'create_time', 'update_time'
     ]
     data = (0, share_code, year, revenue, net_profit, un_net_profit, aps,
             avps, afps, rpps, cfps, roe, roed, alr, npcg, rcg, revenue_YOY,
             net_profit_YOY, un_net_profit_YOY, apsy, avpsy, afpsy, rppsy,
             cfpsy, roey, roedy, alry, npcgy, rcgy, type_, create_time,
             update_time)
     # print(data)
     ret2, ret3 = check_db(spider.cs, [share_code, year], 'finance_info')
     if not ret2:
         insert_data(spider.db, spider.cs, data, 'finance_info')
     else:
         update_data(spider.db, spider.cs, [column, data])
     return item
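create_time and update_time take get_time().split(" ")[0], which implies the helper returns a 'YYYY-MM-DD HH:MM:SS'-style string from which the date part is kept; a minimal sketch of that assumption:

import time

def get_time():
    return time.strftime('%Y-%m-%d %H:%M:%S')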
Example #6
def train_model(mall_id):
    # start training the model
    random_state = 10
    metrix, tar = utils.get_data(mall_id)
    x_train, x_test, y_train, y_test = train_test_split(
        metrix, tar, test_size=0.1, random_state=random_state)
    # xgboost, based on boosting trees
    # parameters are set explicitly; training is slow
    clf_name = "xgboost"
    save_dir = "./model/" + clf_name + "_" + mall_id + "_model.m"
    n_est = 50
    clf = XGBClassifier(
        learning_rate=0.1,  # learning rate; typical values are 0.01-0.2
        n_estimators=n_est,
        max_depth=5,  # maximum tree depth, usually 3-10
        min_child_weight=1,  # minimum sum of instance weights in a child; larger avoids overfitting, too large underfits
        gamma=0,  # minimum loss reduction required to split a node; larger is more conservative
        subsample=0.8,  # row sampling ratio per tree; smaller is more conservative, too small underfits; typical 0.5-1
        colsample_bytree=0.8,  # column sampling ratio per tree
        objective='binary:logistic',  # binary classification
        nthread=4,  # number of threads
        scale_pos_weight=1,  # set to a positive value when classes are very imbalanced to help convergence
        seed=0)  # random seed, for reproducible results
    print(utils.get_time(), ' ', mall_id, ' starts...')
    train_time = time.time()
    clf.fit(x_train, y_train)
    train_time = time.time() - train_time
    score = clf.score(x_test, y_test)
    joblib.dump(clf, save_dir)
    print(utils.get_time(), ' saved a model for ', mall_id, ' score: ', score,
          '  train time : ', train_time)
    train_time = int(train_time)
    return (score, n_est, train_time)
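A hypothetical driver for train_model; the mall ids are made up, and utils.get_data plus an existing ./model directory are assumed by the snippet:

for mall_id in ['m_1021', 'm_2182']:
    score, n_estimators, seconds = train_model(mall_id)
    print(mall_id, score, n_estimators, seconds)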
Example #7
 def save_state(self,
                conf,
                accuracy,
                e,
                to_save_folder=False,
                extra=None,
                model_only=False):
     if to_save_folder:
         save_path = conf.save_path
         if not os.path.exists(str(save_path)):
             os.makedirs(str(save_path))
     else:
         save_path = conf.model_path
         if not os.path.exists(str(save_path)):
             os.makedirs(str(save_path))
     if model_only:
         torch.save(
             self.model.state_dict(),
             os.path.join(str(save_path),
                          ('model_{}_accuracy:{}_step:{}_{}.pth'.format(
                              get_time(), accuracy, self.step, extra))))
     else:
         save = {
             'optimizer': self.optimizer.state_dict(),
             'head': [self.head[0].state_dict(), self.head[1].state_dict()],
             'model': self.model.state_dict(),
             'epoch': e
         }
         torch.save(
             save,
             os.path.join(str(save_path),
                          ('accuracy:{}_step:{}_{}.pth'.format(
                              get_time(), accuracy, self.step, extra))))
Example #8
 def save_state(self,
                conf,
                accuracy,
                to_save_folder=False,
                extra=None,
                model_only=False):
     if to_save_folder:
         save_path = conf.save_path
     else:
         save_path = conf.model_path
     import os
     print('save_path', save_path)
     os.makedirs(str(save_path), exist_ok=True)
     torch.save(
         self.model.state_dict(),
         save_path / ('model_{}_accuracy:{}_step:{}_{}.pth'.format(
             get_time(), accuracy, self.step, extra)))
     if not model_only:
         torch.save(
             self.head.state_dict(),
             save_path / ('head_{}_accuracy:{}_step:{}_{}.pth'.format(
                 get_time(), accuracy, self.step, extra)))
         torch.save(
             self.optimizer.state_dict(),
             save_path / ('optimizer_{}_accuracy:{}_step:{}_{}.pth'.format(
                 get_time(), accuracy, self.step, extra)))
Example #9
def extract_coverage(file, save_top_duplicated_entries, n=10):
    print utils.get_time()
    print 'loading the data from file...'
    covs = []
    lengths = []
    lines = []
    with open(file, 'r') as f:
        data = f.readline()
        while data:
            entries = data.split()
            if len(entries) < 4:
                data = f.readline()  # advance before skipping, otherwise the loop never terminates
                continue
            start = long(entries[1])
            end = long(entries[2])
            length = end - start
            coverage = int(entries[3])
            covs.append(coverage)
            lengths.append(length)
            lines.append(data)
            data = f.readline()

    if save_top_duplicated_entries:
        z = zip(lengths, lines)
        sorted_z = sorted(z, key=lambda x: x[0])
        sorted_z = sorted_z[::-1]
        print [i for i, _ in sorted_z[:n]]
        print [j for _, j in sorted_z[:n]]
        save_entries([j for _, j in sorted_z[:n]], 'top_entries.bed')
    return covs, lengths
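A hypothetical call, assuming a BED-style coverage file on disk:

covs, lengths = extract_coverage('coverage.bed', save_top_duplicated_entries=True, n=10)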
Example #10
    def save_state(self,
                   conf,
                   accuracy,
                   to_save_folder=False,
                   extra=None,
                   model_only=False):
        if to_save_folder:
            save_path = conf.save_path
        else:
            save_path = conf.model_path

        lz.mkdir_p(save_path, delete=False)

        torch.save(
            self.model.state_dict(),
            save_path / ('model_{}_accuracy:{}_step:{}_{}.pth'.format(
                get_time(), accuracy, self.step, extra)))
        if not model_only:
            torch.save(
                self.head.state_dict(),
                save_path / ('head_{}_accuracy:{}_step:{}_{}.pth'.format(
                    get_time(), accuracy, self.step, extra)))
            torch.save(
                self.optimizer.state_dict(),
                save_path / ('optimizer_{}_accuracy:{}_step:{}_{}.pth'.format(
                    get_time(), accuracy, self.step, extra)))
Example #11
def start_compile():
    for filename in compile_list:
        logfile = os.path.join(log_compile, filename + '.log')
        cwdpath = os.path.join(path_base, filename)
        status = utils.get_compilation_status(logfile)
        time = utils.get_time(logfile)
        if not os.path.exists(logfile) or is_force:
            if is_purge:
                out, err = Popen(
                    ["mvn", "clean", "install", '-DskipTests=true', '-Dgpg.skip=true', '-Drat.skip=true',
                     '-Dmaven.javadoc.skip=true', '-fn', '-B',  # comma restored: '-B' was silently fused with the next argument
                     'dependency:purge-local-repository'], cwd=cwdpath, stdout=PIPE,
                    stderr=PIPE).communicate()
            else:
                out, err = Popen(
                    ["mvn", "clean", "install", '-DskipTests=true', '-Dgpg.skip=true', '-Drat.skip=true',
                    '-Dmaven.javadoc.skip=true', '-fn', '-B'], cwd=cwdpath, stdout=PIPE,
                    stderr=PIPE).communicate()
            writefile = open(logfile, 'wb')
            writefile.write(out)
            writefile.write(err)
            writefile.close()
            status = utils.get_compilation_status(logfile)
            time = utils.get_time(logfile)
        print(filename, status, time)
    write_compile_result(compile_list)
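Here utils.get_time(logfile) evidently extracts a build duration from the Maven log it just wrote; one plausible sketch is scraping Maven's 'Total time:' summary line (an assumption about the log format):

def get_time(logfile):
    try:
        with open(logfile) as f:
            for line in f:
                if 'Total time:' in line:
                    return line.split('Total time:')[1].strip()
    except IOError:
        return None
    return None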
Example #12
def run(data, filename):
    print("**********************************************************")
    print("CREATING:", filename)
    data = data.sort_values(by=['tcp_stream', 'protocol', 'start_time'])
    data[['flow_start']] = data[['flow_start']].apply(pd.to_datetime)
    data[['flow_finish']] = data[['flow_finish']].apply(pd.to_datetime)

    print(utils.get_time() + "- Generating basic features.")
    flows = generate_basic_features(data)
    merged_flows = list(itertools.chain.from_iterable(flows))
    df_merged = pd.DataFrame.from_dict(merged_flows)
    df_merged['flow_duration'] = (df_merged.flow_finish -
                                  df_merged.flow_start).dt.total_seconds()

    print(utils.get_time() + "- Generating extended features.")
    extended_features = [
        'count_dest', 'count_src', 'count_serv_src', 'count_serv_dst',
        'count_dest_conn', 'count_src_conn', 'count_serv_src_conn',
        'count_serv_dst_conn'
    ]
    #df_merged[extended_features]=df_merged.apply(lambda x: pd.Series(generate_extended_features(x.flow_start,x.src_ip,x.dst_ip,x.src_port,x.dst_port,df_merged)),axis=1)
    generate_extended_features_by_loop(df_merged)

    print(utils.get_time() + "- Saving tesing dataset into dataframe pickle.")
    with open(filename + ".pickle", 'wb') as handle:
        pickle.dump(df_merged, handle, protocol=pickle.HIGHEST_PROTOCOL)
Example #13
 def _load(self):
     log('[{time}] loading from {path}'.format(time=get_time(),
                                               path=self._source_path))
     for i, label_tag in enumerate(self._label_tags):
         path = os.path.join(self._source_path, label_tag)
         files = sample(
             os.listdir(path)[self._start:self._end], self._max_num
         ) if self._max_num > 0 else os.listdir(path)[self._start:self._end]
         print('start: {}, end: {}'.format(self._start, self._end))
         print(len(files))
         pbar = ProgressBar(len(files))
         for j, filename in enumerate(files):
             filepath = os.path.join(path, filename)
             try:
                 with open(filepath, 'r') as f:
                     log_sequence = json.load(f)
                     feature = self._sequence2feature(log_sequence)
                     self._data_ids.append(
                         filepath.split('/')[-1].split('.')[0].split('_')
                         [0])
                     self._feature_data.append(feature)
                     self._label_data.append(i)
             except Exception:  # a bare except would also swallow KeyboardInterrupt
                 log('[{time}] Failed to load file {filepath}'.format(
                     time=get_time(), filepath=filepath))
                 print('[{time}] Failed to load file {filepath}'.format(
                     time=get_time(), filepath=filepath))
             pbar.updateBar(j)
Example #14
 def save_state(self,
                conf,
                accuracy,
                to_save_folder=False,
                extra=None,
                model_only=False):
     if to_save_folder:
         save_path = conf.save_path
     else:
         save_path = conf.model_path
     torch.save(
         self.backbone.state_dict(),
         save_path / ('backbone_{}_accuracy:{}_step:{}_{}.pth'.format(
             get_time(), accuracy, self.step, extra)))
     torch.save(
         self.idprehead.state_dict(),
         save_path / ('idprehead_{}_accuracy:{}_step:{}_{}.pth'.format(
             get_time(), accuracy, self.step, extra)))
     if not model_only:
         torch.save(
             self.idhead.state_dict(),
             save_path / ('idhead_{}_accuracy:{}_step:{}_{}.pth'.format(
                 get_time(), accuracy, self.step, extra)))
         torch.save(
             self.attrhead.state_dict(),
             save_path / ('attrhead_{}_accuracy:{}_step:{}_{}.pth'.format(
                 get_time(), accuracy, self.step, extra)))
         torch.save(
             self.optimizer.state_dict(),
             save_path / ('optimizer_{}_accuracy:{}_step:{}_{}.pth'.format(
                 get_time(), accuracy, self.step, extra)))
Example #15
  def increment_game(self, game_state):
    current_time = utils.get_time()
    time_delta = current_time - game_state["last_updated"]
    
    ships = game_state["ships"]
    asteroids = game_state["asteroids"]
    
    level_time = game_state["level_countdown"]
    if level_time > 0:
      level_time -= min(level_time, time_delta)
      game_state["level_countdown"] = level_time
      if level_time == 0:
        game_state["level"] += 1
        game_state["level_over"] = False
        new_asteroids = [Asteroid.make_asteroid() for _ in range(0, game_state["level"])]
        asteroids = new_asteroids
        for ship in ships:
          ship.invuln_time += 1000

    i = 0
    shipsLength = len(ships)
    while i < shipsLength:
      ship = ships[i]
      # Remove ships that time out. Mark them as leaving for half a second
      # first, so that the client has time to display some nice blink effect
      # or something down the road.
      if ship.leaving and current_time - ship.last_updated > 500:
        print("Ship leaving: {0}".format(ship.name))
        ships.pop(i)
        shipsLength -= 1
      elif current_time - ship.last_updated > 5000:
        ship.leaving = True
        ship.update(time_delta)
        i += 1
      else:
        ship.update(time_delta)
        i += 1
    
    i = 0
    asteroidsLen = len(asteroids)
    while i < asteroidsLen:
      asteroid = asteroids[i]
      asteroid.update()
      if asteroid.dead:
        asteroids.pop(i)
        asteroidsLen -= 1
      else:
        i += 1
        
    if len(asteroids) == 0 and not game_state["level_over"]:
      game_state["level_over"] = True
      game_state["level_countdown"] = 1500
    
    game_state["ships"] = ships
    game_state["asteroids"] = asteroids
    game_state["last_updated"] = utils.get_time()
Example #16
def plot_time():

    cpu_sizes = [10000, 100000, 1000000, 10000000]

    vecdot_sept = []
    vecdot_dec = []
    vecaxpy_sept = []
    vecaxpy_dec = []

    for size in cpu_sizes:

        vecdot_sept.append(1000000 * ut.get_time(
            "../data/vec-ops/vec_ops.n2_g0_c21_p42." + str(size) + ".654910",
            "VecDot", 1))
        vecdot_dec.append(1000000 * ut.get_time(
            "../data/vec-ops-december/vec_ops.n2_g0_c21_p42." + str(size) +
            ".795805", "VecDot", 1))

        vecaxpy_sept.append(1000000 * ut.get_time(
            "../data/vec-ops/vec_ops.n2_g0_c21_p42." + str(size) + ".654910",
            "VecAXPY", 3))
        vecaxpy_dec.append(1000000 * ut.get_time(
            "../data/vec-ops-december/vec_ops.n2_g0_c21_p42." + str(size) +
            ".795805", "VecAXPY", 3))

    # plot
    fig = plt.figure()
    ax = fig.add_subplot(111)

    plt.plot(cpu_sizes, vecdot_sept, color="red", label="VecDot 09/19")
    plt.plot(cpu_sizes,
             vecdot_dec,
             color="red",
             linestyle="dashed",
             label="VecDot 12/19")
    plt.plot(cpu_sizes, vecaxpy_sept, color="black", label="VecAXPY 09/19")
    plt.plot(cpu_sizes,
             vecaxpy_dec,
             color="black",
             linestyle="dashed",
             label="VecAXPY 12/19")
    plt.title("CPU execution time", fontsize=12)
    plt.xlabel("Vector size", fontsize=12)
    plt.ylabel("Seconds", fontsize=12)
    plt.legend(loc="upper left", fontsize=12, frameon=False)
    plt.xscale('log')
    # ax.ticklabel_format(axis="y", style="sci", useLocale=True)
    # plt.text(.03, 1.03, "1e-6", horizontalalignment='center', verticalalignment='center', transform = ax.transAxes)
    plt.tight_layout()

    plt.savefig("../plots/CPU_sept_vs_dec_time.png")
    plt.show()
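ut.get_time(path, event, count) apparently pulls the timing for a named event (e.g. 'VecDot') out of a PETSc -log_view text file; a hedged sketch, where count is treated as a 1-based occurrence index and the time column position is an assumption:

def get_time(path, event, count):
    hits = [line for line in open(path) if line.lstrip().startswith(event)]
    if len(hits) < count:
        return 0.0
    return float(hits[count - 1].split()[3])  # assumed: 4th column holds the event time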
Example #17
def VecSet(show):

    gpu_sizes = [
        1000, 10000, 100000, 1000000, 2000000, 4000000, 6000000, 8000000,
        10000000, 20000000, 40000000, 60000000, 80000000, 100000000, 1000000000
    ]
    cpu_sizes = [1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000]

    cpu = []
    gpu = []

    for size in cpu_sizes:
        time1 = ut.get_time("../data/vec-ops/vec_ops.n2_g0_c21_p7." +
                            str(size) + ".654910", "VecSet", 2)  # 7 CPUs
        cpu.append(ut.calc_rate(size, time1))

    for size in gpu_sizes:
        time2 = ut.get_time("../data/figures-2-7-8-9/vec_ops.n1_g1_c2_a1." +
                            str(size) + ".668627", "VecSet",
                            3)  # 1 GPU with 1 CPU
        gpu.append(ut.calc_rate(size, time2))

    # plot
    num = 2
    cm = plt.get_cmap('inferno')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_prop_cycle(color=[cm((1. * i) / num) for i in range(num)])  # set_color_cycle was removed in matplotlib 3.x

    ax.plot(cpu_sizes,
            cpu,
            marker="o",
            markersize="4",
            markeredgewidth=2,
            label="7 CPU cores")
    ax.plot(gpu_sizes,
            gpu,
            marker="o",
            markersize="4",
            markeredgewidth=2,
            label="1 GPU")

    plt.title("VecSet performance", fontsize=12)
    plt.xlabel("Vector size", fontsize=12)
    plt.ylabel("8 Mbytes/second", fontsize=12)
    plt.legend(loc="upper left", fontsize=12, frameon=False)
    plt.tight_layout()
    plt.xscale('log')
    ax.set_yticklabels(
        ['{:,}'.format(int(x)) for x in ax.get_yticks().tolist()])

    plt.savefig("../plots/VecSet.png")
    if show: plt.show()
Example #18
def find_hours_since_last_backup(conn, instance):
    my_snapshots = get_snapshots(conn, instance)

    if not my_snapshots:
        return None
    latest_snapshot = max(my_snapshots, key=lambda item:utils.get_time(item.start_time))

    latest_time = utils.get_time(latest_snapshot.start_time)
    now_utc = utils.get_current_utc_time()
    diff = now_utc - latest_time
    diff_hours = diff.total_seconds() / 3600
    print "Latest snapshot is from {}, which is {} hours ago".format(latest_time, diff_hours)
    return diff_hours
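utils.get_time here parses boto's ISO-8601 snapshot start_time string into a datetime so the values can be compared; a minimal sketch (the exact format string is an assumption):

from datetime import datetime

def get_time(timestamp):
    return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')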
Example #19
def process_coverage(covs, lengths):
    print utils.get_time()
    print 'processing the data...'
    z = zip(covs, lengths)
    unique_covs = sorted(list(set(covs)))
    if 1 in unique_covs:
        unique_covs.remove(1)
    filtered_lengths = []
    for c in unique_covs:
        if c == 27:
            break
        filtered_lengths.append(count_lengths_distribution_for_coverage(c, z))
    draw_distr(filtered_lengths[:24], unique_covs[:24], '2-25.pdf')
Example #20
def process_coverage(covs, lengths):
    print utils.get_time()
    print 'processing the data...'
    z = zip(covs, lengths)
    unique_covs = sorted(list(set(covs)))
    if 1 in unique_covs:
        unique_covs.remove(1)
    filtered_lengths = []
    for c in unique_covs:
        if c == 27:
            break
        filtered_lengths.append(count_lengths_distribution_for_coverage(c, z))
    draw_distr(filtered_lengths[:24], unique_covs[:24], '2-25.pdf')
Example #21
def fig11_cpu(sizes, operation1, count1, operation2, count2, show):

    cpu_time1 = []
    cpu_time2 = []

    # get data
    for size in sizes:

        # time from file
        time = ut.get_time(
            "../data/cpu-flush-cache/vec_ops.n2_g0_c21_p42." + str(size),
            operation1, count1)
        cpu_time1.append(1000000 *
                         time)  # cheating so that y-axis units are 10^-6

        time = ut.get_time(
            "../data/cpu-flush-cache/vec_ops.n2_g0_c21_p42." + str(size),
            operation2, count2)
        cpu_time2.append(1000000 * time)

    # plot
    num = 2
    cm = plt.get_cmap('inferno')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_prop_cycle(color=[cm((1. * i) / num) for i in range(num)])  # set_color_cycle was removed in matplotlib 3.x

    # plt.plot(sizes, cpu_time1, marker=".", markersize="6", markeredgewidth=2, label="42 CPUs " + operation1)
    plt.plot(sizes,
             cpu_time2,
             marker=".",
             markersize="6",
             markeredgewidth=2,
             label="42 CPUs " + operation2)
    plt.title("CPU execution time", fontsize=12)
    plt.xlabel("Vector size", fontsize=12)
    plt.ylabel("Seconds", fontsize=12)
    plt.legend(loc="lower right", fontsize=12, frameon=False)
    ax.ticklabel_format(axis="y", style="sci", useLocale=True)
    ax.ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
    plt.text(.03,
             1.03,
             "1e-6",
             horizontalalignment='center',
             verticalalignment='center',
             transform=ax.transAxes)
    plt.tight_layout()

    plt.savefig("../plots/" + operation2 + "CPU_sm.png")
    # plt.savefig("../plots/CPU_" + operation1 + "_" + operation2 + "_time_42CPU.png")
    if show: plt.show()
Example #22
def sync_metadata(cs):

    cluster_name = cs.get('DBClusterIdentifier')
    info = cs.get('InstanceInfo')
    tags = info.get('Tags')
    hhmm = utils.get_time(utils.CONST_KEY_TIME_STOP, tags)
    sshh = utils.get_time(utils.CONST_KEY_TIME_START, tags)
    #print hhmm,sshh
    #extract security groups
    sgs = cs['VpcSecurityGroups']
    #print sgs
    securityGroupIds = []
    for sg in sgs:
        securityGroupIds.append(sg['VpcSecurityGroupId'])

    table = dynamodb.Table(CONST_TABLE)

    Found = False
    try:
        response = table.query( KeyConditionExpression=Key(CONST_FIELD_CLUSTER_NAME).eq(cluster_name))
        #print "Working with existing metadat.."
        Found = True
    except Exception as e:
        pass

    item = {
        CONST_FIELD_CLUSTER_NAME: cluster_name,
        CONST_FIELD_TIME_STOP_NUM: hhmm[0],
        CONST_FIELD_TIME_START_NUM: sshh[0],
        CONST_FIELD_TAGS: tags,
        CONST_FIELD_CLUSTER_PARAMETER_GROUP: cs.get('DBClusterParameterGroup'),
        CONST_FIELD_SUBNET_GROUP: cs.get('DBSubnetGroup'),
        CONST_FIELD_SECURITY_GROUP_IDS: securityGroupIds,
        "db_instance_name": info.get('DBInstanceIdentifier'),
    }
    if Found and len(response.get('Items')) > 0:
        item = response['Items'][0]
        item[CONST_FIELD_CLUSTER_NAME] = cluster_name
        item[CONST_FIELD_TIME_STOP_NUM] = hhmm[0]
        item[CONST_FIELD_TIME_START_NUM] = sshh[0]
        item[CONST_FIELD_TAGS] = tags
        item[CONST_FIELD_CLUSTER_PARAMETER_GROUP] = cs.get('DBClusterParameterGroup')
        item[CONST_FIELD_SUBNET_GROUP] = cs.get('DBSubnetGroup')
        item[CONST_FIELD_SECURITY_GROUP_IDS] = securityGroupIds
        item["db_instance_name"] = info.get('DBInstanceIdentifier')

    response = table.put_item(Item=item)
    print "dynamodb response", response
    time.sleep(.120)
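utils.get_time(key, tags) evidently looks up an HH:MM schedule in the resource tags and returns it as an (hour, minute) tuple, which also matches the v[0]/v[1] comparisons in Example #32 below; a hedged sketch:

def get_time(key, tags):
    for tag in tags or []:
        if tag.get('Key') == key:
            hh, mm = tag['Value'].split(':')
            return int(hh), int(mm)
    return None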
Example #23
 def save_state(self, conf, accuracy, to_save_folder=False, extra=None, model_only=False):
     if to_save_folder:
         save_path = conf.save_path
     else:
         save_path = conf.model_path
     torch.save(
         self.model.state_dict(), save_path /
                                  ('model_{}.pth'.format(get_time())))
     if not model_only:
         torch.save(
             self.head.state_dict(), save_path /
                                     ('head_{}.pth'.format(get_time())))
         torch.save(
             self.optimizer.state_dict(), save_path /
                                          ('optimizer_{}.pth'.format(get_time())))
Example #24
def find_hours_since_last_backup(conn, instance):
    my_snapshots = get_snapshots(conn, instance)

    if not my_snapshots:
        return None
    latest_snapshot = max(my_snapshots,
                          key=lambda item: utils.get_time(item.start_time))

    latest_time = utils.get_time(latest_snapshot.start_time)
    now_utc = utils.get_current_utc_time()
    diff = now_utc - latest_time
    diff_hours = diff.total_seconds() / 3600
    print "Latest snapshot is from {}, which is {} hours ago".format(
        latest_time, diff_hours)
    return diff_hours
Example #25
def main():
    # Simple Algorithm
    print('Starting algorithm: {}'.format(get_time()))
    core = Core(
        requirements=get_requirements(),
        analysts=get_analysts()
    )
    core.run()
    if core.solution:
        print('Stopping algorithm: {}\nResults:'.format(get_time()))
        print('Solution fitness: {}'.format(core.solution.fitness))
        for gene in core.solution.genes:
            print('{0} ({1})'.format(gene, gene.fitness))
    else:
        print('No solution was found!')
Example #26
    def get_reply(self, reply_selector):
        created = "".join(
            reply_selector.xpath(
                './p[@class="comment__title"]/text()').getall()).strip()
        created = get_time(created)
        raw_name = reply_selector.xpath(
            './p[@class="comment__title"]/span/text()').get()
        name = raw_name.replace("đề nghị xóa",
                                "").replace("đã", "").replace("❌", "").strip()
        content = reply_selector.xpath(
            './p[@class="comment__content text-500"]/text()').get()

        reaction = None
        if reply_selector.xpath(
                './p[@class="comment__title"]/span/span[@class="icon-like icon has-text-success"]'
        ):
            reaction = 'LIKE'
        if reply_selector.xpath(
                './p[@class="comment__title"]/span/span[@class="icon-dislike icon has-text-danger"]'
        ):
            reaction = 'DISLIKE'
        if 'đề nghị xóa' in raw_name:
            reaction = 'SHOULD_DELETE'

        return {
            "name": name,
            "content": content,
            "created": created,
            "reaction": reaction
        }
Example #27
def extract(handler, key, info, ext):
	if 'html' not in ext or not ext['html']:
		if not info['html']:
			raise ValueError('info html is None')
		ext['html'] = handler.html_file.get(info['html'])
		handler.log.info('load html: %s - %d' % (info['html'], len(ext['html'])))
		if not ext['html']:
			raise ValueError('load html failed.')

	article = html2article(ext['html'], info['url'], **ext['selector'])
	if not article:
		raise ValueError('article not found.')

	article['pages'] = [p for p in article['pages'] if p != info['url']]  # keep a list under Python 3

	if article['pubtime']:
		pubtime = get_time(article['pubtime'])
		if pubtime < 946656000:
			pubtime = url2time(info['url'])
		article['pubtime'] = pubtime
	else:
		article['pubtime'] = url2time(info['url'])

	if article['pubtime'] > time.time():
		article['pubtime'] = info['created'] - 86400 * 7

	return {'article':article}
Example #28
    def door_OPEN(self, door, pin_state):
        message = ''
        curr_time = Utils.get_time()
        cur_dt = Utils.epoch_to_datetime(curr_time).strftime(Utils.TIMEFORMAT)

        if door.send_open_im_debug and Utils.isDebugging:
            self.logger.info("%s %s(%s)" % (door.name, door.state, pin_state))
            door.send_open_im_debug = False

        if door.send_open_im and Utils.is_time_expired(
                door.tis.get(door.state), self.time_to_report_open, curr_time):
            door.send_open_im = False
            message = '%s is %s at %s' % (door.name, door.state, cur_dt)

        if Utils.is_time_expired(door.tis.get(Utils.STILLOPEN),
                                 self.time_to_report_still_open, curr_time):
            door.tis[Utils.STILLOPEN] = Utils.round_up_minutes(curr_time)
            message = '%s is still %s at %s' % (door.name, door.state, cur_dt)

        #etime = Utils.elapsed_time(int(curr_time - door.tis.get(door.state)))
        #if self.time_to_force_close != None and Utils.isTimeExpired(door.tis.get(Utils.FORCECLOSE), self.time_to_force_close, curr_time):
        #    door.tis[Utils.FORCECLOSE] = curr_time
        #    message = '%s force closed %s->%s at %s (%s)' % (door.name, door.state, Utils.CLOSED, cur_dt, etime)
        #    door.toggle_relay()

        return message
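A hedged reading of Utils.is_time_expired as called above: it should be true once limit seconds have elapsed since the recorded state change, with all three values in epoch seconds (the exact semantics are an assumption):

def is_time_expired(since, limit, now):
    return since is not None and now - since >= limit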
Example #29
 def rpush(self, msg, timeout=0):
     self._conn.rpush(self.queue, msg)
     if timeout:
         msg = "%s%s" % (msg, get_uuid())
         self.zadd(get_time() + timeout, msg)
         return msg
     return True
Example #30
    def capture_video(self, duration):
        video_name = 'video_' + utils.get_time() + '.h264'
        # Recording duration (ms)
        self.pi_camera.capture_video(video_name, duration)
        video_name = self.video_storage.store(video_name)

        return video_name
Example #31
    def get_page_fan_count(self, page):
        """
        Function that gets the number of likes from Facebook.
        INPUT: Page queried + Access token for FB API.
        OUTPUT: Data returned from FB (dict), Time when data is received (str) 
        """

        print("[i] Getting data from Facebook. Please wait.")

        # Fields to query from Facebook API (see FB documentation for details)
        fields = ["fan_count"]

        # Request to Facebook API for the page fans count
        url = "https://graph.facebook.com/{}?fields={}&access_token={}".format(
            page, ",".join(fields), self.access_token)
        data_fb = requests.get(url).json()

        if "error" in data_fb:

            # For local use if needed
            # raise FacebookError(e_log=data_fb["error"])
            return {"error": True, "response": data_fb["error"]}

        else:
            print("[i] Data successfully retrieved :).")
            return {
                "error":
                False,
                "data": [{
                    "date": utils.get_time(time_type="timestamp"),
                    "page": page,
                    "likes": data_fb["fan_count"]
                }]
            }
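get_time(time_type="timestamp") presumably returns the current unix timestamp, with a human-readable string as the default; a minimal sketch of that assumed contract:

import time

def get_time(time_type='string'):
    if time_type == 'timestamp':
        return int(time.time())
    return time.strftime('%Y-%m-%d %H:%M:%S')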
Example #32
def generate_asg_instance(tz):
    asgList = {"1": "name"}

    print "creating autoscaling instance map"

    response = asgclient.describe_auto_scaling_groups()

    #print response
    #nextToken = response['NextToken']
    asgs = response['AutoScalingGroups']
    for asg in asgs:
        name = asg['AutoScalingGroupName']
        tags = asg["Tags"]
        ## starting suspended asgs based on time
        v = utils.get_time(utils.CONST_ASG_RESUME_KEY, tags)
        if not v:
            print "no asg schedule(nothing to resume)"
        else:
            c = utils.current_time(tz)
            if c[0] > v[0]:
                resume_asg(name)
            if c[0] == v[0] and c[1] >= v[1]:
                resume_asg(name)
        # end asg stuff

        #print asg['AutoScalingGroupName'],'\n'
        for instance in asg['Instances']:
            iid = instance['InstanceId']
            asgList[iid] = name
    return asgList
Example #33
 def update_media_status(self, media_id):
     d = get_time()
     return '''
             UPDATE eodas.tape_info 
             SET checked=TRUE, verification_date='{1}', rank='{1}'  
             WHERE tape_id='{0}' 
             '''.format(media_id, d)
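Building SQL with str.format, as above, risks quoting bugs and injection; a hedged parameterized alternative (DB-API '%s' placeholders are an assumption about the database driver in use):

def update_media_status(self, media_id):
    d = get_time()
    sql = ('UPDATE eodas.tape_info '
           'SET checked=TRUE, verification_date=%s, rank=%s '
           'WHERE tape_id=%s')
    return sql, (d, d, media_id)  # execute with cursor.execute(sql, params)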
Example #34
def main_train(model, m0, vocab, seqs, n_epochs=1, l_rate=1 / 10**6):
    split_i = 1
    avg_acc = 0
    avg_acc3 = 0
    start_time = get_time()

    n_splits = len(seqs)
    model_name = m0["model_name"]
    b_size = m0["batch_size"]
    word2id = vocab["word2id"]

    for seqs_train, seqs_val in seqs:
        print("\n")
        print("********train split[" + str(split_i) + "/" + str(n_splits) +
              "]")
        print("train_split length : " + str(len(seqs_train[0])))
        print("val_split length   : " + str(len(seqs_val[0])))
        t_data = n_epochs, seqs_train, seqs_val
        model, acc, acc3 = train_model(t_data, model, l_rate, b_size, word2id,
                                       device)
        avg_acc += acc
        avg_acc3 += acc3
        split_i += 1
    avg_acc = avg_acc / n_splits
    avg_acc3 = avg_acc3 / n_splits
    print("")
    print("**avg accuracy     : " + str(round(100 * avg_acc, 2)) + "%")
    print("**avg accuracy3    : " + str(round(100 * avg_acc3, 2)) + "%")
    print("**total time taken : " + comp_time(start_time, None))
    if model_name is not None:
        save_model(model_name, model)
        save_acc(model_name, n_epochs, avg_acc, avg_acc3)
Example #35
    def check_door_status(self, door):
        self.logger = logging.getLogger(__name__)
        message = ''
        curr_time = Utils.get_time()
        pin_state = door.get_state_pin()

        if pin_state != door.state:
            if door.state != Utils.OPENING and door.state != Utils.CLOSING:
                door.state = Utils.OPENING if door.state == Utils.CLOSED else Utils.CLOSING
                door.tis[door.state] = curr_time
                if Utils.isDebugging:
                    self.logger.info("%s %s(%s)" %
                                     (door.name, door.state, pin_state))

        if door.state == Utils.OPENING:
            message = self.door_OPENING(door)

        elif door.state == Utils.CLOSING:
            message = self.door_CLOSING(door)

        elif door.state == Utils.OPEN:
            message = self.door_OPEN(door, pin_state)

        if message != "":
            self.logger.info(message)
            self.send_msg(message)

        self.updateHandler.handle_updates()
Example #36
def process_img(name, label, crop_shape, scale, random_draws, 
                to_augment=True, no_rotation=True, logging=True):
    imgs = []
    if logging:
        print "%s [%d] Processing file %s" % (get_time(), os.getpid(), name)
    pad_value = 127
    img = image_load(name)
    simg = scale_radius(img, round(scale / .9))
    uimg = unsharp_img(simg, round(scale / .9))
    suimg = subsample_inner_circle_img(uimg, round(scale / .9), pad_value)
    cimg = center_crop(suimg, crop_shape)
    pimg = pad_img(cimg, (2 * scale, 2 * scale, 3), value=127)
    pimg[:10, :, :] = pad_value
    pimg[-10:, :, :] = pad_value
    imgs.append(pimg)

    # Check if augmentation is needed
    if (to_augment and np.random.uniform(0, 1) > pb[label]) or (not to_augment):
        return imgs

    for i in range(random_draws):
        dist_img = get_distorted_img(simg, 127, no_rotation)
        uimg = unsharp_img(dist_img, round(scale / .9))
        suimg = subsample_inner_circle_img(uimg, round(scale / .9), pad_value)
        cimg = center_crop(suimg, (256, 256))
        dimg = pad_img(cimg, (2 * scale, 2 * scale, 3), value=127)
        dimg[:10, :, :] = pad_value
        dimg[-10:, :, :] = pad_value
        imgs.append(dimg)

    return imgs
Example #37
def downsample(fname_label, mode, logging=True):
    name, label = fname_label
    if logging:
        print "%s [%d] Processing file %s" % (get_time(), os.getpid(), name)
    a = cv2.imread(name).astype(np.uint8)
    s = 512. / a.shape[1]
    out = cv2.resize(a, None, fx=s, fy=s)
    write_img(out, name, mode)
    return a
Example #38
def run(hal, specie_source, genome_bed_path, genome_path, specie_target, output_prefix):
    print utils.get_time()
    print 'calling liftover for genome mapping annotation...'
    bed = run_halLiftover(hal,specie_source,genome_bed_path,specie_target,output_prefix) 
    #bed = '/hive/groups/recon/projs/pipeline_data/comparative/tmp/2014-11-10-09\:19\:25/coverage.FelisCatus.PantheraTigris.bed'

    print utils.get_time()
    print 'sorting mapped entries...'
    sorted = run_sort(bed)

    print utils.get_time()
    print 'counting size...'
    size = utils.run_faSize(genome_path, './')

    print utils.get_time()
    print 'counting coverage...'
    run_genomecov(sorted, size)
    
    print utils.get_time()
    print 'done.'
Example #39
def write_imgs(imgs, name, label, output_folder_name, crop_shape):
    if imgs is None:
        print "%s [%d] WARN: File not processed: %s" % (get_time(), os.getpid(), name)
        return
    new_name = os.path.join(output_folder_name, '%d' % label, 
                            os.path.basename(name))
    make_folder_tree(new_name)
    for i, im in enumerate(imgs):
        if i == 0:
            cv2.imwrite(new_name, im)
            continue
        out_name = new_name.replace(".jpeg", "_%d.jpeg" % (i))
        out_im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
        cv2.imwrite(out_name, out_im)
Example #40
	def parse(self):
		res = {'int':0, 'str':''}
		pubtime = self.parse_time()
		if not pubtime:
			return res

		res['str'] = pubtime
		timestamp = get_time(pubtime)
		if timestamp < 946656000:  # earlier than ~Jan 1 2000: the parsed time is bogus
			timestamp = url2time(self.article.url)
		res['int'] = timestamp
		return res
Example #41
    def _today_stat(self):
        parts = list()
        for row in calc.get_today_list(self.user_id):
            line = u""

            if row.date is not None:
                line += u"{} ".format(u.get_time(row.date))

            if row.calories is not None:
                if row.weight is not None:
                    line += u"{0} ккал ({1} г, {2} ккал/100г)".format(int(row.calories * row.weight / 100), row.weight, row.calories)
                else:
                    line += u"{0} ккал".format(row.calories)

            if len(line) > 0:
                parts.append(line)

        return u"\n".join(parts)
Example #42
    def play(self, n_step=10000, n_episode=100, test_ep=None, render=False):
        if test_ep is None:
            test_ep = self.ep_end

        test_history = History(self.config)

        if not self.display:
            gym_dir = '/tmp/%s-%s' % (self.env_name, get_time())
            self.env.env.monitor.start(gym_dir)

        best_reward, best_idx = 0, 0
        for idx in xrange(n_episode):
            screen, reward, action, terminal = self.env.new_random_game()
            # added by jmei: update internal state immediately
            self.update_lstm_states(action, reward, screen, terminal)
            # end added
            current_reward = 0

            for _ in range(self.history_length):
                test_history.add(screen)

            for t in tqdm(range(n_step), ncols=70):
                # 1. predict
                action = self.predict(self.lstm_out_prev, test_ep)
                # 2. act
                screen, reward, terminal = self.env.act(action, is_training=False)
                # 3. observe
                test_history.add(screen)
                self.update_lstm_states(action, reward, screen, terminal)

                current_reward += reward
                if terminal:
                    break

            if current_reward > best_reward:
                best_reward = current_reward
                best_idx = idx

            print "=" * 30
            print " [%d] Best reward : %d" % (best_idx, best_reward)
            print "=" * 30

        if not self.display:
            self.env.env.monitor.close()
Example #43
 def post(self, ask_id):
     coll = self.get_database()
     theAskDoc = coll.find_one({'username': self.get_current_user(), '_id': ObjectId(ask_id)})
     theAsk = theAskDoc['Ask']
     answerBody = self.get_argument('answer_body')
     created_time = get_time()
     answer = Answer(ask=theAsk, body=answerBody, User=self.get_User(), created_at=created_time)
     try:
         theAsk['answers_count'] += 1
     except KeyError:
         theAsk['answers_count'] = 1
     theAsk['replied_at'] = created_time
     try:
         coll.update_one({'username': self.get_current_user(), '_id': ObjectId(ask_id)}, {'$set': {'Ask': theAsk}})
         coll.insert_one({'username': self.get_current_user(), 'User': self.get_User(), 'Ask': theAsk, 'Answer': answer, 'ask_id': ObjectId(ask_id)})
         self.redirect("/ask/%s" % ask_id)
     except Exception, exc:
         self.write("save answer error")
         self.render('ask_show.html', current_user=self.get_userInfo(), title="Question", ask=theAsk, answers=[], id=ask_id)
Example #44
def process_img(name, crop_shape, scale):
    ferr = open("out_%d.log" % os.getpid(), 'a')
    sys.stdout = ferr
    sys.stderr = ferr
    print "%s [%d] Processing file %s" % (get_time(), os.getpid(), name)
    a = cv2.imread(name)
    a = bbox(scaleRadius(a,scale))
    if a is None:
        ferr.close()
        return
    b = np.zeros(a.shape)
    cv2.circle(b,(a.shape[1]//2,a.shape[0]//2),int(scale*0.9),(1,1,1),-1,8,0)
    aa = cv2.addWeighted(a,4,cv2.GaussianBlur(a,(0,0),scale/30),-4,128)*b+128*(1-b)
    rand_im = random_crops(aa, shape=crop_shape)
    if "train/" in name:
        new_name = name.replace("train/", "%d_train/" % scale)
    elif "validation/" in name:
        new_name = name.replace("validation/", "%d_val/" % scale)
    cv2.imwrite(new_name,rand_im)
    ferr.close()
Example #45
def delete_oldest_backups(conn, backups_to_keep, backup_description):
    """
    delete_oldest_backups(boto.ec2.connection.EC2Connection, int, string)
    """

    my_amis = get_my_amis(conn)
    my_amis = [x for x in my_amis if x.description == backup_description and len(x.block_device_mapping) == 1]
    def get_snapshot_id(ami):
        return utils.single(ami.block_device_mapping).snapshot_id

    snapshot_ids = [get_snapshot_id(ami) for ami in my_amis]

    # my_amis = filter(lambda x : x.description == backup_description, my_amis)
    # mapped = map(lambda ami: single_or_none(ami.block_device_mapping), my_amis)
    #filtered = filter(lambda item: item is not None, mapped)
    #snapshot_ids = map(lambda device_mapping : device_mapping.snapshot_id, filtered)
    snapshots = conn.get_all_snapshots(snapshot_ids = snapshot_ids)
    snapshots_by_id = list_to_dict(map(lambda snapshot : [snapshot.id, snapshot], snapshots))

    amis_and_snapshots = []
    for ami in my_amis:
        snapshot_id = get_snapshot_id(ami)
        snapshot = snapshots_by_id[snapshot_id]
        amis_and_snapshots.append(
                {'snapshot_id' : snapshot_id,
                 'snapshot' : snapshot,
                 'ami' : ami,
                 'date' : utils.get_time(snapshot.start_time)})

    #def sorter(key1, key2):
    #    return key1['date'] < key2['date']

    amis_and_snapshots.sort(key=lambda item: item['date'])

    if len(amis_and_snapshots) <= backups_to_keep:
        print "Got {}/{} backups, nothing to trim".format(len(amis_and_snapshots), backups_to_keep)
        return
    
    for i in range(len(amis_and_snapshots) - backups_to_keep):
        delete_ami_and_snapshot(conn, amis_and_snapshots[i])
Example #46
  def play(self, n_step=10000, n_episode=10, test_ep=None, render=False):
    if test_ep is None:
      test_ep = self.ep_end

    test_history = History(self.config)

    if not self.display:
      self.env.env.monitor.start('/tmp/%s-%s' % (self.env_name, get_time()), video_callable=lambda count: True)

    best_reward, best_idx = 0, 0
    for idx in xrange(n_episode):
      screen, reward, action, terminal = self.env.new_random_game()
      current_reward = 0

      for _ in range(self.history_length):
        test_history.add(screen)

      for t in tqdm(range(n_step), ncols=70):
        # 1. predict
        action = self.predict(test_history.get(), test_ep)
        # 2. act
        screen, reward, terminal = self.env.act(action, is_training=False)
        # 3. observe
        test_history.add(screen)

        current_reward += reward
        if terminal:
          break

      if current_reward > best_reward:
        best_reward = current_reward
        best_idx = idx

      print "="*30
      print " [%d] Best reward : %d" % (best_idx, best_reward)
      print "="*30

    if not self.display:
      self.env.env.monitor.close()
Example #47
def count_lengths_distribution_for_coverage(c, z):
    print utils.get_time()
    print 'calculating for coverage', c
    filtered_lengths = [j for _, j in filter(lambda x: x[0] == c, z)]
    print 'data size is', len(filtered_lengths)
    return filtered_lengths
Example #48
    def run(self):

        tarball_log.log("Tarball creator starting...\n")

        while not self.cancelled():

            start_time = utils.get_time()

            # routinely check for updates (sleep every so often)
            for pack_name, pack_obj in self.pack_objs.iteritems():

                # We are reloading pack_objs, but this for loop won't notice it until we enter it again
                if self.cancelled():
                    continue

                # get latest version from the tree
                latest_tree_rev = self.src_repo.latest_tree_revision()
                # print "Latest tree rev: %d (%s)" % (latest_tree_rev, pack_name)

                if not latest_tree_rev:
                    tarball_log.log("Error getting latest tree rev, trying later... (%s)\n" % pack_name)

                    # Restart for loop over...
                    break

                # Only do for the last couple of commits, rather than constantly updating a base revision
                if latest_tree_rev <= self.num_sequential:
                    starting_rev = 1
                else:
                    starting_rev = latest_tree_rev - self.num_sequential

                # If we're not building each and every checkin, only build the latest
                if not self.sequential:
                    starting_rev = latest_tree_rev

                # Pretty much do every commit (for binary search on regressions) (should be adjustable)
                #  The + 1 is so that the latest tree revision will be checked (range func does not include the last number in the sequence)
                for i in range(starting_rev, latest_tree_rev + 1):

                    latest_for_package = self.src_repo.latest_path_revision(pack_obj.info["HEAD_PATH"], revision=i)
                    if not latest_for_package:
                        tarball_log.log("Error getting revision %d, trying later... (%s)\n" % (i, pack_name))
                        # Skip to next pack...
                        break

                    if not self.distfiles.contains("HEAD", pack_name, str(latest_for_package)) and not self.cancelled():
                        command = "cd %s; ./mktarball --snapshot %s %d" % (
                            config.packaging_dir,
                            pack_name,
                            latest_for_package,
                        )
                        tarball_log.log("Executing: %s\n" % (command))

                        # TODO: the system needs to be smarter about reinstalling the same rpms over and over...

                        # This will show console output, but not write to the log
                        #  Log will be for brief info, and the console will watch what's currently going on
                        # (For some reason my signal gets ignored if I'm using os.system... seems to work with popen)
                        (code, output) = utils.launch_process(command, print_output=0)
                        tarball_log.log("Exit code: %d (%s)\n" % (code, pack_name))

                        # handle jail busy errors (exit code of 2)
                        if code == 2:
                            tarball_log.log("Jail busy, retrying later... (%s)\n" % pack_name)

                        # handle svn timeouts
                        elif code == utils.KILLED_EXIT_CODE:
                            tarball_log.log("svn commands killed, retrying later... (%s)\n" % pack_name)

                        # Handle failed tarballs...
                        elif code:
                            tarball_log.log("Tarball creation failed...(%s)\n" % pack_name)

                            # Send out the log with the tarball, or at least a link... ?
                            link = "http://mono.ximian.com/monobuild/tarball_logs/HEAD/%s/%d.log" % (
                                pack_name,
                                latest_for_package,
                            )
                            utils.send_mail(
                                "*****@*****.**",
                                "*****@*****.**",
                                "mktarball failed (%s %d)" % (pack_name, latest_for_package),
                                "mktarball has failed for package %s revision %d\n\n%s"
                                % (pack_name, latest_for_package, link),
                            )

            time_duration = utils.time_duration_asc(start_time, utils.get_time()) * 60

            # Only sleep if this loop was shorter than max_poll_interval
            #  and if we do sleep, discount the time_duration
            if not self.cancelled() and time_duration < self.max_poll_interval:
                # tarball_log.log("Sleeping for %d seconds...\n" % (self.max_poll_interval - time_duration) )
                time.sleep(self.max_poll_interval - time_duration)

        # Exiting because we've been cancelled
        tarball_log.log("Tarball creator shutting down...\n")
Example #49
    def run(self):

        distro = self.distro
        scheduler_log.log("%s:\tStarting scheduler\n" % (distro))

        while not self.cancelled():

            packages_to_build = []
            for pack_def in config.sd_latest_build_packages:
                pack_obj = packaging.package("", pack_def)
                if pack_obj.valid_build_platform(distro):
                    packages_to_build.append(pack_def)

            num_started_builds = 0
            start_time = utils.get_time()

            # Build each package for this jail
            for package_name in packages_to_build:

                # Skip builds so we can exit
                if self.cancelled():
                    continue

                # Check to see what the latest tarball is
                # The src_file_repo class is not threadsafe, so provide a mutex here
                tarball_lock.acquire()
                try:
                    tarball_filename = tarballs.get_latest_tarball("HEAD", package_name)
                except:
                    # catch this in case the filename is being edited by hand
                    tarball_filename = ""
                tarball_lock.release()

                if not tarball_filename:
                    # scheduler_log.log("%s:\t*** Error getting latest tarball (%s) (Probably doesn't exist...)!!!\n" % (distro, package_name) )
                    pass

                else:

                    # print "Latest tarball: " + tarball_filename

                    # Get version
                    version, ext = version_re.search(tarball_filename).groups()

                    info = datastore.build_info("HEAD", distro, package_name, version)

                    # Build if the build doesn't exist already
                    if not info.exists:
                        command = "cd %s; ./build --suppress_output %s %s %s" % (
                            config.packaging_dir,
                            distro,
                            package_name,
                            version,
                        )
                        scheduler_log.log("%s:\t%s\n" % (distro, command))

                        num_started_builds += 1
                        # TODO: hmm... is this not blocking?  Seems this code continues before being able to run tests?
                        (code, output) = utils.launch_process(command, print_output=0)
                        # Testing...
                        # code = 2

                        # Is the jail busy?  if so, just repeat this loop (and select a new tarball if a newer one exists)
                        # Hmm... this really shouldn't happen, as much at least
                        if code == 2:
                            # scheduler_log.log("%s:\tJail is busy or offline... will retry again (%s)\n" % (distro, package_name) )
                            num_started_builds -= 1

                        if code == 5:
                            scheduler_log.log(
                                "%s:\tbuild info is missing, but packages exist... ?? will retry again (%s)\n"
                                % (distro, package_name)
                            )
                            num_started_builds -= 1
                    else:
                        # scheduler_log.log("%s:\tSkipping existing build (%s, %s)\n" % (distro, package_name, version) )
                        pass

            time_duration = utils.time_duration_asc(start_time, utils.get_time()) * 60
            if num_started_builds == 0 and time_duration < config.sd_wakeup_interval:
                # scheduler_log.log("%s:\tSleeping %d seconds...\n" % (distro, config.sd_wakeup_interval - time_duration) )
                time.sleep(config.sd_wakeup_interval - time_duration)

        # Exiting because we've been removed from the configuration
        scheduler_log.log("%s:\tExiting upon user request...\n" % distro)
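version_re is defined elsewhere in this module; the snippet only shows it being searched against a tarball file name. A plausible stand-in (an assumption, not the original pattern) that pulls the version and extension out of a name like mono-1.1.13.tar.gz:

    import re

    # Hypothetical stand-in for the module-level version_re used above
    version_re = re.compile(r"-(\d[\d.]*)\.(tar\.gz|tar\.bz2)$")

    match = version_re.search("mono-1.1.13.tar.gz")
    if match:
        version, ext = match.groups()  # ('1.1.13', 'tar.gz')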
def run(dir, maf, sizes_folder, species_file):
    grimm_synt_input_file = os.path.join(dir, utils.get_name(maf)+'.grimm_synt')
   
    print utils.get_time()
    print 'converting maf to input for grimm...'
    params = ['./generate_input_grimm_synt', maf, sizes_folder, grimm_synt_input_file, species_file]
    subprocess.check_call(params)
    
    print utils.get_time()
    print 'generating anchors for grimm_synt...'
    anchors_folder = os.path.join(dir,'grimm_synt_anchors')
    utils.create_dir_if_not_exists(anchors_folder)
    params = ['grimm_synt', '-A', '-f', grimm_synt_input_file, '-d', anchors_folder]
    subprocess.check_call(params)
    print utils.get_time()
    print 'running grimm_synt...'
    grimm_synt_output = os.path.join(dir,'grimm_synt_output')
    utils.create_dir_if_not_exists(grimm_synt_output)
    #used this for comparing mhc regions
    #params = ['grimm_synt','-f', os.path.join(anchors_folder, 'unique_coords.txt'),'-d',grimm_synt_output, '-m 1000 -g 1000 -c']
    #params = ['grimm_synt','-f', os.path.join(anchors_folder, 'unique_coords.txt'),'-d',grimm_synt_output,'-m', '300000', '-g', '300000', '-c']
    params = ['grimm_synt','-f', os.path.join(anchors_folder, 'unique_coords.txt'),'-d',grimm_synt_output,'-m', '100000', '-g', '100000', '-c']
    subprocess.check_call(params)
    print 'synteny blocks are at',os.path.join(grimm_synt_output,'blocks.txt')
    print utils.get_time()
    print 'creating bigBed files...'
    create_bigBed(grimm_synt_output, sizes_folder, species_file)
    print utils.get_time()
    print 'running grimm...'
    params = ['grimm', '-f', os.path.join(grimm_synt_output,'mgr_macro.txt'), '-o', os.path.join(dir,'grimm.output')]
    subprocess.call(" ".join(params), shell=True)
    print 'grimm output is saved to', os.path.join(dir,'grimm.output')
    print utils.get_time()
    
    print 'done.'
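A usage sketch for the pipeline above; every path and file name here is hypothetical:

    # Hypothetical invocation: convert a maf alignment, run grimm_synt, then grimm
    run(dir='out/hg19_mm10',
        maf='alignments/hg19_mm10.maf',
        sizes_folder='sizes/',
        species_file='species.txt')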
Exemple #51
0
def packagestatus(req, **vars):

	# Validate/sanitize args to catch some common security errors
	vars = www_utils.sanitize_args(vars)


	try:
		platform = vars['platform']
		package = vars['package']
		HEAD_or_RELEASE = vars['HEAD_or_RELEASE']
	except KeyError:
		return "Invalid arguments"

	timezone_offset = www_utils.get_tz_cookie(req.headers_in)

	versions = build.get_versions(HEAD_or_RELEASE, platform, package)
	versions.reverse()

	show_links = len(versions) > 10

	# Flag to show all builds or not
	showall = vars.has_key('showall')


	# Only show the first 10 versions unless showall link is pushed
	if not showall and len(versions) > 10:
		versions = versions[:10]

	refresh_html = ""
	if not showall:
		refresh_html = """<meta http-equiv="refresh" content="60">"""

	req.content_type = "text/html"

	req.write("""
	%s
	<html>
	<head>
	%s
	<title>Mono Build Status</title>
	<link rel="stylesheet" href="../../build.css" type="text/css">
	</head>

	<body>""" % (doc_type, refresh_html) )

	if versions:
		req.write("<h1>%s -- %s -- %s</h1>" % (package, platform, HEAD_or_RELEASE))

		# Print out links...
		if show_links:
			if showall:
				req.write('<p><a href="packagestatus?platform=%s&amp;package=%s&amp;HEAD_or_RELEASE=%s">Show Latest 10 Builds</a></p>' % (platform, package, HEAD_or_RELEASE) )
			else:
				req.write('<p><a href="packagestatus?platform=%s&amp;package=%s&amp;HEAD_or_RELEASE=%s&amp;showall=1">Show Full Build History</a></p>' % (platform, package, HEAD_or_RELEASE) )

		for version in versions:

			build_info = datastore.build_info(HEAD_or_RELEASE, platform, package, version)
			values = build_info.get_build_info()

			if values['start'] and values['finish']:
				duration = www_utils.timecolor(utils.time_duration_clock(values['start'], values['finish']))
			# Sometimes the latest build isn't the one running (usually a bug), but still show the running time
			elif values['start']:
				duration = "Running for %s" % www_utils.timecolor(utils.time_duration_clock(values['start'], utils.get_time()))
			else:
				duration = "?"

			# New times based on the client's timezone (accurate?)
			try:
				tz_start = utils.adjust_for_timezone(timezone_offset, values['start'])
				tz_finish = utils.adjust_for_timezone(timezone_offset, values['finish'])
			except:
				raise Exception(repr(version))

			req.write("""

			<h3><a href="%s#%s" name="%s">Build status - %s</a></h3>

			<div>
			<table class="packagestatus">
			<tbody>
			<tr>
			<th>Build started:</th>

			<td>%s</td>

			<th>Build completed:</th>
			<td>%s</td>

			<th>Duration:</th>
			<td>%s</td>

			</tr>

			<tr>
			<th>Build host:</th>
			<td>%s</td>

			</tr>
			</tbody></table>
			</div>

			<h4>Build Steps</h4>
			<div>
			<table class="packagestatus">
			<tbody>""" % (req.unparsed_uri, version, version, version, tz_start, tz_finish, duration, values['buildhost']) )

			# Step through the build steps...
			for step in build_info.get_steps_info(read_info=0):

				# Remove .gz extensions since apache will handle it
				log = os.path.join("..", "..", "builds", build_info.rel_files_dir, 'logs', step['log']).replace(".gz", "")

				h_class = ""
				if not ["success", "inprogress"].count(step['state']):
					h_class = 'class="faillinkcolor"'

				req.write("""
				<tr>
				<th>%s</th>
				<td><a %s href="%s">%s</a></td>
				""" % (step['name'], h_class, log, step['state']) )

				# If there's download info, and it exists, add it to the html
				#  (It won't exist on the mirrored public site)
				if step.has_key('download'):
					temp_rel_path =  os.path.join(build_info.rel_files_dir, "files", step['download'])
				else:   temp_rel_path = ""

				if temp_rel_path and os.path.exists(os.path.join(config.web_root_dir, 'builds', temp_rel_path)):

					download_file = os.path.join("..", "..", "builds", temp_rel_path)

					req.write("""
					<td><a href="%s">%s</a></td>
					""" % (download_file, step['download']) )
				else:
					req.write("<td></td>")

				if step['start'] and step['finish']:
					req.write("<td>[ %s ] </td>" % www_utils.timecolor(utils.time_duration_clock(step['start'], step['finish']) ))
				# Sometimes the latest build isn't the one running (usually a bug), but still show the running time
				elif step['start']:
					req.write("<td>[ Running for %s ] </td>" % www_utils.timecolor(utils.time_duration_clock(step['start'], utils.get_time()) ))
				req.write("</tr>")

				
			req.write("</tbody></table></div><br>")


	else:
		req.write("<h1>Mono Build Status</h1>")
		req.write("<p>No information found: %s -- %s -- %s</p>" % (package, platform, HEAD_or_RELEASE) )


	req.write("""
	</body>
	</html>""")
    start_date = datetime.date(2013, 4, 8)

    # End date
    end_date = datetime.date(2015, 7, 5)

    # Step size
    step_size = 400

    articles_lost = 0
    all_clusters = []

    data = utils.load_nyt(start_date=start_date.isoformat(),
                          end_date=end_date.isoformat(),
                          keywords="china")

    output_file = "top_articles_new/dump_to_file_" + utils.get_time()
    f = open(output_file, "a")

    for i in range(0, len(data), step_size):
        # with utils.stdout_redirect(f):

        # cluster = analyse.BisectingKmeans(data[i:i+step_size])
        cluster = analyse.BigClamArticle(data[i:i + step_size], coms=130)

        cluster.compute()
        cluster.find_computed_cluster_metrics()

        # for x in cluster.computed_clusters:
        #     x.display()

        print
def get_est_time(activities):
	rem_time=utils.get_time(activities)
	return json.dumps(rem_time)
def count_lengths_distribution_for_coverage(c,z):
    print utils.get_time()
    print 'calculating for coverage',c
    filtered_lengths = [j for _,j in filter(lambda x : x[0] == c,z)]
    print 'data size is',len(filtered_lengths)
    return filtered_lengths

def process_coverage(covs,lengths):
    print utils.get_time()
    print 'processing the data...'
    z = zip(covs,lengths)
    unique_covs = sorted(list(set(covs)))
    if 1 in unique_covs:
        unique_covs.remove(1)
    filtered_lengths=[]
    for c in unique_covs:
        if c == 27 :
            break
        filtered_lengths.append(count_lengths_distribution_for_coverage(c,z))
    draw_distr(filtered_lengths[:24],unique_covs[:24],'2-25.pdf')

         

if __name__ == '__main__' :
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help='file in BED format with four columns; the last column holds the coverage values')
    args = parser.parse_args()
    covs,lengths = extract_coverage(args.file, True)
    print utils.get_time()
    process_coverage(covs,lengths)
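To see what count_lengths_distribution_for_coverage extracts, here is a toy run with invented values:

    covs = [2, 3, 2, 5, 2]
    lengths = [10, 40, 12, 7, 30]
    z = zip(covs, lengths)  # pair each interval's coverage with its length
    twos = [j for _, j in filter(lambda x: x[0] == 2, z)]
    print twos  # [10, 12, 30] -- the lengths of all intervals at coverage 2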
 def post(self):
     title=u"注册"
     username=self.get_argument('username','')
     name=self.get_argument('name','')
     email=self.get_argument('email','')
     password=self.get_argument('password','')
     password_confirm=self.get_argument('password_confirm',None)
     coll=self.get_database()
     user=coll.find_one({'username':username})
     if user is not None:
         self.render("register.html",title=title,message="the username %s has already been registered"%username)
         return
     errMsg=''
     if '' in [username,name,email,password,password_confirm]:
         self.render("register.html",title=title,message="please input all fields")
         return
     if len(username)<6 or len(username)>16:
         errMsg+='the username must be 6 to 16 characters long'
     if '@' not in email:
         errMsg+='email is invalid'
     if password !=password_confirm:
         errMsg+='the passwords do not match'
     if errMsg!='':
         self.render("register.html",title=title,message=errMsg)
         return
     else:
         USER=User(username=username,email=email,name=name,password=password,created_at=get_time())
         coll=self.get_database()
         coll.insert_one({'username':username,'name':name,'email':email,'password':password,'User':USER})
         self.set_secure_cookie("username",username)
         self.redirect("/")
def generate_for_grimm() :
    if len(sys.argv) < 6:
        print 'USAGE:', sys.argv[0], 'grimm', 'homology_file', 'first_specie_gff', 'second_specie_gff', 'output_directory'
        exit()
    print utils.get_time()
    print 'parsing homology file...'
    homology = parse_homology(sys.argv[2])
    print utils.get_time()
    print 'parsing genes for the first species...'
    first_specie_genes = parse_genes(sys.argv[3])
    print utils.get_time()
    print 'parsing genes for the second species...'
    second_specie_genes = parse_genes(sys.argv[4])
    #homology = reduce_homology(first_specie_genes, second_specie_genes, homology)
    directory = sys.argv[5]
    utils.create_dir_if_not_exists(directory)
    file = os.path.join(directory,'grimm.input')
    print utils.get_time()
    print 'writing genes to file...'
    coding_table = output_genes('genome1', first_specie_genes, homology, file)
    output_genes_and_rename('genome2', second_specie_genes, homology, coding_table, file)
    coding_table_file = os.path.join(directory,'coding_table.txt')
    print utils.get_time()
    print 'saving coding table...'
    save_coding_table(coding_table, coding_table_file)
    print utils.get_time()
    print 'done.'
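The USAGE line above implies a command-line invocation along these lines (the script name and file names are hypothetical):

    python generate_grimm_input.py grimm homology.txt first.gff second.gff output_dir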
Exemple #57
0
def get_params():
   global params
   params={}
   params['run_mode']=0  # 0 = full, 1 = resume, 2 = X
   params["rn_id"]="lstm_tes" #running id, model
   params["notes"]="lstm with 16 joints" #running id
   params["model"]="lstm"#kccnr,dccnr
   params["optimizer"]="Adam" #1=classic kcnnr, 2=patch, 3=conv, 4 =single channcel
   params['seq_length']= 50
   params['validate']= 1
   params['mfile']= "autoencoder_autoencoder_144_0.0179401_best.p"

   params['batch_size']=1
   params['shufle_data']=1
   params["corruption_level"]=0.5

   #system settings
   wd=os.path.dirname(os.path.realpath(__file__))
   wd=os.path.dirname(wd)
   params['wd']=wd
   params['log_file']=wd+"/logs/"+params["model"]+"_"+params["rn_id"]+"_"+str(params['run_mode'])+"_"+utils.get_time()+".txt"
   params["model_file"]=wd+"/cp/"
   params["data_dir"]="/mnt/Data2/DataFelix/hc/rnn/old_blanket/"

   # early-stopping parameters
   params['patience']= 10000  # look at this many examples regardless
   params['patience_increase']=2  # wait this much longer when a new best is found
   params['improvement_threshold']=0.995  # a relative improvement of this much is considered significant

   # learning parameters
   params['momentum']=0.9    # the params for momentum
   params['lr']=0.00001
   params['learning_rate_decay']= 0.998
   params['squared_filter_length_limit']=15.0
   params['n_epochs']=25600
   params['n_hidden']= 1000
   params['n_output']= 48

   if(platform.node()=="coskunh"):
       params["caffe"]="/home/coskun/sftpkg/caffe/python"
       params['batch_size']=20
       params["WITH_GPU"]=False
       params['n_patch']= 1
       params['n_repeat']= 1
       params["data_dir"]="/home/coskun/PycharmProjects/data/auto/"
       params['n_hidden']= 128
       params['max_count']= 10000

   if(platform.node()=="milletari-workstation"):
       params["data_dir"]="/mnt/Data1/hc/img/"
       params["caffe"]="/usr/local/caffe/python"
       params["WITH_GPU"]=True
       params['n_hidden']= 1000
       params['max_count']=10000000000

   if(platform.node()=="cmp-comp"):
       params['batch_size']=60
       params["n_procc"]=1
       params["WITH_GPU"]=True
       params["caffe"]="/home/coskun/sftpkg/caffe/python"
       params["data_dir"]="/mnt/Data1/hc/joints/"
       params['n_hidden']= 128
       params['max_count']= 100

   #params['step_size']=[10]
   params['test_size']=0.20 #Test size
   params['val_size']=0.20 #val size
   params['test_freq']=100 #Test frequency
   return params
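The platform.node() blocks above are a per-host override pattern: shared defaults first, then machine-specific values layered on top. The same idea in a compact, dictionary-driven sketch (host names reused from the example, values illustrative):

    import platform

    defaults = {'batch_size': 1, 'n_hidden': 1000, 'WITH_GPU': False}
    per_host = {
        'coskunh':  {'batch_size': 20, 'n_hidden': 128},
        'cmp-comp': {'batch_size': 60, 'n_hidden': 128, 'WITH_GPU': True},
    }
    defaults.update(per_host.get(platform.node(), {}))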
Exemple #58
0
def update_params(params):
   params['log_file']=params["wd"]+"/logs/"+params["model"]+"_"+params["rn_id"]+"_"+str(params['run_mode'])+"_"+utils.get_time()+".txt"
   return params
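update_params re-stamps the log path, so a resumed run writes to a fresh, newly timestamped log file. A sketch of the intended flow, with the run_mode value taken from the comment in get_params above:

    params = get_params()
    params['run_mode'] = 1          # 1 = resume, per the comment in get_params
    params = update_params(params)  # regenerate the timestamped log file path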
Exemple #59
0
def main():
    first = utils.get_time("Enter the first time: ", "Die in a fire") 
    second = utils.get_time("Enter the second time: ", "Fall off a bridge") 
    print find_difference(split(first, second))
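This utils.get_time differs from every other one on this page: it takes a prompt and what appears to be a message for invalid input. A minimal sketch under that assumption; find_difference and split remain the hypothetical helpers the example calls:

    def get_time(prompt, complaint):
        # Hypothetical: re-prompt until the input parses as HH:MM,
        # returning the time as minutes past midnight
        while True:
            raw = raw_input(prompt)
            try:
                hours, minutes = map(int, raw.split(':'))
                return hours * 60 + minutes
            except ValueError:
                print complaint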
Exemple #60
0
def get_params():
   global params
   params={}
   params['run_mode']=3  # 1 = training, 2 = resuming training, 3 = prediction
   params["rn_id"]="normal_b" #running id, model
   params["notes"]="blanket lstm simple running" #running id
   params["model"]="lstm"#kccnr,dccnr
   params["optimizer"]="Adam" #1=classic kcnnr, 2=patch, 3=conv, 4 =single channcel
   params['seq_length']= 20
   params['validate']= 1
   params['mfile']= "lstm_normal_0.p"

   params['batch_size']=100
   params['shufle_data']=1
   params['max_count']= 300

   #system settings
   wd=os.path.dirname(os.path.realpath(__file__))
   wd=os.path.dirname(wd)
   params['wd']=wd
   params['log_file']=wd+"/logs/"+params["model"]+"_"+params["rn_id"]+"_"+str(params['run_mode'])+"_"+utils.get_time()+".txt"
   params["model_file"]=wd+"/cp/"
   params["data_dir"]="/home/coskun/projects/data/old_blanket/"




   # early-stopping parameters
   params['patience']= 10000  # look at this many examples regardless
   params['patience_increase']=2  # wait this much longer when a new best is found
   params['improvement_threshold']=0.995  # a relative improvement of this much is considered significant

   #Model parameters...
   params['n_hidden']= 512
   params['n_output']= 42
   params['input_size']=1024
   # learning parameters
   params['momentum']=0.9    # the params for momentum
   params['lr']=0.0001
   params['lr_decay']=0.01
   params['squared_filter_length_limit']=15.0
   params['n_epochs']=10000
   params['keep_prob']= 0.75
   params['max_grad_norm']= 5
   params["init_scale"]= 0.05


   if(platform.node()=="coskunh"):
       params["caffe"]="/home/coskun/sftpkg/caffe/python"
       params['batch_size']=10
       params["WITH_GPU"]=False
       params['n_patch']= 1
       params['n_repeat']= 1
       params['n_hidden']= 128
       params['max_count']= 100000

   if(platform.node()=="milletari-workstation"):
       params["data_dir"]="/home/coskun/PycharmProjects/data/rnn/180k/"
       params["caffe"]="/usr/local/caffe/python"
       params["WITH_GPU"]=True
       params['n_hidden']= 128
       params['max_count']= 30000000

   if(platform.node()=="cmp-comp"):
       params['batch_size']=60
       params["n_procc"]=1
       params["WITH_GPU"]=True
       params["caffe"]="/home/coskun/sftpkg/caffe/python"
       params["data_dir"]="/home/cmp/PycharmProjects/data/rnn/"
       params['n_hidden']= 128
       params['max_count']= 3000000

   #params['step_size']=[10]
   params['test_size']=0.20 #Test size
   params['val_size']=0.20 #val size
   params['test_freq']=10 #Test frequency
   return params