Example #1
def main():
    DataManager.create_instances_from_data('malopolska.csv')
    # UI.welcome_screen()
    while True:
        UI.print_menu()
        option = UI.user_input("Option: ")
        if option == '1':
            UI.print_result(UI.build_table(['','MAŁOPOLSKIE'], Statistic.total()))
            UI.user_input('\nPress Enter to continue...')
        elif option == '2':
            UI.print_result('\n'.join(Statistic.three_longest_city()))
            UI.user_input('\nPress Enter to continue...')
        elif option == '3':
            UI.print_result(Statistic.largest_number_of_commun())
            UI.user_input('\nPress Enter to continue...')
        elif option == '4':
            UI.print_result(Statistic.more_than_one_category())
            UI.user_input('\nPress Enter to continue...')
        elif option == '5':
            UI.print_result(UI.build_table(['LOCATION', 'TYPE'], Statistic.advanced_search(UI.user_input('Searching for: '))))
            UI.user_input('\nPress Enter to continue...')
        elif option == '0':
            UI.print_result('')
            break
        else:
            pass  # unrecognized option: show the menu again
Example #2
def main(_):
  model_dir = util.get_model_dir(conf, 
      ['data_dir', 'sample_dir', 'max_epoch', 'test_step', 'save_step',
       'is_train', 'random_seed', 'log_level', 'display', 'runtime_base_dir', 
       'occlude_start_row', 'num_generated_images'])
  util.preprocess_conf(conf)
  validate_parameters(conf)

  data = 'mnist' if conf.data == 'color-mnist' else conf.data 
  DATA_DIR = os.path.join(conf.runtime_base_dir, conf.data_dir, data)
  SAMPLE_DIR = os.path.join(conf.runtime_base_dir, conf.sample_dir, conf.data, model_dir)

  util.check_and_create_dir(DATA_DIR)
  util.check_and_create_dir(SAMPLE_DIR)
  
  dataset = get_dataset(DATA_DIR, conf.q_levels)

  with tf.Session() as sess:
    network = Network(sess, conf, dataset.height, dataset.width, dataset.channels)

    stat = Statistic(sess, conf.data, conf.runtime_base_dir, model_dir, tf.trainable_variables())
    stat.load_model()

    if conf.is_train:
      train(dataset, network, stat, SAMPLE_DIR)
    else:
      generate(network, dataset.height, dataset.width, SAMPLE_DIR)
Example #3
    def compare_factor_teach_test(self, db_teach, db_test, col_names=ALL):
        db_teach = db_teach.copy()
        db_test = db_test.copy()

        if 'amount' not in list(db_teach):
            db_teach['amount'] = 1
        if 'amount' not in list(db_test):
            db_test['amount'] = 1

        if isinstance(col_names, str) and col_names == ALL:
            col_names = list(db_teach)
        factor_list = self.get_list_factors(col_names)

        compare_diff = []

        for f in factor_list:
            st_teach = Statistic.get_stat_summarise_by_column(db_teach, f)
            st_test = Statistic.get_stat_summarise_by_column(db_test, f)
            st = st_test.merge(st_teach,
                               left_index=True,
                               right_index=True,
                               how='left')
            st.p_y = st.p_y.fillna(0.5)
            diff_val = ((st.p_x - st.p_y) * st.n_x).abs().sum()
            compare_diff.append(diff_val)

        data = {'factor': factor_list, 'compare_diff': compare_diff}
        df = pd.DataFrame(data)
        df.sort_values(by='compare_diff', ascending=False, inplace=True)
        return df
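Note: the p_x/p_y/n_x columns used above come from pandas' default merge suffixes when both frames carry the same column names; a minimal sketch with toy frames (the column names here are illustrative):

import pandas as pd

teach = pd.DataFrame({'p': [0.1], 'n': [10]}, index=['a'])
test = pd.DataFrame({'p': [0.4], 'n': [7]}, index=['a'])
merged = test.merge(teach, left_index=True, right_index=True, how='left')
print(list(merged.columns))  # ['p_x', 'n_x', 'p_y', 'n_y'] -- left frame gets _x, right gets _y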
Example #4
 def run(self):
     """
     Compute the statistics
     """
     biter = self._getBiomodelIterator()
     report_count = REPORT_INTERVAL
     df = pd.DataFrame()
     for shim in biter:
         if shim.getException() is None:
             stat_dict = Statistic.getAllStatistics(shim)
         else:
             stat_dict = ErrorStatistic(shim).getStatistic()
         df = df.append(stat_dict, ignore_index=True)
         if IS_MAIN:
             report_count -= 1
             if report_count < 1:
                 df.to_csv(self._ot_path_data)
                 print("Completed Biomodel ID %s." % shim.getBiomodelId())
                 report_count = REPORT_INTERVAL
     df.to_csv(self._ot_path_data, index=False)
     doc_dict = {
         "Column": Statistic.getDoc().keys(),
         "Description": Statistic.getDoc().values(),
     }
     pd.DataFrame(doc_dict).to_csv(self._ot_path_doc, index=False)
     if IS_MAIN:
         print("Done!")
Example #5
    def __init__(self,
                 teach: pd.DataFrame,
                 test: pd.DataFrame,
                 white_list: pd.DataFrame):  # required: accessed unconditionally below

        bad_statuses = Statistic.BAD_STATUSES
        self.N_WHITE_LIST = white_list.shape[0]

        self.N_TEACH = teach.shape[0]
        self.N_TEACH_BAD = Statistic.get_dt_bad(teach).shape[0]

        self.N_TEST = test.shape[0]
        test.amount = pd.to_numeric(test.amount, errors="coerce")
        self.AMOUNT_TEST = sum(test.amount)

        test["cum_amount"] = test.amount.cumsum()

        test_bad = Statistic.get_dt_bad(test)
        self.N_TEST_BAD = test_bad.shape[0]
        self.AMOUNT_TEST_BAD = sum(test_bad.amount)

        test_in_wl = test[test.id.isin(white_list.ID)]
        self.AMOUNT_TEST_IN_WL = sum(test_in_wl.amount)
        self.N_TEST_IN_WL = test_in_wl.shape[0]

        mask = (test.id.isin(white_list.ID)) & (test.status.isin(bad_statuses))
        test_bad_in_wl = test[mask]

        self.N_TEST_BAD_IN_WL = test_bad_in_wl.shape[0]
        self.AMOUNT_TEST_BAD_IN_WL = sum(test_bad_in_wl.amount)
Example #6
def do_default():
    """
    Play a neural-network agent against a random agent
    """
    player1 = get_agent_by_config_name('nn_pg_2', 'best')
    player2 = get_agent_by_config_name('random', 'None')

    player1.training = True
    player2.training = True

    stats = Statistic(player1, verbose=True)

    # play games forever
    while True:

        bg = Backgammon()
        bg.set_player_1(player1)
        bg.set_player_2(player2)
        winner = bg.play()

        player1.add_reward(winner)
        player2.add_reward(-winner)

        # Reward the neural network agent
        # player1.reward_player(winner)

        stats.add_win(winner)
Example #7
def nn_vs_nn_export_better_player():
    player1 = NNAgent1(verbose=True)
    player2 = NNAgent1(load_best=True)

    stats = Statistic(player1, verbose=True)

    while True:
        bg = Backgammon()
        bg.set_player_1(player1)
        bg.set_player_2(player2)
        winner = bg.play()

        player1.add_reward(winner)
        player2.add_reward(-1 * winner)

        stats.add_win(winner)

        if stats.nn_is_better() and stats.games_played % 100 == 0:
            break

    # only way to reach this point is if the current
    # neural network is better than the BestNNAgent()
    # ... at least I think so
    # thus, we export the current as best
    print("Congratulations, you brought the network one step closer")
    print("to taking over the world (of backgammon)!!!")
    player1.export_model(filename="nn_best_model")
Example #8
def generate(dataset_name, occlusions=False):
    # Load dataset
    dataset, image_height, image_width, num_channels, next_train_batch, next_test_batch = load_images(dataset_name)

    # setup train, test
    train = dataset.train
    test = dataset.test
    SAMPLE_DIR = os.path.join('samples', dataset_name, 'generate')

    with tf.Session() as sess:
        network = Network(sess, image_height, image_width, num_channels)
        # tf.initialize_all_variables().run()

        stat = Statistic(sess, dataset_name, './', tf.trainable_variables(), 0)
        stat.load_model()
        num_images = 100
        if occlusions:
            orig_images = next_test_batch(num_images).reshape(
                        [num_images, image_height, image_width, num_channels])
            orig_images[:, image_height // 2:, :, :] = 0
            samples = network.generate_images(num_images, starting_pos=[0, image_height // 2], starting_image=orig_images)
            # original_occlusions
            occlusion_dir = os.path.join('samples', dataset_name, "occlusions")
            save_images(orig_images, image_height, image_width, 10, 10, directory=occlusion_dir)
        else:
            samples = network.generate_images(num_images)
        save_images(samples, image_height, image_width, 10, 10, directory=SAMPLE_DIR)
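The occlusion branch above blanks the bottom half of each image and then resumes generation from that row; a toy illustration of just the masking step (shapes here are illustrative):

import numpy as np

num_images, image_height, image_width, num_channels = 4, 28, 28, 1
orig_images = np.random.rand(num_images, image_height, image_width, num_channels)
orig_images[:, image_height // 2:, :, :] = 0  # zero the bottom half; generation restarts at row height // 2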
Example #9
 def __init__(self, refPath, dataPath, dbFilename):
     GPIO.cleanup()
     GPIO.setmode(GPIO.BCM)
     GPIO.setup(self.AUTO_START_GPIO_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
     self.__i2c = I2C(2)
     self.__analog = Analog(self.__i2c.getLock(), 0x49)
     self.default = Default()
     self.database = Database(dataPath, dbFilename)
     self.waterlevel = Waterlevel(debug, self.database)
     self.light = Light(self.database)
     self.curtain = Curtain(self.database)
     self.pressure = Pressure(self.database, self.default, self.__analog)
     self.temperature = Temperature("28-0417716a37ff", self.database)
     self.redox = Redox(debug, self.database, self.default, self.__analog)
     self.ph = PH(debug, self.database, self.default, self.__analog,
                  self.temperature)
     self.robot = Robot(debug, self.database)
     self.pump = Pump(debug, self.database, self.default, self.robot,
                      self.redox, self.ph, self.temperature)
     self.panel = Panel(debug, self.database, self.default, self.pump,
                        self.redox, self.ph, self.__i2c)
     self.statistic = Statistic(debug, self.pump, self.robot, self.redox,
                                self.ph, self.temperature, self.pressure,
                                self.waterlevel)
     self.refPath = refPath
     self.__autoSaveTick = 0
     self.__today = date.today().day - 1
     debug.TRACE(debug.DEBUG, "Initialisation done (Verbosity level: %s)\n",
                 debug)
Example #10
 def __init__(self, master=None, channel=None):
     super().__init__(master)
     self.master = master
     self.channel = channel
     self.pack()
     self.light1_stat = Statistic()
     self.light2_stat = Statistic()
     self.create_widgets()
Example #11
 def get_xgb_weight(self) -> pd.Series:
     teach = self.teach
     n_sample = teach.shape[0]
     n_bad = Statistic.get_dt_bad(teach).shape[0]
     w = n_sample // n_bad
     teach["amount"] = pd.to_numeric(teach.amount, errors="coerce")
     weight = np.where(Statistic.is_status_bad(teach), w, 1)
     return pd.Series(weight, index=teach.index)
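For context, a minimal sketch of how such a per-row weight is typically consumed by xgboost (the data here is toy; only the weight= hookup mirrors the method above):

import numpy as np
import pandas as pd
import xgboost as xgb

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(100, 3)), columns=['a', 'b', 'c'])
y = (rng.random(100) < 0.05).astype(int)            # ~5% "bad" rows
w = np.where(y == 1, len(y) // max(y.sum(), 1), 1)  # up-weight the rare class

dtrain = xgb.DMatrix(X, label=y, weight=w)
booster = xgb.train({'objective': 'binary:logistic'}, dtrain, num_boost_round=50)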
Example #12
 def get_stat_string(light_stat: Statistic):
     ret = list()
     ret.append("Usage of the light:\n ")
     ret.append(str(int(light_stat.get_time() / 60)))
     ret.append(" min ")
     ret.append(str(light_stat.get_time() % 60))
     ret.append(" sec\n Total time activation:\n ")
     ret.append(str(int(light_stat.get_time_from_activation() / 60)))
     ret.append(" min ")
     ret.append(str(light_stat.get_time_from_activation() % 60))
     ret.append(" sec")
     return ''.join(ret)
Example #13
    def imitate(self, req_matrix):
        requests = self.create_request_list(req_matrix)

        requests.sort(key=lambda x: x.start)

        # start imitating
        stat = Statistic(self.nodes)

        for request in requests:
            stat.add_req(request)

        return stat
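The sort above was originally written for Python 2's comparator API; under Python 3 the key= form shown is the fix, and functools.cmp_to_key can wrap the original comparator verbatim if its exact semantics ever matter. A self-contained check:

import functools

class Req:
    def __init__(self, start):
        self.start = start

reqs = [Req(3), Req(1), Req(2)]
reqs.sort(key=functools.cmp_to_key(lambda x, y: x.start - y.start))
assert [r.start for r in reqs] == [1, 2, 3]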
Example #14
    def testStatisticDaily(self):
        fname = u"examples/example_1_small.log"
        parser = LogParser(loadFile(fname))

        stat = Statistic(parser.commits)
        statlist = stat.getStat(Daily)

        self.assertEqual(len(statlist), 6)
        self.assertEqual(statlist[0][0], datetime.datetime(2012, 11, 24))
        self.assertEqual(statlist[0][1], 6)

        self.assertEqual(statlist[-1][0], datetime.datetime(2012, 12, 14))
        self.assertEqual(statlist[-1][1], 1)
Example #15
 def __init__(self, p1, p2):
     self.p1 = p1
     self.p2 = p2
     self.winner_stat = Statistic()
     self.nb_round_stat = Statistic()
     self.first_player_stat = Statistic()
     self.damage_stat = Statistic()
Example #16
def run_game():
    """Run the picture-selection game."""
    # initialize the screen object
    pygame.init()
    sp_settings = Settings()
    screen = pygame.display.set_mode(
        (sp_settings.screen_width, sp_settings.screen_height))
    pygame.display.set_caption('Select Picture')
    # gf.generate_pictures(sp_settings)  # generate the picture library (needed on first use)
    files = os.listdir('images')  # all picture file names under images
    # load all pictures
    all_pictures = Group()
    selected_pictures = Group()
    unselected_pictures = Group()
    optional_pictures = Group()
    for file in files:
        pics = Pictures(sp_settings, screen, file)
        pics.update()
        all_pictures.add(pics)
    # initialize the count of remaining pictures
    stats = PicturesStats(len(all_pictures.sprites()))
    gf.choice_pictures(stats, all_pictures, selected_pictures,
                       unselected_pictures, optional_pictures)
    # tip messages
    tips = Tips(sp_settings, screen, stats)
    # statistics chart
    st_chart = Statistic(sp_settings, screen)
    # pop up the login box
    user_name = easygui.enterbox('Please input your name...', title='')
    while not user_name:  # loop until a valid name is entered or Cancel is pressed
        if user_name is None:  # Cancel: exit the program
            sys.exit()
        user_name = easygui.enterbox('Please input your name...', title='')

    easygui.msgbox('Welcome %s' % (user_name, ))
    st_chart.load_history(user_name + '.txt')
    pygame.display.set_caption(user_name.title() + ' Select Picture ...')
    # main loop
    while True:
        # handle events
        gf.check_events(sp_settings, screen, stats, st_chart, all_pictures,
                        user_name, selected_pictures, unselected_pictures,
                        optional_pictures)
        gf.update_help_string(sp_settings, stats, optional_pictures)
        gf.update_tips(tips)
        gf.update_screen(sp_settings, screen, stats, tips, st_chart,
                         all_pictures, selected_pictures, unselected_pictures,
                         optional_pictures)
Example #17
    def get_feature_importance(self, db_teach, col_names=ALL):
        db_teach = db_teach.copy()
        if 'amount' not in list(db_teach):
            db_teach['amount'] = 1

        if isinstance(col_names, str) and col_names == ALL:
            col_names = list(db_teach)

        factor_list = self.get_list_factors(col_names)

        feature_importance = []
        n_unique = []

        for f in factor_list:
            st = Statistic.get_stat_summarise_by_column(db_teach, f)
            value = (st.p - 0.5).abs().sum()
            feature_importance.append(value)
            n_unique.append(st.shape[0])

        data = {
            'feature': factor_list,
            'feature_importance': feature_importance,
            'n_unique': n_unique
        }
        df = pd.DataFrame(data)
        df.sort_values(by='feature_importance', ascending=False, inplace=True)
        return df
Example #18
 def emulate_work(self, time_delta, requests_total_count):
     current_time = 0.0
     requests = []
     time_to_next_req_gen = self._get_time_req_gen(expon(scale=2.0), requests_total_count)
     time_to_next_req = 0
     rejected_requests = 0
     while np.any([not phase.is_free() for phase in self.phases]) or time_to_next_req_gen.has_next():
         while time_to_next_req == 0 and time_to_next_req_gen.has_next():
             requests.append(Request(time_start=current_time))
             if self.phases[0].has_place_for_request():
                 self.phases[0].send_request(requests[-1])
             else:
                 rejected_requests += 1
             time_to_next_req = round(time_to_next_req_gen.next(), 2)
         for i, phase in reversed(list(enumerate(self.phases))):
             waiting_channels = phase.move(time_delta)
             if i != len(self.phases) - 1:
                 while self.phases[i + 1].has_place_for_request() and len(waiting_channels) > 0:
                     channel = waiting_channels.pop(waiting_channels.index(
                         min(waiting_channels, key=lambda channel: channel.request.time_start)
                     ))
                     request = channel.take_request()
                     self.phases[i + 1].send_request(request)
             else:
                 for channel in waiting_channels:
                     request = channel.take_request()
                     request.time_end = current_time
         current_time = round(current_time + time_delta, 2)
         if time_to_next_req > 0:
             time_to_next_req = round(time_to_next_req - time_delta, 2)
         if current_time % 1000 == 0:
             print('{}%'.format(len(requests) / float(requests_total_count)))
     print('3.1 Requests process = {}, Current time = {}'.format(len(requests), current_time))
     return Statistic(requests=requests, phases=self.phases), current_time
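expon(scale=2.0) above is a frozen scipy distribution used to draw inter-arrival times with mean 2.0; a quick sketch of what the generator is sampling from:

from scipy.stats import expon

inter_arrivals = expon(scale=2.0).rvs(size=5, random_state=0)
print(inter_arrivals)  # five exponential gaps with mean 2.0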
Example #19
 def show_statistic(self):
     text, ok = QInputDialog.getText(self, 'Input Dialog', 'Enter name:')
     temp = Statistic.get_player_stat(text)
     self.text.insertPlainText('\n' + text + ' -  games: ' + str(temp[0]) +
                               ' win rate: ' + str(temp[1]) +
                               ' chip rate: ' + str(temp[2]) + '\n')
     self.text.ensureCursorVisible()
Example #20
    def fit(self, db_teach, col_names=ALL):
        db_teach = db_teach.copy()
        model_bayes = {}
        if 'amount' not in list(db_teach):
            db_teach['amount'] = 1

        if isinstance(col_names, str) and col_names == ALL:
            col_names = list(db_teach)

        factor_list = self.get_list_factors(col_names)
        print("factor_list = {}".format(factor_list))

        for col in factor_list:
            st = Statistic.get_stat_summarise_by_column(db_teach, col)
            col_good = col + '_fw'
            col_bad = col + '_tw'

            if self.calculation_type == 'counter':
                st.false_weight = np.where(st.n_bad == 0, 1, st.false_weight)
                st.true_weight = np.where(st.n_bad == 0, 1, st.true_weight)
            else:
                st.false_weight = np.where(st.n_bad == 0, 1,
                                           st.false_amount_weight)
                st.true_weight = np.where(st.n_bad == 0, 1,
                                          st.true_amount_weight)

            data = {
                col: list(st.index),
                col_good: st.false_weight.values,
                col_bad: st.true_weight.values
            }
            df = pd.DataFrame(data)
            model_bayes[col] = df

        self.model_bayes = model_bayes
Example #21
    def test_train_test_split_with_diff_ids_good_id(self):
        df = DataForTests.df_split
        X_train, X_validation, y_train, y_validation = Statistic.train_test_split_with_diff_ids(
            df, test_has_unique_ids=True)

        X_train['status'] = y_train
        X_validation['status'] = y_validation

        df_train = pd.merge(X_train, df[['id', 'n']], how='left', on=['n'])
        df_validation = pd.merge(X_validation,
                                 df[['id', 'n']],
                                 how='left',
                                 on=['n'])

        mask = df_train.status.isin(Statistic.BAD_STATUSES)
        train_good_ids = df_train[~mask].id.unique()

        mask = df_validation.status.isin(Statistic.BAD_STATUSES)
        validate_good_ids = df_validation[~mask].id.unique()

        self.assertGreater(len(train_good_ids), 0, 'X_train has no good ids')
        self.assertGreater(len(validate_good_ids), 0,
                           'X_validation has no good ids')

        s_intersect = list(set(train_good_ids) & set(validate_good_ids))

        self.assertEqual(
            len(s_intersect), 0,
            'X_train, X_validation have common good ids: {}'.format(s_intersect))
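train_test_split_with_diff_ids evidently guarantees that no id appears on both sides of the split, which is what the intersection assertion checks; scikit-learn's GroupShuffleSplit provides the same guarantee and is shown here only as a point of comparison (toy data):

import numpy as np
from sklearn.model_selection import GroupShuffleSplit

X = np.arange(20).reshape(10, 2)
y = np.zeros(10)
groups = [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]  # one id per row
train_idx, val_idx = next(GroupShuffleSplit(test_size=0.4, random_state=0).split(X, y, groups))
assert not set(np.array(groups)[train_idx]) & set(np.array(groups)[val_idx])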
Example #22
 def get_count_3ds(self, percent: int) -> (float, float):
     test = self.get_convert_test()
     n_rows = round(self.params.N_TEST * percent / 100)
     test_first_n_rows = test.iloc[:n_rows, :]
     n_bad = Statistic.get_dt_bad(test_first_n_rows).shape[0]
     result = round(100 * n_bad / self.params.N_TEST_BAD, 2)
     threshold = round(test.probability.values[n_rows - 1], 6)
     return float(result), float(threshold)
Example #23
 def __initEmptyStatsList(self):
     self.statsList.clear()
     
     self.statsList.append(Statistic("Total picture reveals", 0, Constants.TRIGGER_REVEALED_IMAGE, False))
     self.statsList.append(Statistic("Correct guesses", 0, Constants.TRIGGER_FOUND_IMAGE, False))
     self.statsList.append(Statistic("Wrong guesses", 0, Constants.TRIGGER_FOUND_WRONG_IMAGE, False))
     self.statsList.append(Statistic("Total in-game time", 0, Constants.TRIGGER_EXIT_LEVEL, True))
     self.statsList.append(Statistic("Attempted games", 0, Constants.TRIGGER_START_GAME, False))
     self.statsList.append(Statistic("Finished games", 0, Constants.TRIGGER_FINISH_GAME, False))
     self.statsList.append(Statistic("Money earned", 0, Constants.TRIGGER_EARNED_MONEY, False))
     self.statsList.append(Statistic("Money spent", 0, Constants.TRIGGER_SPENT_MONEY, False))
     self.statsList.append(Statistic("Drinks bought", 0, Constants.TRIGGER_BOUGHT_DRINK, False))
     
     self.__saveStats()
Example #24
 def get_amount_3ds(self, percent: int) -> (float, float):
     test = self.get_convert_test()
     test_first_cumsum_rows = test[test.cum_amount < percent *
                                   self.params.AMOUNT_TEST / 100]
     amount_bad = sum(Statistic.get_dt_bad(test_first_cumsum_rows).amount)
     result = round(100 * amount_bad / self.params.AMOUNT_TEST_BAD, 2)
     n_rows = test_first_cumsum_rows.shape[0]
     threshold = round(test.probability.values[n_rows - 1], 6)
     return float(result), float(threshold)
Example #25
    def test_train_test_split_with_diff_ids_size(self):
        df = DataForTests.df_split
        X_train, X_validation, y_train, y_validation = Statistic.train_test_split_with_diff_ids(
            df)

        self.assertEqual(X_train.shape[0], len(y_train),
                         'X_train, y_train have diff size')

        self.assertEqual(X_validation.shape[0], len(y_validation),
                         'X_validation, y_validation have diff size')
Example #26
    def getRecentMonthCateStatistic(self, recentNumber):
        year = datetime.date.today().year
        month = datetime.date.today().month
        sumPerc, sumCost = Statistic().getRecentMonthCateStatistic(
            recentNumber)
        allCate = Analysis().getAllCategoryAndId()
        csvStr = ''
        csvStr += '分类'
        csvPerc = ''
        csvPerc += '分类'
        # write the header row
        for i in range(0, recentNumber):
            thisMonth = month - i
            if thisMonth < 1:
                thisMonth = 12 + thisMonth
                thisYear = year - 1
            else:
                thisYear = year
            csvPerc += ",%s-%s(%%)" % (thisYear, thisMonth)
            csvStr += ",%s-%s(¥)" % (thisYear, thisMonth)
        csvStr += "\n"
        csvPerc += "\n"
        # manually add the '总计' (total) row
        csvStr += '总计'
        for i in range(0, recentNumber):
            thisMonth = month - i
            if thisMonth < 1:
                thisMonth = 12 + thisMonth
            csvStr += ',' + str(sumCost['总计'][thisMonth])
        csvStr += "\n"

        for cate in allCate:
            # loop over categories
            csvStr += "%s" % cate['name']
            csvPerc += "%s" % cate['name']
            for i in range(0, recentNumber):
                # loop over months
                thisMonth = month - i
                if thisMonth < 1:
                    thisMonth = 12 + thisMonth

                # percent for this category
                try:
                    csvPerc += ',' + sumPerc[cate['name']][thisMonth] + '%'
                except KeyError:
                    csvPerc += ',0%'
                # cost for this category
                try:
                    csvStr += ',' + str(sumCost[cate['name']][thisMonth])
                except KeyError:
                    csvStr += ',0'
            csvStr += '\n'
            csvPerc += "\n"
        return csvStr + '\n' + csvPerc
Example #27
def train(dataset_name, max_epochs=10000, test_period=1):
    # Load dataset
    dataset, image_height, image_width, num_channels, next_train_batch, next_test_batch = load_images(
        dataset_name)

    # setup train, test
    train = dataset.train
    test = dataset.test

    num_train_batches = train.num_examples // BATCH_SIZE
    num_test_batches = test.num_examples // BATCH_SIZE

    with tf.Session() as sess:
        network = Network(sess, image_height, image_width, num_channels)
        # tf.initialize_all_variables().run()

        # TODO make more general
        stat = Statistic(sess, 'mnist', 'train', tf.trainable_variables(),
                         test_period)
        stat.load_model()
        SAMPLE_DIR = os.path.join('samples', 'mnist', 'train')
        initial_step = stat.get_t() if stat else 0

        sampled_images = []
        for epoch in xrange(max_epochs):
            print('Current epoch: %i' % epoch)
            training_costs = []
            for i in xrange(num_train_batches):
                images = binarize(next_train_batch(BATCH_SIZE)).reshape(
                    [BATCH_SIZE, image_height, image_width, num_channels])
                cost = network.test(images, with_update=True)
                training_costs.append(cost)
            # test
            if epoch % test_period == 0:
                print('Running tests...')
                testing_costs = []
                for i in xrange(num_test_batches):
                    images = binarize(next_test_batch(BATCH_SIZE)).reshape(
                        [BATCH_SIZE, image_height, image_width, num_channels])

                    cost = network.test(images, with_update=False)
                    testing_costs.append(cost)
                avg_train_cost = np.average(training_costs)
                avg_test_cost = np.average(testing_costs)
                print('Test cost at epoch %d: %04f' % (epoch, avg_test_cost))
                stat.on_step(avg_train_cost, avg_test_cost)

                samples = network.generate_images(100)
                save_images(samples,
                            image_height,
                            image_width,
                            10,
                            10,
                            directory=SAMPLE_DIR)
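binarize is not defined in these snippets; the usual helper in such PixelRNN/PixelCNN examples is stochastic binarization, so the following definition is an assumption, not the repo's code:

import numpy as np

def binarize(images):
    # assumed helper: sample each pixel to {0, 1} with probability equal to its intensity
    return (np.random.uniform(size=images.shape) < images).astype('float32')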
Example #28
def upload_statistics():
    stt = Statistic(source_data, data_frame)
    mode = stt.mode[0][0]
    mode = mode.tolist()
    return render_template('upload_statistics.html',
                           mean=stt.mean,
                           median=stt.median,
                           mode=mode,
                           min=stt.min,
                           max=stt.max,
                           var=stt.var,
                           frame=data_frame,
                           method='Statistics')
Example #29
class TestStatistic(TestCase):
    def test_add_req(self):
        #given
        dest = Infocenter(10, 3)
        self.statistic = Statistic([(Node()), dest])

        request = Request(0, 1, 1)

        #when
        self.statistic.add_req(request)

        #then
        self.assertIsNone(self.statistic.timeline.state_for(dest, 0))

        state = self.statistic.timeline.state_for(1, 1)
        self.assertEqual(state.start, 1)
        self.assertTrue(8 <= state.end <= 14)
        self.assertEqual(state.node, 1)


    def test_add_req_with_intersection(self):
        # given
        dest = Infocenter(10, 3)
        self.statistic = Statistic([Node(), Node(), dest], False)

        first = Request(0, 2, 1)
        second = Request(1, 2, 2)

        # when
        self.statistic.add_req(first)
        self.statistic.add_req(second)

        # then
        first_state = self.statistic.timeline.state_for(2, 1)
        second_state = self.statistic.timeline.state_for(2, first_state.end + 1)

        self.assertIsNotNone(second_state)
        self.assertEqual(first_state.end, second_state.start)


    def test_fill_stat_string(self):
        #given
        self.statistic = Statistic([Node(), Infocenter(2, 1)], False)

        node_id = 1
        states = [State(node_id, 3, 8), State(node_id, 9, 15)]

        #when
        actual = self.statistic.fill_stat_string(node_id, states)

        #then
        util = ((8 - 3) + (15 - 9)) / float(1000)
        expected = STAT_STRING.format(node_id=node_id, type="Infocenter", req_proc=2, req_sent=0, util=util, avail=1.0)
        self.assertEqual(actual, expected)
Example #30
def calc_busload(single_test):
    messages = single_test.dbc.messages
    b11 = 47
    b29 = 65
    bitr = int(single_test.cnf['baudrate'])
    overhead = (b11 if single_test.cnf['id_size'] == 'Force 11 bits' else b29)
    auto_size = False
    if single_test.cnf['id_size'] == 'Auto':
        auto_size = True
    sum_message_load = 0.0
    message_count = 0.0
    bit_stuff = int(single_test.cnf['bit_stuffing']) / 100
    output_list = []
    output_not_used_list = []
    output_ignored = []
    message_time = 0

    for message in messages:
        message_time = 0
        message_load = 0
        if message.name not in single_test.erase_message:
            if message.cycle > 0:
                ml = message.size
                if auto_size:
                    overhead = (b11 if int(message.id) < 2100 else b29)
                message_time = ((ml * 8 + overhead) +
                                ((ml * 8 + overhead - 13) * bit_stuff)) / bitr
                message_load = (message_time / message.cycle)
                sum_message_load += message_load
                message_count += 1
                output_list.append(
                    MessageOut(message.name, message.id, message.size,
                               message.cycle, message_time, message_load))
                event_log(output_list[-1])
            else:
                output_not_used_list.append(
                    MessageOut(message.name, message.id, message.size,
                               message.cycle, message_time, message_load))
                event_log(output_not_used_list[-1])
        else:
            # eventLog("[-] message %s isn't used in busload calc" % message.name)
            output_not_used_list.append(
                MessageOut(message.name, message.id, message.size,
                           message.cycle, message_time, message_load))
            event_log(output_not_used_list[-1])

    result = Statistic(sum_message_load * 100, message_count, len(messages))

    result_output = [output_list, output_not_used_list, result, output_ignored]
    event_log('---> busload: %0.6f' % sum_message_load, single_test)
    single_test.result = result_output
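A worked instance of the frame-time formula above, with illustrative values: an 8-byte frame, an 11-bit identifier (47 bits of overhead), 20% worst-case bit stuffing, a 500 kbit/s bus, and a 10 ms cycle:

ml, overhead, bit_stuff, bitr, cycle = 8, 47, 0.20, 500000, 0.010
message_time = ((ml * 8 + overhead) + ((ml * 8 + overhead - 13) * bit_stuff)) / bitr
message_load = message_time / cycle
print(message_time, message_load)  # ~0.000261 s per frame -> ~2.6% of the bus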
Example #32
def main(_):
  # preprocess
  conf.observation_dims = eval(conf.observation_dims)

  for flag in ['memory_size', 't_target_q_update_freq', 't_test',
               't_ep_end', 't_train_max', 't_learn_start', 'learning_rate_decay_step']:
    setattr(conf, flag, getattr(conf, flag) * conf.scale)

  if conf.use_gpu:
    conf.data_format = 'NCHW'
  else:
    conf.data_format = 'NHWC'

  model_dir = get_model_dir(conf,
      ['use_gpu', 'max_random_start', 'n_worker', 'is_train', 'memory_size', 'gpu_fraction',
       't_save', 't_train', 'display', 'log_level', 'random_seed', 'tag', 'scale'])

  sess_config = tf.ConfigProto(
      log_device_placement=False, allow_soft_placement=conf.allow_soft_placement)
  sess_config.gpu_options.allow_growth = conf.allow_soft_placement

  with tf.Session(config=sess_config) as sess:
    env = AtariEnvironment(conf.env_name, conf.n_action_repeat,
                             conf.max_random_start, conf.observation_dims,
                             conf.data_format, conf.display, conf.use_cumulated_reward)


    pred_network = CNN(sess=sess,
                         data_format=conf.data_format,
                         history_length=conf.history_length,
                         observation_dims=conf.observation_dims,
                         output_size=env.env.action_space.n,
                         network_header_type=conf.network_header_type,
                         name='pred_network', trainable=True)
    target_network = CNN(sess=sess,
                           data_format=conf.data_format,
                           history_length=conf.history_length,
                           observation_dims=conf.observation_dims,
                           output_size=env.env.action_space.n,
                           network_header_type=conf.network_header_type,
                           name='target_network', trainable=False)
    

    stat = Statistic(sess, conf.t_test, conf.t_learn_start, model_dir, pred_network.var.values())
    agent = TrainAgent(sess, pred_network, env, stat, conf, target_network=target_network)

    if conf.is_train:
      agent.train(conf.t_train_max)
    else:
      agent.play(conf.ep_end)
Example #33
    def test_train_test_split_with_diff_ids_sample(self):
        df = DataForTests.df_split
        X_train, X_validation, y_train, y_validation = Statistic.train_test_split_with_diff_ids(
            df)

        X_train['status'] = y_train
        X_validation['status'] = y_validation

        X = pd.concat([X_train, X_validation])
        X.sort_values('n', inplace=True)

        df_dict = df.drop(columns=['id']).to_dict()
        X_dict = X.to_dict()

        self.assertDictEqual(df_dict, X_dict, 'Split is not full')
Example #34
    def _get_not_uniformed_axis(self, spheres, current_axis):
        print(f"\nCheck uniform distributions for axes: {current_axis} start.")
        axis_to_check = copy.deepcopy(current_axis)
        stat = Statistic()

        uniform_approves = []

        for axis in axis_to_check:
            axis_arr = []

            for figure in spheres:
                axis_arr.append(figure[axis])

            dist = stat.get_single_axis_distribution(axis_arr, self.ranges[axis], pocket_count=self.pocket_count)
            # a near-uniform histogram is near-flat, so the slope of a linear fit is close to zero
            is_axis_uniform = abs(np.polyfit(np.arange(self.pocket_count), dist, 1)[0]) < 0.2
            print(f"Is axis {axis} uniform: {is_axis_uniform}")
            uniform_approves.append(is_axis_uniform)

            if is_axis_uniform:
                current_axis.remove(axis)

        is_fully_uniform = all(uniform_approves)
        print(f"Check uniform distributions for axes: {axis_to_check} done")
        return is_fully_uniform, current_axis
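The uniformity test above fits a line to the per-pocket histogram and calls the axis uniform when the fitted slope is nearly zero; a self-contained sketch of the same heuristic (threshold 0.2 as in the method):

import numpy as np

pocket_count = 10
samples = np.random.default_rng(0).uniform(0.0, 1.0, 10000)
dist = np.histogram(samples, bins=pocket_count)[0] / len(samples)
slope = np.polyfit(np.arange(pocket_count), dist, 1)[0]
print(abs(slope) < 0.2)  # near-flat histogram -> near-zero slope -> uniform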
Example #35
    def getRecentMonthRecordDetail(self, recentNumber):
        year = datetime.date.today().year
        month = datetime.date.today().month
        csvStr = ''
        for i in range(0, recentNumber):
            thisMonth = month - i
            if thisMonth < 1:
                thisMonth = 12 + thisMonth
                thisYear = year - 1
            else:
                thisYear = year
            csvStr += "%s-%s\n" % (thisYear, thisMonth)
            csvStr += Statistic().getAnalysisByYearMonthAndRecord(
                thisYear, thisMonth)

        return csvStr
Example #38
def main(_):
  model_dir = get_model_dir(conf,
      ['data_dir', 'sample_dir', 'max_epoch', 'test_step', 'save_step',
       'is_train', 'random_seed', 'log_level', 'display'])
  preprocess_conf(conf)

  DATA_DIR = os.path.join(conf.data_dir, conf.data)
  SAMPLE_DIR = os.path.join(conf.sample_dir, conf.data, model_dir)

  check_and_create_dir(DATA_DIR)
  check_and_create_dir(SAMPLE_DIR)

  # 0. prepare datasets
  if conf.data == "mnist":
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

    next_train_batch = lambda x: mnist.train.next_batch(x)[0]
    next_test_batch = lambda x: mnist.test.next_batch(x)[0]

    height, width, channel = 28, 28, 1

    train_step_per_epoch = mnist.train.num_examples // conf.batch_size
    test_step_per_epoch = mnist.test.num_examples // conf.batch_size
  elif conf.data == "cifar":
    from cifar10 import IMAGE_SIZE, inputs

    maybe_download_and_extract(DATA_DIR)
    images, labels = inputs(eval_data=False,
        data_dir=os.path.join(DATA_DIR, 'cifar-10-batches-bin'), batch_size=conf.batch_size)

    height, width, channel = IMAGE_SIZE, IMAGE_SIZE, 3

  with tf.Session() as sess:
    network = Network(sess, conf, height, width, channel)

    stat = Statistic(sess, conf.data, model_dir, tf.trainable_variables(), conf.test_step)
    stat.load_model()

    if conf.is_train:
      logger.info("Training starts!")

      initial_step = stat.get_t() if stat else 0
      iterator = trange(conf.max_epoch, ncols=70, initial=initial_step)

      for epoch in iterator:
        # 1. train
        total_train_costs = []
        for idx in xrange(train_step_per_epoch):
          images = binarize(next_train_batch(conf.batch_size)) \
            .reshape([conf.batch_size, height, width, channel])

          cost = network.test(images, with_update=True)
          total_train_costs.append(cost)

        # 2. test
        total_test_costs = []
        for idx in xrange(test_step_per_epoch):
          images = binarize(next_test_batch(conf.batch_size)) \
            .reshape([conf.batch_size, height, width, channel])

          cost = network.test(images, with_update=False)
          total_test_costs.append(cost)

        avg_train_cost, avg_test_cost = np.mean(total_train_costs), np.mean(total_test_costs)

        stat.on_step(avg_train_cost, avg_test_cost)

        # 3. generate samples
        samples = network.generate()
        save_images(samples, height, width, 10, 10,
            directory=SAMPLE_DIR, prefix="epoch_%s" % epoch)

        iterator.set_description("train l: %.3f, test l: %.3f" % (avg_train_cost, avg_test_cost))
        print
    else:
      logger.info("Image generation starts!")

      samples = network.generate()
      save_images(samples, height, width, 10, 10, directory=SAMPLE_DIR)
Example #39
def main(_):
  model_dir = get_model_dir(conf, 
      ['max_step', 'test_step', 'save_step', 'is_train', 'random_seed', 'log_level'])
  preprocess_conf(conf)

  data_format = "NHWC"
  model = "pixel_rnn" # pixel_rnn, pixel_cnn

  if conf.data == "mnist":
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
    next_batch = lambda x: mnist.train.next_batch(x)[0]

    height, width, channel = 28, 28, 1

  with tf.Session() as sess:
    logger.info("Building %s." % model)

    if data_format == "NHWC":
      input_shape = [None, height, width, channel]
    elif data_format == "NCHW":
      input_shape = [None, height, width, channel]
    else:
      raise ValueError("Unknown data_format: %s" % data_format)

    l = {}

    l['inputs'] = tf.placeholder(tf.int32, [None, height, width, channel],)
    l['normalized_inputs'] = tf.div(tf.to_float(l['inputs']), 255., name="normalized_inputs")

    # residual connections of PixelRNN
    scope = "conv_inputs"
    logger.info("Building %s: %s" % (model, scope))

    # main recurrent layers
    if conf.use_residual and model == "pixel_rnn":
      l['conv_inputs'] = conv2d(l['normalized_inputs'], conf.hidden_dims * 2, [7, 7], "A", scope=scope)
    else:
      l['conv_inputs'] = conv2d(l['normalized_inputs'], conf.hidden_dims, [7, 7], "A", scope=scope)
    
    l_hid = l[scope]
    for idx in xrange(conf.recurrent_length):
      if model == "pixel_rnn":
        scope = 'diag_bilstm_%d' % idx
        l[scope] = l_hid = diagonal_bilstm(l_hid, conf, scope=scope)
      elif model == "pixel_cnn":
        scope = 'conv2d_B_%d' % idx
        l[scope] = l_hid = conv2d(l_hid, 3, [1, 1], "B", scope=scope)
      logger.info("Building %s: %s" % (model, scope))

    # output recurrent layers
    for idx in xrange(conf.out_recurrent_length):
      scope = 'conv2d_out_%d' % idx
      l[scope] = l_hid = tf.nn.relu(conv2d(l_hid, conf.out_hidden_dims, [1, 1], None, scope=scope))
      logger.info("Building %s: %s" % (model, scope))

    scope = 'output'
    if channel == 1:
      l['conv2d_out_final'] = conv2d(l_hid, 1, [1, 1], None, scope='conv2d_out_final')
      l[scope] = tf.nn.sigmoid(l['conv2d_out_final'])

      loss = tf.reduce_mean(
          tf.nn.sigmoid_cross_entropy_with_logits(l['conv2d_out_final'], l['normalized_inputs'], name='loss'))

      optimizer = tf.train.RMSPropOptimizer(conf.learning_rate)
      grads_and_vars = optimizer.compute_gradients(loss)

      new_grads_and_vars = \
          [(tf.clip_by_value(gv[0], conf.min_grad, conf.max_grad), gv[1]) for gv in grads_and_vars]
      optim = optimizer.apply_gradients(new_grads_and_vars)
    else:
      raise ValueError("Not implemented yet for RGB colors")

    logger.info("Building %s finished!" % model)

    stat = Statistic(sess, conf.data, model_dir, tf.trainable_variables(), conf.test_step)

    tf.initialize_all_variables().run()

    iterator = trange(conf.max_step, ncols=70, initial=stat.get_t())
    for i in iterator:
      images = next_batch(conf.batch_size).reshape([conf.batch_size, height, width, channel])

      _, cost = sess.run([optim, loss], feed_dict={l['inputs']: images})
      iterator.set_description("l: %s" % cost)
      print

      stat.on_step(cost)
Example #40
 def print_statistic(self):
     tmp = self.get_winner_and_loser()
     if tmp != 0:
         Statistic.to_file(tmp[0], tmp[1], self.__chip, tmp[2])
Example #41
 def __init__(self):
     Statistic.__init__(self)
     self._set_ncss_command("javancss -ncss | sed -e 's/Java NCSS: //'")
Example #42
def main(_):
  model_dir = get_model_dir(conf, 
      ['max_step', 'test_step', 'save_step', 'is_train', 'random_seed', 'log_level'])
  preprocess_conf(conf)

  if conf.use_gpu:
    data_format = "NHWC"
  else:
    data_format = "NCHW"

  if conf.data == "mnist":
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
    next_batch = lambda x: mnist.train.next_batch(x)[0]

    height, width, channel = 28, 28, 1

  with tf.Session() as sess:
    logger.info("Building %s starts!" % conf.model)

    if data_format == "NHWC":
      input_shape = [None, height, width, channel]
    elif data_format == "NCHW":
      input_shape = [None, height, width, channel]
    else:
      raise ValueError("Unknown data_format: %s" % data_format)

    l = {}

    l['inputs'] = tf.placeholder(tf.float32, [None, height, width, channel],)

    if conf.data == 'mnist':
      l['normalized_inputs'] = l['inputs']
    else:
      l['normalized_inputs'] = tf.div(l['inputs'], 255., name="normalized_inputs")

    # input of main recurrent layers
    scope = "conv_inputs"
    logger.info("Building %s" % scope)

    if conf.use_residual and conf.model == "pixel_rnn":
      l[scope] = conv2d(l['normalized_inputs'], conf.hidden_dims * 2, [7, 7], "A", scope=scope)
    else:
      l[scope] = conv2d(l['normalized_inputs'], conf.hidden_dims, [7, 7], "A", scope=scope)
    
    # main recurrent layers
    l_hid = l[scope]
    for idx in xrange(conf.recurrent_length):
      if conf.model == "pixel_rnn":
        scope = 'LSTM%d' % idx
        l[scope] = l_hid = diagonal_bilstm(l_hid, conf, scope=scope)
      elif conf.model == "pixel_cnn":
        scope = 'CONV%d' % idx
        l[scope] = l_hid = conv2d(l_hid, 3, [1, 1], "B", scope=scope)
      else:
        raise ValueError("wrong type of model: %s" % (conf.model))
      logger.info("Building %s" % scope)

    # output recurrent layers
    for idx in xrange(conf.out_recurrent_length):
      scope = 'CONV_OUT%d' % idx
      l[scope] = l_hid = tf.nn.relu(conv2d(l_hid, conf.out_hidden_dims, [1, 1], "B", scope=scope))
      logger.info("Building %s" % scope)

    if channel == 1:
      l['conv2d_out_logits'] = conv2d(l_hid, 1, [1, 1], "B", scope='conv2d_out_logits')
      l['output'] = tf.nn.sigmoid(l['conv2d_out_logits'])

      logger.info("Building loss and optims")
      loss = tf.reduce_mean(
          tf.nn.sigmoid_cross_entropy_with_logits(l['conv2d_out_logits'], l['normalized_inputs'], name='loss'))

      optimizer = tf.train.RMSPropOptimizer(conf.learning_rate)
      grads_and_vars = optimizer.compute_gradients(loss)

      new_grads_and_vars = \
          [(tf.clip_by_value(gv[0], -conf.grad_clip, conf.grad_clip), gv[1]) for gv in grads_and_vars]
      optim = optimizer.apply_gradients(new_grads_and_vars)
    else:
      raise ValueError("Not implemented yet for RGB colors")

    logger.info("Building %s finished!" % conf.model)

    show_all_variables()
    stat = Statistic(sess, conf.data, model_dir, tf.trainable_variables(), conf.test_step)

    logger.info("Initializing all variables")

    tf.initialize_all_variables().run()
    stat.load_model()

    if conf.is_train:
      logger.info("Training starts!")

      initial_step = stat.get_t() if stat else 0
      iterator = trange(conf.max_step, ncols=70, initial=initial_step)

      for i in iterator:
        if conf.data == 'mnist':
          images = binarize(next_batch(conf.batch_size)) \
            .reshape([conf.batch_size, height, width, channel])

        _, cost, output = sess.run([
            optim, loss, l['output']
          ], feed_dict={l['inputs']: images})

        if conf.data == 'mnist':
          print
          print mprint(images[1])
          print mprint(output[1], 0.5)

        if stat:
          stat.on_step(cost)

        iterator.set_description("l: %s" % cost)
    else:
      logger.info("Image generation starts!")
      samples = np.zeros((100, height, width, 1), dtype='float32')

      for i in xrange(height):
        for j in xrange(width):
          for k in xrange(channel):
            next_sample = binarize(l['output'].eval({l['inputs']: samples}))
            samples[:, i, j, k] = next_sample[:, i, j, k]

            if conf.data == 'mnist':
              print "=" * width
              mprint(binarize(samples[0,:,:,:]))

      save_images(samples, height, width, 10, 10)
Example #43
def packet_handler(client_socket, address, data_packet, stype='tcp'):  # parse packet
    #print data_packet
    pack_len, pack_type, pack_extras = struct.unpack('HBB', data_packet[:4])
    data_content = data_packet[4:]
    #print 'length: %d, type: %d, extras: %d' % (pack_len, pack_type, pack_extras)

    res_data = ''  # process packet
    if pack_type == 10:		# login
        print 'login request'
        url = ConfigAPI['base_url'] + 'vanet.auth.obd.getToken'
        res_data = call_http_api(url, data_content, 'post')
    elif pack_type == 11:		# gps data upload
        print 'gps data upload'
        url = ConfigAPI['base_url'] + 'vanet.obd.setPos'
        fmt = '=Qddiff'
        fmt_size = struct.calcsize(fmt)  # print fmt_size, len(data_content)
        if fmt_size != len(data_content):
            pack_extras = 1
            res_data = response_json_error('packet struct invalid')
            logging.info(res_data)
        else:
            pid, longtitude, latitude, gps_time, speed, course = struct.unpack(fmt, data_content)
            access_token = ''
            num_chars = struct.unpack('=8B',data_content[:8])
            for ch in num_chars:
                access_token += '%02x'% ch
            post_data = {'access_token': access_token,
                         'longtitude': longtitude,
                         'latitude': latitude,
                         'gps_time': gps_time+8*3600,
                         'speed': speed,
                         'course': course}   # print post_data
            post_data = json.dumps(post_data)
            res_data = call_http_api(url, post_data, 'post')
    elif pack_type == 12:  # obd data upload
        #print 'obd data upload'
        url = ConfigAPI['base_url'] + 'vanet.obd.setBaseData'
        fmt = '=QBHBBBHBBBHBHHBHHBBB'
        fmt_size = struct.calcsize(fmt)
        #print fmt_size, len(data_content)
        if (fmt_size != len(data_content)):
            pack_extras = 1
            res_data = response_json_error('packet struct invalid')
            logging.info(res_data)
        else:
            obd_data = struct.unpack(fmt, data_content)
            access_token = ''
            num_chars = struct.unpack('=8B', data_content[:8])
            for ch in num_chars:
                access_token += '%02x'% ch
            post_data = {'access_token': access_token,
                         'DTC_CNT': obd_data[1],
                         'DTCFRZF': obd_data[2],
                         'LOAD_PCT': obd_data[3],
                         'ECT': obd_data[4],
                         'MAP': obd_data[5],
                         'RPM': obd_data[6],
                         'VSS': obd_data[7],
                         'SPARKADV': obd_data[8],
                         'IAT': obd_data[9],
                         'MAF': obd_data[10],
                         'TP': obd_data[11],
                         'RUNTM': obd_data[12],
                         'MIL_DIST': obd_data[13],
                         'FLI': obd_data[14],
                         'CLR_DIST': obd_data[15],
                         'VPWR': obd_data[16],
                         'AAT': obd_data[17],
                         'FUEL_TYP': obd_data[18],
                         'APP_R': obd_data[19]}
            #print access_token
            if post_data['VSS'] == 0x88:  # invalid data
                print 'invalid data'
                if accdict.has_key(access_token) and accdict[access_token] != -1:
                    accdict[access_token] += 1
                else:
                    print 'Still Stop!!!!!!!!'
                    accdict[access_token] = -1
                if accdict[access_token] >= 4:
                    accdict[access_token] = -1
                    eptdict = {'access_token': access_token}
                    urlpull = ConfigAPI['base_url'] + 'vanet.obd.getTripobddata'
                    eptdict = call_http_api(urlpull, json.dumps(eptdict), 'post')
                    try:
                        eptdict = json.loads(eptdict)
                        s = Statistic(access_token)
                        if eptdict.has_key('msg'):
                            if type(eptdict['msg']) == type([]):
                                print 'Valid trip data!!!!!!!!!'
                                s.dbitems = eptdict['msg']
                                sitem = s.runstatistic()
                                sitem = json.dumps(sitem)
                                urlpush = ConfigAPI['base_url'] + 'vanet.obd.addTrip'
                                print urlpush
                                res_data = call_http_api(urlpush, sitem, 'post')
                            else:
                                print 'Server Error! No Data Info !!!!!!!!!'
                    except:
                        print 'Invalid Data!!'
            else:
                accdict[access_token] = 0  # valid data
            #print post_data
            post_data = json.dumps(post_data)
            #print post_data
            res_data = call_http_api(url, post_data, 'post')
    elif (pack_type == 13):		# gps and pbd data upload
        print 'gps, obd data upload'
        url = ConfigAPI['base_url'] + 'vanet.obd.setMainData'
        fmt = '=QffBHBBBHBBBHBHHBHHBBB'
        fmt_size = struct.calcsize(fmt)
        #print fmt_size, len(data_content)
        if (fmt_size != len(data_content)):
            pack_extras = 1
            res_data = response_json_error('packet struct invalid')
            logging.info(res_data)
        else:
            obd_data = struct.unpack(fmt, data_content)
            access_token = ''
            num_chars = struct.unpack('=8B',data_content[:8])
            for ch in num_chars:
                access_token += '%02x'%ch
            post_data = {'access_token': access_token,
                         'longtitude': obd_data[1],
                         'latitude': obd_data[2],
                         'DTC_CNT': obd_data[3],
                         'DTCFRZF': obd_data[4],
                         'LOAD_PCT': obd_data[5],
                         'ECT': obd_data[6],
                         'MAP': obd_data[7],
                         'RPM': obd_data[8],
                         'VSS': obd_data[9],
                         'SPARKADV': obd_data[10],
                         'IAT': obd_data[11],
                         'MAF': obd_data[12],
                         'TP': obd_data[13],
                         'RUNTM': obd_data[14],
                         'MIL_DIST': obd_data[15],
                         'FLI': obd_data[16],
                         'CLR_DIST': obd_data[17],
                         'VPWR': obd_data[18],
                         'AAT': obd_data[19],
                         'FUEL_TYP': obd_data[20],
                         'APP_R': obd_data[21]}
            post_data = json.dumps(post_data)
            res_data = call_http_api(url, post_data, 'post')
    elif pack_type == 255:
        print 'error message upload'
        url = ConfigAPI['base_url'] + 'vanet.obd.setErrorData'
        if pack_len == 11 :
            fmt = '=QB'
        else:
            fmt = '=QB'+str(pack_len-11)+'s'
        fmt_size = struct.calcsize(fmt)   # print fmt_size, len(data_content)
        if pack_len - 2 != len(data_content):
            pack_extras = 1
            res_data = response_json_error('packet struct invalid')
            logging.info(res_data)
        else:
            if pack_len == 11 :
                pid, error_type = struct.unpack(fmt, data_content)
                error_msg = ''
            else:
                pid,error_type,error_msg = struct.unpack(fmt, data_content)
            access_token = ''
            num_chars = struct.unpack('=8B',data_content[:8])
            for ch in num_chars:
                access_token += '%02x'%ch
            post_data = {'access_token': access_token,
                         'error_type': error_type,
                         'error_msg': error_msg}
            post_data = json.dumps(post_data)
            res_data = call_http_api(url, post_data, 'post')
    # check reply message
    try:
        ret = json.loads(res_data)
    except:
        print 'json loads exception for res_data:', res_data
        ret = {'ret': 1}

    print 'ret code: ', ret['ret']
    if ret['ret'] != 0:  # if the api call failed, return the message to the client
        pack_extras = 1
    if pack_extras == 1:
        if stype == 'tcp':
            client_socket.sendall("reply current time: " + time.ctime())
        else:  # udp
            send_bytes = client_socket.sendto(res_data, address)
            print 'send len: %d, fact len: %d' % (send_bytes, len(res_data))
            if send_bytes != len(res_data):
                err_msg = 'udp sendto fail, data length: %d, send_bytes: %d' % (len(res_data), send_bytes)
                raise gevent.socket.error(err_msg)
Example #44
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print "Using: python plotstat.py logfile.txt"
        exit(1)

    fname = sys.argv[-1]

    try:
        with open(fname) as fp:
            logstr = fp.read()
    except IOError, e:
        print "Can't open {0}".format(fname)
        exit(1)

    parser = LogParser(logstr)
    stat = Statistic(parser.commits)
    dateList = stat.getStat(Daily10)

    dates, count = zip(*dateList)

    # Convert the dates to a numeric format
    dates_float = [matplotlib.dates.date2num(date) for date in dates]

    # Call subplot explicitly to get an AxesSubplot instance,
    # which gives us access to the axes
    axes = pylab.subplot(1, 1, 1)

    # Show only the year.month as the X-axis tick labels
    axes.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%Y.%m"))

    # Plot the data
Example #45
    def testStatisticEmpty(self):
        stat = Statistic([])
        statlist = stat.getStat(Daily)

        self.assertEqual(len(statlist), 0)