Example #1
 def __init__(self, p1, p2):
     self.p1 = p1
     self.p2 = p2
     self.winner_stat = Statistic()
     self.nb_round_stat = Statistic()
     self.first_player_stat = Statistic()
     self.damage_stat = Statistic()
Example #2
 def __init__(self, master=None, channel=None):
     super().__init__(master)
     self.master = master
     self.channel = channel
     self.pack()
     self.light1_stat = Statistic()
     self.light2_stat = Statistic()
     self.create_widgets()
Example #3
def main(_):
  model_dir = util.get_model_dir(conf, 
      ['data_dir', 'sample_dir', 'max_epoch', 'test_step', 'save_step',
       'is_train', 'random_seed', 'log_level', 'display', 'runtime_base_dir', 
       'occlude_start_row', 'num_generated_images'])
  util.preprocess_conf(conf)
  validate_parameters(conf)

  data = 'mnist' if conf.data == 'color-mnist' else conf.data 
  DATA_DIR = os.path.join(conf.runtime_base_dir, conf.data_dir, data)
  SAMPLE_DIR = os.path.join(conf.runtime_base_dir, conf.sample_dir, conf.data, model_dir)

  util.check_and_create_dir(DATA_DIR)
  util.check_and_create_dir(SAMPLE_DIR)
  
  dataset = get_dataset(DATA_DIR, conf.q_levels)

  with tf.Session() as sess:
    network = Network(sess, conf, dataset.height, dataset.width, dataset.channels)

    stat = Statistic(sess, conf.data, conf.runtime_base_dir, model_dir, tf.trainable_variables())
    stat.load_model()

    if conf.is_train:
      train(dataset, network, stat, SAMPLE_DIR)
    else:
      generate(network, dataset.height, dataset.width, SAMPLE_DIR)
Example #4
 def emulate_work(self, time_delta, requests_total_count):
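     # Discrete-time simulation of a multi-phase queueing system: new requests
     # enter phase 0 (or are rejected when it is full), finished requests move
     # to the next phase oldest-first, and the final phase stamps their end time.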
     current_time = 0.0
     requests = []
     time_to_next_req_gen = self._get_time_req_gen(expon(scale=2.0), requests_total_count)
     time_to_next_req = 0
     rejected_requests = 0
     while np.any([not phase.is_free() for phase in self.phases]) or time_to_next_req_gen.has_next():
         while time_to_next_req == 0 and time_to_next_req_gen.has_next():
             requests.append(Request(time_start=current_time))
             if self.phases[0].has_place_for_request():
                 self.phases[0].send_request(requests[-1])
             else:
                 rejected_requests += 1
             time_to_next_req = round(time_to_next_req_gen.next(), 2)
         for i, phase in reversed(list(enumerate(self.phases))):
             waiting_channels = phase.move(time_delta)
             if i != len(self.phases) - 1:
                 while self.phases[i + 1].has_place_for_request() and len(waiting_channels) > 0:
                     channel = waiting_channels.pop(waiting_channels.index(
                         min(waiting_channels, key=lambda channel: channel.request.time_start)
                     ))
                     request = channel.take_request()
                     self.phases[i + 1].send_request(request)
             else:
                 for channel in waiting_channels:
                     request = channel.take_request()
                     request.time_end = current_time
         current_time = round(current_time + time_delta, 2)
         if time_to_next_req > 0:
             time_to_next_req = round(time_to_next_req - time_delta, 2)
         if current_time % 1000 == 0:
             print('{:.0%}'.format(len(requests) / float(requests_total_count)))
     print('3.1 Requests processed = {}, Current time = {}'.format(len(requests), current_time))
     return Statistic(requests=requests, phases=self.phases), current_time
Example #5
def generate(dataset_name, occlusions=False):
    # Load dataset
    dataset, image_height, image_width, num_channels, next_train_batch, next_test_batch = load_images(dataset_name)

    # setup train, test
    train = dataset.train
    test = dataset.test
    SAMPLE_DIR = os.path.join('samples', dataset_name, 'generate')

    with tf.Session() as sess:
        network = Network(sess, image_height, image_width, num_channels)
        # tf.initialize_all_variables().run()

        stat = Statistic(sess, dataset_name, './', tf.trainable_variables(), 0)
        stat.load_model()
        num_images = 100
        if occlusions:
            orig_images = next_test_batch(num_images).reshape(
                        [num_images, image_height, image_width, num_channels])
            orig_images[:, image_height // 2:, :, :] = 0
            samples = network.generate_images(num_images, starting_pos=[0, image_height // 2], starting_image=orig_images)
            # original_occlusions
            occlusion_dir = os.path.join('samples', dataset_name, "occlusions")
            save_images(orig_images, image_height, image_width, 10, 10, directory=occlusion_dir)
        else:
            samples = network.generate_images(num_images)
        save_images(samples, image_height, image_width, 10, 10, directory=SAMPLE_DIR)
Example #6
 def __init__(self, refPath, dataPath, dbFilename):
     GPIO.cleanup()
     GPIO.setmode(GPIO.BCM)
     GPIO.setup(self.AUTO_START_GPIO_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
     self.__i2c = I2C(2)
     self.__analog = Analog(self.__i2c.getLock(), 0x49)
     self.default = Default()
     self.database = Database(dataPath, dbFilename)
     self.waterlevel = Waterlevel(debug, self.database)
     self.light = Light(self.database)
     self.curtain = Curtain(self.database)
     self.pressure = Pressure(self.database, self.default, self.__analog)
     self.temperature = Temperature("28-0417716a37ff", self.database)
     self.redox = Redox(debug, self.database, self.default, self.__analog)
     self.ph = PH(debug, self.database, self.default, self.__analog,
                  self.temperature)
     self.robot = Robot(debug, self.database)
     self.pump = Pump(debug, self.database, self.default, self.robot,
                      self.redox, self.ph, self.temperature)
     self.panel = Panel(debug, self.database, self.default, self.pump,
                        self.redox, self.ph, self.__i2c)
     self.statistic = Statistic(debug, self.pump, self.robot, self.redox,
                                self.ph, self.temperature, self.pressure,
                                self.waterlevel)
     self.refPath = refPath
     self.__autoSaveTick = 0
     self.__today = date.today().day - 1
     debug.TRACE(debug.DEBUG, "Initialisation done (Verbosity level: %s)\n",
                 debug)
Example #7
def nn_vs_nn_export_better_player():
    player1 = NNAgent1(verbose=True)
    player2 = NNAgent1(load_best=True)

    stats = Statistic(player1, verbose=True)

    while True:
        bg = Backgammon()
        bg.set_player_1(player1)
        bg.set_player_2(player2)
        winner = bg.play()

        player1.add_reward(winner)
        player2.add_reward(-1 * winner)

        stats.add_win(winner)

        if stats.nn_is_better() and stats.games_played % 100 == 0:
            break

    # only way to reach this point is if the current
    # neural network is better than the BestNNAgent()
    # ... at least I think so
    # thus, we export the current as best
    print("Congratulations, you brought the network one step closer")
    print("to taking over the world (of backgammon)!!!")
    player1.export_model(filename="nn_best_model")
Example #8
def do_default():
    """
    Play with a neural network against random
    """
    player1 = get_agent_by_config_name('nn_pg_2', 'best')
    player2 = get_agent_by_config_name('random', 'None')

    player1.training = True
    player2.training = True

    stats = Statistic(player1, verbose=True)

    # play games forever
    while True:

        bg = Backgammon()
        bg.set_player_1(player1)
        bg.set_player_2(player2)
        winner = bg.play()

        player1.add_reward(winner)
        player2.add_reward(-winner)

        # Reward the neural network agent
        # player1.reward_player(winner)

        stats.add_win(winner)
Example #9
 def __initEmptyStatsList(self):
     self.statsList.clear()
     
     self.statsList.append(Statistic("Total picture reveals", 0, Constants.TRIGGER_REVEALED_IMAGE, False))
     self.statsList.append(Statistic("Correct guesses", 0, Constants.TRIGGER_FOUND_IMAGE, False))
     self.statsList.append(Statistic("Wrong guesses", 0, Constants.TRIGGER_FOUND_WRONG_IMAGE, False))
     self.statsList.append(Statistic("Total in-game time", 0, Constants.TRIGGER_EXIT_LEVEL, True))
     self.statsList.append(Statistic("Attempted games", 0, Constants.TRIGGER_START_GAME, False))
     self.statsList.append(Statistic("Finished games", 0, Constants.TRIGGER_FINISH_GAME, False))
     self.statsList.append(Statistic("Money earned", 0, Constants.TRIGGER_EARNED_MONEY, False))
     self.statsList.append(Statistic("Money spent", 0, Constants.TRIGGER_SPENT_MONEY, False))
     self.statsList.append(Statistic("Drinks bought", 0, Constants.TRIGGER_BOUGHT_DRINK, False))
     
     self.__saveStats()
Example #10
def train(dataset_name, max_epochs=10000, test_period=1):
    # Load dataset
    dataset, image_height, image_width, num_channels, next_train_batch, next_test_batch = load_images(
        dataset_name)

    # setup train, test
    train = dataset.train
    test = dataset.test

    num_train_batches = train.num_examples // BATCH_SIZE
    num_test_batches = test.num_examples // BATCH_SIZE

    with tf.Session() as sess:
        network = Network(sess, image_height, image_width, num_channels)
        # tf.initialize_all_variables().run()

        # TODO make more general
        stat = Statistic(sess, 'mnist', 'train', tf.trainable_variables(),
                         test_period)
        stat.load_model()
        SAMPLE_DIR = os.path.join('samples', 'mnist', 'train')
        initial_step = stat.get_t() if stat else 0

        sampled_images = []
        for epoch in range(max_epochs):
            print('Current epoch: %i' % epoch)
            training_costs = []
            for i in range(num_train_batches):
                images = binarize(next_train_batch(BATCH_SIZE)).reshape(
                    [BATCH_SIZE, image_height, image_width, num_channels])
                cost = network.test(images, with_update=True)
                training_costs.append(cost)
            # test
            if epoch % test_period == 0:
                print('Running tests...')
                testing_costs = []
                for i in range(num_test_batches):
                    images = binarize(next_test_batch(BATCH_SIZE)).reshape(
                        [BATCH_SIZE, image_height, image_width, num_channels])

                    cost = network.test(images, with_update=False)
                    testing_costs.append(cost)
                avg_train_cost = np.average(training_costs)
                avg_test_cost = np.average(testing_costs)
                print('Test cost at epoch %d: %04f' % (epoch, avg_test_cost))
                stat.on_step(avg_train_cost, avg_test_cost)

                samples = network.generate_images(100)
                save_images(samples,
                            image_height,
                            image_width,
                            10,
                            10,
                            directory=SAMPLE_DIR)
Example #11
    def getRecentMonthCateStatistic(self, recentNumber):
        year = datetime.date.today().year
        month = datetime.date.today().month
        sumPerc, sumCost = Statistic().getRecentMonthCateStatistic(
            recentNumber)
        allCate = Analysis().getAllCategoryAndId()
        csvStr = ''
        csvStr += '分ē±»'
        csvPerc = ''
        csvPerc += '分ē±»'
        # write the table header
        for i in range(0, recentNumber):
            thisMonth = month - i
            if thisMonth < 1:
                thisMonth = 12 + thisMonth
                thisYear = year - 1
            else:
                thisYear = year
            csvPerc += ",%s-%s(%%)" % (thisYear, thisMonth)
            csvStr += ",%s-%s(ļæ„)" % (thisYear, thisMonth)
        csvStr += "\n"
        csvPerc += "\n"
        # manually add the 'ꀻ讔' (grand total) row
        csvStr += 'ꀻ讔'
        for i in range(0, recentNumber):
            thisMonth = month - i
            if thisMonth < 1:
                thisMonth = 12 + thisMonth
            csvStr += ',' + str(sumCost['ꀻ讔'][thisMonth])
        csvStr += "\n"

        for cate in allCate:
            # iterate over categories
            csvStr += "%s" % cate['name']
            csvPerc += "%s" % cate['name']
            for i in range(0, recentNumber):
                # iterate over months
                thisMonth = month - i
                if thisMonth < 1:
                    thisMonth = 12 + thisMonth

                # append the category's percentage for this month
                try:
                    csvPerc += ',' + sumPerc[cate['name']][thisMonth] + '%'
                except KeyError:
                    csvPerc += ',0%'
                # append the category's cost for this month
                try:
                    csvStr += ',' + str(sumCost[cate['name']][thisMonth])
                except KeyError:
                    csvStr += ',0'
            csvStr += '\n'
            csvPerc += "\n"
        return csvStr + '\n' + csvPerc
Example #12
def upload_statistics():
    stt = Statistic(source_data, data_frame)
    mode = stt.mode[0][0]
    mode = mode.tolist()
    return render_template('upload_statistics.html',
                           mean=stt.mean,
                           median=stt.median,
                           mode=mode,
                           min=stt.min,
                           max=stt.max,
                           var=stt.var,
                           frame=data_frame,
                           method='Statistics')
Example #13
def calc_busload(single_test):
    messages = single_test.dbc.messages
    b11 = 47  # worst-case frame overhead (bits) for an 11-bit standard CAN ID
    b29 = 65  # worst-case frame overhead (bits) for a 29-bit extended CAN ID
    bitr = int(single_test.cnf['baudrate'])
    overhead = (b11 if single_test.cnf['id_size'] == 'Force 11 bits' else b29)
    auto_size = False
    if single_test.cnf['id_size'] == 'Auto':
        auto_size = True
    sum_message_load = 0.0
    message_count = 0.0
    bit_stuff = int(single_test.cnf['bit_stuffing']) / 100
    output_list = []
    output_not_used_list = []
    output_ignored = []
    message_time = 0

    for message in messages:
        message_time = 0
        message_load = 0
        if message.name not in single_test.erase_message:
            if message.cycle > 0:
                ml = message.size
                if auto_size:
                    # choose the overhead by ID size: small IDs fit an 11-bit identifier
                    overhead = (b11 if int(message.id) < 2100 else b29)
                message_time = ((ml * 8 + overhead) +
                                ((ml * 8 + overhead - 13) * bit_stuff)) / bitr
                message_load = (message_time / message.cycle)
                sum_message_load += message_load
                message_count += 1
                output_list.append(
                    MessageOut(message.name, message.id, message.size,
                               message.cycle, message_time, message_load))
                event_log(output_list[-1])
            else:
                output_not_used_list.append(
                    MessageOut(message.name, message.id, message.size,
                               message.cycle, message_time, message_load))
                event_log(output_not_used_list[-1])
        else:
            # eventLog("[-] message %s isn't used in busload calc" % message.name)
            output_not_used_list.append(
                MessageOut(message.name, message.id, message.size,
                           message.cycle, message_time, message_load))
            event_log(output_not_used_list[-1])

    result = Statistic(sum_message_load * 100, message_count, len(messages))

    result_output = [output_list, output_not_used_list, result, output_ignored]
    event_log('---> busload: %0.6f' % sum_message_load, single_test)
    single_test.result = result_output
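To make the load formula above concrete, here is a small worked example with illustrative values (payload size, bitrate, stuffing ratio, and cycle time are assumptions, with the cycle expressed in seconds):

# Illustrative values, not taken from any source test configuration:
# 8-byte payload, 11-bit ID (47 bits overhead), 500 kbit/s bus,
# 20% worst-case bit stuffing, 10 ms cycle time.
ml, overhead, bitr, bit_stuff, cycle = 8, 47, 500000, 0.20, 0.010
message_time = ((ml * 8 + overhead) + ((ml * 8 + overhead - 13) * bit_stuff)) / bitr
message_load = message_time / cycle
print('%.3f ms per frame, %.1f%% bus load' % (message_time * 1e3, message_load * 100))
# -> 0.261 ms per frame, 2.6% bus load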
Example #14
def main(_):
  # preprocess
  conf.observation_dims = eval(conf.observation_dims)  # parse the dims string, e.g. "[80, 80]"

  for flag in ['memory_size', 't_target_q_update_freq', 't_test',
               't_ep_end', 't_train_max', 't_learn_start', 'learning_rate_decay_step']:
    setattr(conf, flag, getattr(conf, flag) * conf.scale)

  if conf.use_gpu:
    conf.data_format = 'NCHW'
  else:
    conf.data_format = 'NHWC'

  model_dir = get_model_dir(conf,
      ['use_gpu', 'max_random_start', 'n_worker', 'is_train', 'memory_size', 'gpu_fraction',
       't_save', 't_train', 'display', 'log_level', 'random_seed', 'tag', 'scale'])

  sess_config = tf.ConfigProto(
      log_device_placement=False, allow_soft_placement=conf.allow_soft_placement)
  sess_config.gpu_options.allow_growth = conf.allow_soft_placement

  with tf.Session(config=sess_config) as sess:
    env = AtariEnvironment(conf.env_name, conf.n_action_repeat,
                             conf.max_random_start, conf.observation_dims,
                             conf.data_format, conf.display, conf.use_cumulated_reward)


    pred_network = CNN(sess=sess,
                         data_format=conf.data_format,
                         history_length=conf.history_length,
                         observation_dims=conf.observation_dims,
                         output_size=env.env.action_space.n,
                         network_header_type=conf.network_header_type,
                         name='pred_network', trainable=True)
    target_network = CNN(sess=sess,
                           data_format=conf.data_format,
                           history_length=conf.history_length,
                           observation_dims=conf.observation_dims,
                           output_size=env.env.action_space.n,
                           network_header_type=conf.network_header_type,
                           name='target_network', trainable=False)
    

    stat = Statistic(sess, conf.t_test, conf.t_learn_start, model_dir, pred_network.var.values())
    agent = TrainAgent(sess, pred_network, env, stat, conf, target_network=target_network)

    if conf.is_train:
      agent.train(conf.t_train_max)
    else:
      agent.play(conf.ep_end)
Example #15
def run_game():
    """čæč”Œå›¾ē‰‡é€‰ę‹©ęøøꈏ"""
    # 初始化屏幕åÆ¹č±”
    pygame.init()
    sp_settings = Settings()
    screen = pygame.display.set_mode(
        (sp_settings.screen_width, sp_settings.screen_height))
    pygame.display.set_caption('Select Picture')
    # gf.generate_pictures(sp_settings)  # build the picture library (needed on first run)
    files = os.listdir('images')  # get the names of all pictures under images/
    # load all the pictures
    all_pictures = Group()
    selected_pictures = Group()
    unselected_pictures = Group()
    optional_pictures = Group()
    for file in files:
        pics = Pictures(sp_settings, screen, file)
        pics.update()
        all_pictures.add(pics)
    # initialize the number of remaining pictures
    stats = PicturesStats(len(all_pictures.sprites()))
    gf.choice_pictures(stats, all_pictures, selected_pictures,
                       unselected_pictures, optional_pictures)
    # tip messages
    tips = Tips(sp_settings, screen, stats)
    # statistics chart
    st_chart = Statistic(sp_settings, screen)
    # pop up a login box
    user_name = easygui.enterbox('Please input your name...', title='')
    while not user_name:  # loop until a name is entered or Cancel is pressed
        if user_name is None:  # Cancel quits the program
            sys.exit()
        user_name = easygui.enterbox('Please input your name...', title='')

    easygui.msgbox('Welcome %s' % (user_name, ))
    st_chart.load_history(user_name + '.txt')
    pygame.display.set_caption(user_name.title() + ' Select Picture ...')
    # main loop
    while True:
        # listen for events
        gf.check_events(sp_settings, screen, stats, st_chart, all_pictures,
                        user_name, selected_pictures, unselected_pictures,
                        optional_pictures)
        gf.update_help_string(sp_settings, stats, optional_pictures)
        gf.update_tips(tips)
        gf.update_screen(sp_settings, screen, stats, tips, st_chart,
                         all_pictures, selected_pictures, unselected_pictures,
                         optional_pictures)
Example #16
    def getRecentMonthRecordDetail(self, recentNumber):
        year = datetime.date.today().year
        month = datetime.date.today().month
        csvStr = ''
        for i in range(0, recentNumber):
            thisMonth = month - i
            if thisMonth < 1:
                thisMonth = 12 + thisMonth
                thisYear = year - 1
            else:
                thisYear = year
            csvStr += "%s-%s\n" % (thisYear, thisMonth)
            csvStr += Statistic().getAnalysisByYearMonthAndRecord(
                thisYear, thisMonth)

        return csvStr
Example #17
def main(type='console', path='results/all_print.txt'):
    if type != 'console':
        orig_stdout = sys.stdout
        f = open(path, 'w')
        sys.stdout = f

    stat = Statistic()
    stat.load_data("data.txt")
    # print(stat.get_data())

    gr = Graphic()
    gr.build_hist_and_emp(stat.get_data())
    print("Histogram and empirical distribution function plotted.\n")

    print("Sample mean: %.3f" %
          (stat.find_average_sample_value()))
    print("Sample variance: %.3f" % (stat.find_selective_dispersion()))
    print("Standard error: %.3f" % (stat.find_standard_error()))
    print("Mode: ", stat.find_mode())
    print("Median: ", stat.find_median())
    print("Quartiles: ", (stat.find_quartiles()))
    q1, q2, q3 = stat.find_quartiles()
    gr.build_box_plot(stat.get_data(), q1, q2, q3, stat.find_min(),
                      stat.find_max())
    print("Box plot built.")
    print("Standard deviation: %.3f" % (stat.find_standard_deviation()))
    print("Kurtosis: %.3f" % (stat.find_kurtosis()))
    print("Skewness: %.3f (%s)" % (stat.find_skewness()))
    print("Minimum: ", (stat.find_min()))
    print("Maximum: ", (stat.find_max()))

    print("\nTesting hypothesis H_0:")
    stat.pearson_criterion()

    print("\nConfidence interval for the expected value: (%.3f; %.3f)" %
          (stat.expected_value_interval()))
    print("\nConfidence interval for the standard "
          "deviation: (%.3f; %.3f)" % (stat.standard_deviation_interval()))

    if type != 'console':
        sys.stdout = orig_stdout
        f.close()
Example #18
def get_vacancies(url):
    """
    Get all vacancies data from the provided URL.
    """
    vacancies = []
    resp = do_request(url)

    # Append 'vacancies' with data from zero page
    vacancies.append(json.loads(resp.text))

    # Set new statistics value of vacancies
    found_vacancies = vacancies[0]['found']
    stat = Statistic(found_vacancies)

    # Pagination, go through all available pages if response had more than one page
    pages = int(vacancies[0]['pages'])
    if pages > 0:
        for page in range(1, pages):
            resp = do_request(url + '&page=' + str(page))
            # Append 'vacancies' with data from the current page
            vacancies.append(json.loads(resp.text))
    return vacancies, stat
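A hedged usage sketch for get_vacancies(); the endpoint below is an assumption (the 'found' and 'pages' fields suggest an hh.ru-style API):

# Hypothetical driver; the URL and query are assumptions, not from the source.
vacancies, stat = get_vacancies('https://api.hh.ru/vacancies?text=python')
print('pages fetched:', len(vacancies))
print('vacancies found:', vacancies[0]['found'])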
Example #19
    def _get_not_uniformed_axis(self, spheres, current_axis):
        print(f"\nCheck uniform distributions for axises: {current_axis} start.") 
        axis_to_check = copy.deepcopy(current_axis)
        stat = Statistic()

        uniform_approves = []

        for axis in axis_to_check:
            axis_arr = []

            for figure in spheres:
                axis_arr.append(figure[axis])

            dist = stat.get_single_axis_distribution(axis_arr, self.ranges[axis], pocket_count=self.pocket_count)
            is_axis_uniform = abs(np.polyfit(np.arange(self.pocket_count), dist, 1)[0]) < 0.2
            print(f"Is axis {axis} uniformed: {is_axis_uniform}")
            uniform_approves.append(is_axis_uniform)

            if is_axis_uniform:
                current_axis.remove(axis)
        
        is_fully_uniform = all(uniform_approves)
        print(f"Check uniform distributions for axises: {axis_to_check} done") 
        return is_fully_uniform, current_axis
Example #20
            samples[:, i, j] = next_sample[:, i, j]

    return samples


# ### Training
#
# Now we start the actual training process. We initialize TensorFlow and load a Statistic instance to keep track of the model and score statistics as we train.
#
# Then, we go through our training epochs, generating a series of images, evaluating the loss and optimizing accordingly. Every epoch we also test the model by producing sample images and evaluating the performance on some test data. Finally, once training is finished, we generate the images to display.

# In[9]:

# with tf.Session() as sess:
sess = tf.Session()
stat = Statistic(sess, p.data, model_dir, tf.trainable_variables(),
                 p.test_step)
stat.load_model()
init = tf.global_variables_initializer()

sess.run(init)
stat.start()
print("Start training")

initial_step = stat.get_t() if stat else 0
# iterator = trange(p.max_epoch, ncols=70, initial=initial_step)
iterator = tqdm(range(p.max_epoch))

for epoch in iterator:
    # print('Start epoch')
    # 1. train
    total_train_costs = []
Example #21
 def __init__(self):
     self.time = Statistic('Time')
     self.discounted_return = Statistic('discounted return')
     self.undiscounted_return = Statistic('undiscounted return')
Example #22
def run_cnn(p1, p2, use_swapout, use_residual, max_epoch):
    hyperparams = {  # network
        "model": "pixel_cnn",  # name of model [pixel_rnn, pixel_cnn]
        "batch_size": 100,  # size of a batch
        "hidden_dims": 16,  # dimesion of hidden states of LSTM or Conv layers
        "recurrent_length": 7,  # the length of LSTM or Conv layers
        "out_hidden_dims":
        32,  # dimesion of hidden states of output Conv layers
        "out_recurrent_length": 2,  # the length of output Conv layers
        "use_residual":
        use_residual,  # whether to use residual connections or not
        "use_dynamic_rnn": False,  # whether to use dynamic_rnn or not
        "use_swapout": use_swapout,  # whether to use swapout or not
        "p1": p1,  # p1 in swapout
        "p2": p2,  # p2 in swapout

        # training
        "max_epoch": max_epoch,  # # of step in an epoch
        "test_step": 10,  # # of step to test a model
        "save_step": 5,  # # of step to save a model
        "learning_rate": 1e-3,  # learning rate
        "grad_clip": 1,  # value of gradient to be used for clipping
        "use_gpu": True,  # whether to use gpu for training

        # data
        "data": "mnist",  # name of dataset 
        "data_dir": "MNIST-data",  # name of data directory
        "sample_dir": "samples",  # name of sample directory

        # Debug
        "is_train": True,  # training or testing
        "display": False,  # whether to display the training results or not
        "random_seed": 123  # random seed for python
    }
    p = dotdict(hyperparams)

    if "random_seed" in p:
        tf.set_random_seed(p.random_seed)
        np.random.seed(p.random_seed)

    # TODO add hyperparams to model saving
    model_dir = setup_model_saving(p.model, p.data, hyperparams)
    DATA_DIR = p.data_dir
    SAMPLE_DIR = os.path.join(model_dir, p.sample_dir)

    check_and_create_dir(DATA_DIR)
    check_and_create_dir(SAMPLE_DIR)

    # prepare dataset
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

    next_train_batch = lambda x: mnist.train.next_batch(x)[0]
    next_test_batch = lambda x: mnist.test.next_batch(x)[0]

    height, width, channel = 28, 28, 1

    train_step_per_epoch = mnist.train.num_examples // p.batch_size
    test_step_per_epoch = mnist.test.num_examples // p.batch_size

    def pixelRNN(height, width, channel, params):
        """
        Args
        height, width, channel - the dimensions of the input
        params -- the hyperparameters of the network
        """
        input_shape = [None, height, width, channel
                       ] if params.use_gpu else [None, channel, height, width]
        inputs = tf.placeholder(tf.float32, input_shape)

        # input of main convolutional layers
        scope = "conv_inputs"
        conv_inputs = conv2d(inputs,
                             params.hidden_dims, [7, 7],
                             "A",
                             scope=scope)

        # main convolutions layers
        last_hid = conv_inputs
        for idx in range(params.recurrent_length):
            scope = 'CONV%d' % idx
            if params.use_swapout and idx > 0:
                theta1 = params.p1
                theta2 = params.p2
                noise_shape = array_ops.shape(last_hid)
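                # swapout (Singh et al., 2016): two per-unit Bernoulli masks mix
                # the identity path with the conv path, generalizing dropout
                # and stochastic depth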

                mask1 = math_ops.floor(theta1 + random_ops.random_uniform(
                    noise_shape, seed=params.random_seed, dtype=last_hid.dtype)
                                       )
                mask2 = math_ops.floor(theta2 + random_ops.random_uniform(
                    noise_shape,
                    seed=None if params.random_seed is None
                    else params.random_seed * 2,
                    dtype=last_hid.dtype))

                last_hid = last_hid * mask1 + conv2d(
                    last_hid, 3, [1, 1], "B", scope=scope) * mask2
            else:
                last_hid = conv2d(last_hid, 3, [1, 1], "B", scope=scope)
            print("Building %s" % scope)

        # output convolutional layers
        for idx in range(params.out_recurrent_length):
            scope = 'CONV_OUT%d' % idx
            last_hid = tf.nn.relu(
                conv2d(last_hid,
                       params.out_hidden_dims, [1, 1],
                       "B",
                       scope=scope))
            print("Building %s" % scope)

        conv2d_out_logits = conv2d(last_hid,
                                   1, [1, 1],
                                   "B",
                                   scope='conv2d_out_logits')
        output = tf.nn.sigmoid(conv2d_out_logits)
        return inputs, output, conv2d_out_logits

    inputs, output, conv2d_out_logits = pixelRNN(height, width, channel, p)

    loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=conv2d_out_logits,
                                                labels=inputs,
                                                name='loss'))

    optimizer = tf.train.RMSPropOptimizer(p.learning_rate)
    grads_and_vars = optimizer.compute_gradients(loss)

    new_grads_and_vars = \
        [(tf.clip_by_value(gv[0], -p.grad_clip, p.grad_clip), gv[1]) for gv in grads_and_vars]
    optim = optimizer.apply_gradients(new_grads_and_vars)

    # show_all_variables()
    print("Building %s finished!" % p.model)

    def predict(sess, images, inputs, output):
        return sess.run(output, {inputs: images})

    def generate_occlusions(sess, height, width, inputs, output):
        # note: `images` comes from the enclosing scope (the last batch read)
        samples = occlude(images, height, width)
        starting_position = [0, height // 2]
        for i in range(starting_position[1], height):
            for j in range(starting_position[0], width):
                next_sample = binarize(predict(sess, samples, inputs, output))
                samples[:, i, j] = next_sample[:, i, j]
        return samples

    def generate(sess, height, width, inputs, output):
        samples = np.zeros((100, height, width, 1), dtype='float32')

        for i in range(height):
            for j in range(width):
                next_sample = binarize(predict(sess, samples, inputs, output))
                samples[:, i, j] = next_sample[:, i, j]

        return samples

    # with tf.Session() as sess:
    sess = tf.Session()
    stat = Statistic(sess, p.data, model_dir, tf.trainable_variables(),
                     p.test_step)
    stat.load_model()
    init = tf.global_variables_initializer()

    sess.run(init)
    stat.start()
    print("Start training")

    initial_step = stat.get_t() if stat else 0
    # iterator = trange(p.max_epoch, ncols=70, initial=initial_step)
    iterator = tqdm(range(p.max_epoch))

    check_and_create_dir('errors')

    # Eric - define the filename where we store errors
    err_filename = 'errors/error_file_{}.csv'.format(get_timestamp())

    # Eric - writes params to first two lines of file
    with open(err_filename, 'a') as errorFile:
        errorWriter = csv.writer(errorFile, delimiter=',')
        errorWriter.writerow([
            'model', 'max_epoch', 'use_swapout', 'use_residual', 'swapout_p1',
            'swapout_p2'
        ])
        errorWriter.writerow(
            [p.model, p.max_epoch, p.use_swapout, p.use_residual, p.p1, p.p2])

    # train and test iterations
    for epoch in iterator:
        # print('Start epoch')
        # 1. train
        total_train_costs = []
        for idx in range(train_step_per_epoch):
            images = binarize(next_train_batch(p.batch_size)) \
                .reshape([p.batch_size, height, width, channel])

            _, cost = sess.run([optim, loss], feed_dict={inputs: images})
            total_train_costs.append(cost)
        # print('Start testing')
        # 2. test
        total_test_costs = []
        for idx in range(test_step_per_epoch):
            images = binarize(next_test_batch(p.batch_size)) \
                .reshape([p.batch_size, height, width, channel])

            cost = sess.run(loss, feed_dict={inputs: images})
            total_test_costs.append(cost)

        avg_train_cost, avg_test_cost = np.mean(total_train_costs), np.mean(
            total_test_costs)

        # Eric - print stats for each iteration
        print("Epoch: {}, train l: {}, test l: {}".format(
            epoch, avg_train_cost, avg_test_cost))

        stat.on_step(avg_train_cost, avg_test_cost, err_filename)

    # print('Start generation')
    # 3. generate samples
    samples = generate_occlusions(sess, height, width, inputs, output)
    path = save_images(samples,
                       height,
                       width,
                       10,
                       10,
                       directory=SAMPLE_DIR,
                       prefix="epoch_%s" % epoch)
    iterator.set_description("train loss: %.3f, test loss: %.3f" %
                             (avg_train_cost, avg_test_cost))

    # with tf.Session() as sess:
    samples = generate_occlusions(sess, height, width, inputs, output)
    save_images(samples, height, width, 10, 10, directory=SAMPLE_DIR)

    from IPython.display import Image
    fname = save_images(samples, height, width, 10, 10, directory=SAMPLE_DIR)
    Image(filename=fname)
    tf.reset_default_graph()
    sess.close()
Example #23
        if len(words_counter) == 0:
            raise EndOfTextError()
        value = random.randrange(sum(words_counter.values()))
        freq_sum = 0
        for item in words_counter.items():
            freq_sum += item[1]
            if freq_sum >= value:
                return item[0]


if __name__ == '__main__':
    statistic_dir = sys.argv[1]

    with open(statistic_dir + "\words.txt") as input_file:
        words_counter = json.load(input_file)
    with open(statistic_dir + "\pairs.txt") as input_file:
        pairs_dictionary = json.load(input_file)
    with open(statistic_dir + "\\triplets.txt") as input_file:
        triplets_dictionary = json.load(input_file)

    statistic = Statistic(words_counter, pairs_dictionary, triplets_dictionary)

    text_len = int(sys.argv[2])
    line_len = int(sys.argv[3])
    sentences_in_paragraph = int(sys.argv[4])

    generator = TextGenerator(statistic, text_len, line_len,
                              sentences_in_paragraph)
    text = generator.gen()
    print(text)
Example #24
def main(_):
    model_dir = get_model_dir(conf, [
        'data_dir', 'sample_dir', 'max_epoch', 'test_step', 'save_step',
        'is_train', 'random_seed', 'log_level', 'display'
    ])
    preprocess_conf(conf)

    DATA_DIR = os.path.join(conf.data_dir, conf.data)
    SAMPLE_DIR = os.path.join(conf.sample_dir, conf.data, model_dir)

    check_and_create_dir(DATA_DIR)
    check_and_create_dir(SAMPLE_DIR)

    # 0. prepare datasets
    if conf.data == "mnist":
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

        next_train_batch = lambda x: mnist.train.next_batch(x)[0]
        next_test_batch = lambda x: mnist.test.next_batch(x)[0]

        height, width, channel = 28, 28, 1

        train_step_per_epoch = mnist.train.num_examples // conf.batch_size
        test_step_per_epoch = mnist.test.num_examples // conf.batch_size
    elif conf.data == "cifar":
        from cifar10 import IMAGE_SIZE, inputs

        maybe_download_and_extract(DATA_DIR)
        images, labels = inputs(eval_data=False,
                                data_dir=os.path.join(DATA_DIR,
                                                      'cifar-10-batches-bin'),
                                batch_size=conf.batch_size)

        height, width, channel = IMAGE_SIZE, IMAGE_SIZE, 3

    with tf.Session() as sess:
        network = Network(sess, conf, height, width, channel)

        stat = Statistic(sess, conf.data, model_dir, tf.trainable_variables(),
                         conf.test_step)
        stat.load_model()

        if conf.is_train:
            logger.info("Training starts!")

            initial_step = stat.get_t() if stat else 0
            iterator = trange(conf.max_epoch, ncols=70, initial=initial_step)

            for epoch in iterator:
                print('epoch', epoch)
                # 1. train
                total_train_costs = []
                for idx in range(train_step_per_epoch):
                    images = binarize(next_train_batch(conf.batch_size)) \
                      .reshape([conf.batch_size, height, width, channel])

                    cost = network.test(images, with_update=True)
                    total_train_costs.append(cost)

                # 2. test
                total_test_costs = []
                for idx in range(test_step_per_epoch):
                    images = binarize(next_test_batch(conf.batch_size)) \
                      .reshape([conf.batch_size, height, width, channel])

                    cost = network.test(images, with_update=False)
                    total_test_costs.append(cost)

                avg_train_cost, avg_test_cost = np.mean(
                    total_train_costs), np.mean(total_test_costs)

                stat.on_step(avg_train_cost, avg_test_cost)

                # 3. generate samples
                samples = network.generate()
                save_images(samples,
                            height,
                            width,
                            10,
                            10,
                            directory=SAMPLE_DIR,
                            prefix="epoch_%s" % epoch)

                print("train l: %.3f, test l: %.3f" %
                      (avg_train_cost, avg_test_cost))
                iterator.set_description("train l: %.3f, test l: %.3f" %
                                         (avg_train_cost, avg_test_cost))
        else:
            logger.info("Image generation starts!")

            samples = network.generate()
            save_images(samples, height, width, 10, 10, directory=SAMPLE_DIR)
Example #25
#!/usr/bin/python3
import os
import time
from datetime import datetime

import settings as cfg
from log import init_log
from statistic import Statistic

if __name__ == "__main__":
    log = init_log(log_name="Main", log_level=cfg.LOG_LEVEL)
    counter = 0
    stat = Statistic(maxlen=20, log_level=cfg.LOG_LEVEL)
    while True:
        with open("/sys/class/thermal/thermal_zone0/temp") as reader:
            try:
                text = reader.read()
                temperature = int(text.strip())
            except Exception as e:
                log.error(f"Exception : {e}")
                time.sleep(cfg.READ_INTERVAL)
                continue  # temperature was not read; skip this sample
        sample_time = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
        stat.add(temperature, sample_time)
        counter += 1
        if counter % (cfg.WRITE_INTERVAL // cfg.READ_INTERVAL) == 0:
            log.info(f"Write new report...{counter}")
            report = stat.report()
            with open(cfg.RESULT_FILE, 'w') as writer:
                writer.write(report)
        time.sleep(cfg.READ_INTERVAL)
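The loop above touches only four settings; a minimal settings.py consistent with those accesses might look like this (the names come from the code, the values are assumptions):

# settings.py - minimal sketch; the values are illustrative assumptions
import logging

LOG_LEVEL = logging.INFO   # passed to init_log and Statistic
READ_INTERVAL = 5          # seconds between temperature samples
WRITE_INTERVAL = 300       # seconds between report writes
RESULT_FILE = '/tmp/temperature_report.txt'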
Example #26
 def testConstructor(self):
     if IGNORE_TEST:
         return
     statistic = Statistic(self.shim)
     self.assertIsNotNone(statistic._shim)
Example #27
        kernel = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])

        p99 = stat.get_percentile(scharr, 99)

        print(p99)
        _, scharr_th = cv.threshold(scharr, p99, 255, cv.THRESH_BINARY)

        cv.imshow('original_bgr', bgr)
        # cv.imshow('original_bg', bg)
        cv.imshow('gray', gray)
        # cv.imshow('sobel', sobel)
        cv.imshow('scharr', scharr)
        # cv.imshow('sobel_th', sobel_th)
        cv.imshow('scharr_th', scharr_th)

        k = cv.waitKey(1) & 0xff
        if k == ord('q'):
            break
        histr = cv.calcHist([scharr], [0], None, [256], [0, 256])
        plt.plot(histr, color='blue')
        plt.pause(0.001)
        plt.clf()
    plt.close()
    cap.release()
    cv.destroyAllWindows()


if __name__ == '__main__':
    stat = Statistic()
    gate()
Example #28
if jenkins_url is None:
    print('Specify JENKINS_URL as an environment variable.')
    sys.exit(1)
if jenkins_user is None:
    print('Specify JENKINS_USER as an environment variable.')
    sys.exit(1)
if jenkins_user_token is None:
    print('Specify JENKINS_USER_TOKEN as an environment variable.')
    sys.exit(1)

line_bot_api = LineBotApi(channel_access_token)
handler = WebhookHandler(channel_secret)
test_result = TestResult()
run_test = RunTest()
jenkins = Jenkins()
statistic = Statistic()

static_tmp_path = os.path.join(os.path.dirname(__file__), 'static', 'tmp')


@app.route("/callback", methods=['POST'])
def callback():
    # get X-Line-Signature header value
    signature = request.headers['X-Line-Signature']

    # get request body as text
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)

    # handle webhook body
    try:
Example #29
def _statistic(args: argparse.Namespace):
    exts = get_extensions(args.languages_file)
    stats = Statistic(args.email_notify, args.target_directory)
    stats.gather_statistic(args.modes, args.languages_file, exts)
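_statistic reads only four attributes off the parsed namespace; a minimal argparse setup consistent with those accesses might look like the sketch below (flag names, defaults, and help text are assumptions):

import argparse

def build_parser():
    # Hypothetical parser; only the destination names come from _statistic above.
    parser = argparse.ArgumentParser(description='gather code statistics')
    parser.add_argument('--languages-file', dest='languages_file', required=True,
                        help='file mapping languages to file extensions')
    parser.add_argument('--target-directory', dest='target_directory', default='.',
                        help='directory to gather statistics from')
    parser.add_argument('--modes', nargs='+', default=[],
                        help='which statistics to gather')
    parser.add_argument('--email-notify', action='store_true',
                        help='send the results by email')
    return parser

# usage: _statistic(build_parser().parse_args())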
Example #30
 def test_statistic_methods(self):
     """Test the mean and variance methods."""
     object1 = Statistic(numbers=[1, 2], confidence=0.95)
     self.assertEqual(object1.mean(), 1.5)
     self.assertEqual(object1.variance(), 0.5)
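Those assertions pin down part of the interface: for numbers=[1, 2], mean() is 1.5 and variance() is 0.5, i.e. the sample variance with Bessel's correction. A minimal sketch consistent with just this test (the confidence argument is stored but unused here):

class Statistic:
    # Minimal sketch inferred from the test above, not the real implementation.
    def __init__(self, numbers, confidence=0.95):
        self.numbers = list(numbers)
        self.confidence = confidence  # presumably used by interval methods not shown

    def mean(self):
        return sum(self.numbers) / len(self.numbers)

    def variance(self):
        # sample variance with an (n - 1) denominator: [1, 2] -> 0.5
        m = self.mean()
        return sum((x - m) ** 2 for x in self.numbers) / (len(self.numbers) - 1)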