Example #1
def main(unused_argv=None):

    # Deleting checkpoint data by hand every run is tedious, so generate a save directory per run
    model_dir = '../models/dogcat/' + str(uuid.uuid1()).split('-')[0]

    input = Input()

    data = input.get()
    # NOTE: overrides the per-run directory generated above
    model_dir = '../models/dogcat/'
    # Instantiate the Estimator
    classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,
                                        model_dir=model_dir)

    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)
    '''
    # train
    classifier.train(
        input_fn=lambda:input.input_fn(data['train_fnames'], data['train_labels']),
        steps=20000,
        hooks=[logging_hook])

    # eval
    eval_results = classifier.evaluate(
        input_fn=lambda:input.eval_input_fn(data['val_fnames'], data['val_labels']),
        steps=1000,
        hooks=[logging_hook])

    print(eval_results)
    '''

    # Predict
    '''
    Example of the predict method's return value.
    A value in the format specified around line 74 is returned:
    {'classes': 1, 'probabilities': array([0.04826169, 0.9517383 ], dtype=float32)}
    '''

    data = {'test_fnames': ['../datasets/tmp/c.jpg'], 'test_labels': [0]}

    predictions = classifier.predict(
        input_fn=lambda: input.eval_input_fn(data['test_fnames']))

    print('0: Cat, 1: Dog')
    for i, pred in enumerate(predictions):
        print('File: {0}, Prediction: {1}, Answer: {2}, Probability: {3}%, {4}'.format(
            data['test_fnames'][i].split('/')[-1], pred['classes'],
            data['test_labels'][i], round(np.max(pred['probabilities']) * 100),
            'Correct' if pred['classes'] == data['test_labels'][i] else 'Wrong'))
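
Note: the `Input` class used by this example is not shown. The sketch below is a hypothetical `eval_input_fn` built on the TF 1.x `tf.data` API, to illustrate the kind of input the Estimator's `predict()` call expects; the 150x150 image size, the "x" feature key, and the batch size are assumptions, not the example's actual implementation.

import tensorflow as tf

def eval_input_fn(filenames, batch_size=1, image_size=(150, 150)):
    # Decode, resize and normalize each image; return a features dict.
    def _parse(fname):
        img = tf.image.decode_jpeg(tf.read_file(fname), channels=3)
        img = tf.image.resize_images(img, image_size)
        return {"x": tf.cast(img, tf.float32) / 255.0}

    dataset = tf.data.Dataset.from_tensor_slices(tf.constant(filenames))
    dataset = dataset.map(_parse).batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
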
Example #2
def build_input_feed_dict(graph, bucket, config):
    """Build input and feed_dict for bucket(inference only)
    """
    # mapping placeholders
    p_is_train = graph.get_tensor_by_name('prefix/is_train:0')
    p_sentence_length = graph.get_tensor_by_name('prefix/sentence_length:0')
    p_input_data_pos_ids = graph.get_tensor_by_name(
        'prefix/input_data_pos_ids:0')
    p_input_data_chk_ids = graph.get_tensor_by_name(
        'prefix/input_data_chk_ids:0')
    p_input_data_word_ids = graph.get_tensor_by_name(
        'prefix/input_data_word_ids:0')
    p_input_data_wordchr_ids = graph.get_tensor_by_name(
        'prefix/input_data_wordchr_ids:0')
    if 'elmo' in config.emb_class:
        p_elmo_input_data_wordchr_ids = graph.get_tensor_by_name(
            'prefix/elmo_input_data_wordchr_ids:0')
    if 'bert' in config.emb_class:
        p_bert_input_data_token_ids = graph.get_tensor_by_name(
            'prefix/bert_input_data_token_ids:0')
        p_bert_input_data_token_masks = graph.get_tensor_by_name(
            'prefix/bert_input_data_token_masks:0')
        p_bert_input_data_segment_ids = graph.get_tensor_by_name(
            'prefix/bert_input_data_segment_ids:0')
        if 'elmo' in config.emb_class:
            p_bert_input_data_elmo_indices = graph.get_tensor_by_name(
                'prefix/bert_input_data_elmo_indices:0')

    inp = Input(bucket, config, build_output=False)
    feed_dict = {
        p_input_data_pos_ids: inp.example['pos_ids'],
        p_input_data_chk_ids: inp.example['chk_ids'],
        p_is_train: False,
        p_sentence_length: inp.max_sentence_length
    }
    feed_dict[p_input_data_word_ids] = inp.example['word_ids']
    feed_dict[p_input_data_wordchr_ids] = inp.example['wordchr_ids']
    if 'elmo' in config.emb_class:
        feed_dict[p_elmo_input_data_wordchr_ids] = inp.example[
            'elmo_wordchr_ids']
    if 'bert' in config.emb_class:
        feed_dict[p_bert_input_data_token_ids] = inp.example['bert_token_ids']
        feed_dict[p_bert_input_data_token_masks] = inp.example[
            'bert_token_masks']
        feed_dict[p_bert_input_data_segment_ids] = inp.example[
            'bert_segment_ids']
        if 'elmo' in config.emb_class:
            feed_dict[p_bert_input_data_elmo_indices] = inp.example[
                'bert_elmo_indices']
    return inp, feed_dict
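
A hypothetical usage sketch for `build_input_feed_dict()` (not part of the original code): after loading a frozen graph whose tensors live under the `prefix/` scope, the returned `feed_dict` can be passed to `Session.run()`. The output tensor name `prefix/logits:0` is an assumption.

import tensorflow as tf

# `graph`, `bucket` and `config` are assumed to be prepared elsewhere.
with tf.Session(graph=graph) as sess:
    inp, feed_dict = build_input_feed_dict(graph, bucket, config)
    p_logits = graph.get_tensor_by_name('prefix/logits:0')  # assumed output name
    logits = sess.run(p_logits, feed_dict=feed_dict)
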
Example #3
    def test_compareresults(self):
        # keyWordList = ["science", "Computer"]
        keyWordList = []
        keyWord = "science"
        filePath = os.path.join(dirPath, "testcases/Test1/ListOfFiles.in")
        filePathTitle = os.path.join(dirPath, "testcases/Test1")

        # print(filePath)
        outputTestPath = os.path.join(dirPath, "testcases/Test1/Output1.txt")
        # os.remove("Output1.txt")
        for i in range(2):
            if len(keyWordList) == 0:
                keyWordList.append(keyWord)
                self.input = Input(filePath, keyWordList, filePathTitle)
            else:
                keyWord = "Computer"
                keyWordList.append(keyWord)
                if len(keyWordList) > 1:
                    with open(outputPath, "w") as file:
                        file.close()
                self.input = Input(filePath, keyWordList, filePathTitle)
        self.assertTrue(filecmp.cmp(outputPath, outputTestPath, shallow=False))
        os.remove("Output1.txt")
Example #4
def main():
    input = Input()
    trainX, trainY, testX, testY = input.init()
    trainNum = trainX.shape[0]
    testNum = testX.shape[0]

    D = np.zeros((trainNum))
    for i in range(trainNum):
        D[i] = 1 / trainNum

    T = 50

    trainPredict = np.zeros((T, trainNum))
    testPredict = np.zeros((T, testNum))
    alpha = np.zeros(T)
    cor = np.zeros(T)
    error = np.zeros(T)
    clfs = list()
    for i in range(T):
        print('i = ', i)
        clf = tree.DecisionTreeClassifier(min_samples_split=3)
        clf = clf.fit(trainX, trainY, sample_weight=D)
        clfs.append(clf)

        trainPredict[i, :] = clf.predict(trainX)
        cor[i] = compare(trainPredict[i, :], trainY, trainNum)
        #	print('correct num = ', cor[i])

        error[i], ok = calc_error(trainPredict[i, :], trainY, D)
        if not ok:
            T = i
            break
        D, alpha[i] = update(trainPredict[i, :], trainY, error[i], D)

    output(cor, error, alpha)
    '''
	result = weighted(alpha, trainPredict, T, trainNum)
	correct = compare(result, trainY, trainNum)
	print('train correct num = ', correct, ', ratio = ', correct/32561)
	'''

    for i in range(T):
        testPredict[i, :] = clfs[i].predict(testX)

    weightedSum, result = weighted(alpha, testPredict, T, testNum)
    correct = compare(result, testY, testNum)
    print('test correct num = ', correct, ', ratio = ', correct / 16281)

    auc = roc_auc_score(testY, weightedSum)
    print('auc = ', auc)
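
The helpers `compare`, `calc_error`, `update`, and `weighted` used above are not shown. The sketch below is one possible AdaBoost-style implementation, assuming labels encoded as -1/+1; it is illustrative only, not the author's code (`output()`, which only logs, is omitted).

import numpy as np

def compare(pred, truth, n):
    # Number of correctly classified samples.
    return int(np.sum(pred == truth))

def calc_error(pred, truth, D):
    # Weighted training error; boosting stops if the weak learner is no
    # better than chance.
    err = float(np.sum(D[pred != truth]))
    return err, err < 0.5

def update(pred, truth, err, D):
    # Standard AdaBoost weight update for -1/+1 labels.
    alpha = 0.5 * np.log((1.0 - err) / max(err, 1e-12))
    D = D * np.exp(-alpha * truth * pred)
    return D / D.sum(), alpha

def weighted(alpha, predictions, T, n):
    # Alpha-weighted vote over the first T weak learners.
    weighted_sum = alpha[:T].dot(predictions[:T, :])
    return weighted_sum, np.sign(weighted_sum)
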
Example #5
def train(config):
    """Prepare input data(train, dev), model and fit
    """

    # build input train and dev data
    train_file = 'data/train.txt'
    dev_file = 'data/dev.txt'
    '''for KOR
    train_file = 'data/kor.train.txt'
    dev_file = 'data/kor.dev.txt'
    '''
    '''for KOR nbest
    train_file = 'data/kor.nbest.train.txt'
    dev_file = 'data/kor.nbest.dev.txt'
    '''
    '''for CRZ
    train_file = 'data/cruise.train.txt.in'
    dev_file = 'data/cruise.dev.txt.in'
    '''
    train_data = Input(train_file,
                       config,
                       build_output=True,
                       do_shuffle=True,
                       reuse=False)
    dev_data = Input(dev_file, config, build_output=True, reuse=False)
    tf.logging.debug('loading input data ... done')
    config.update(train_data)
    tf.logging.debug('config.num_train_steps = %s' % config.num_train_steps)
    tf.logging.debug('config.num_warmup_epoch = %s' % config.num_warmup_epoch)
    tf.logging.debug('config.num_warmup_steps = %s' % config.num_warmup_steps)

    # create model and compile
    model = Model(config)
    model.compile()

    # do actual training
    fit(model, train_data, dev_data)
Example #6
def test_taillard_20_czasy():
    x = range(1, 11, 1)
    ox_label = "Nr testu [1..10]"
    oy_label = "Czas wykonania danego algorytmu [s]"
    legend = ["Cuckoo", "NEH", "Random100", "Tabu100", "NEHTabu100"]
    title = "Taillard 20 jobs 10 machines - porownanie wynikow"

    naive = [1.55, 1.44, 1.64, 1.66, 1.29, 1.27, 1.18, 1.34, 1.37, 1.28]
    neh = []
    random100 = []
    tabu100 = []
    nehtabu100 = []

    for test_number in range(1, 11):
        print "test = {}".format(test_number)
        user_input = Input()
        user_input.load_from_file(
            "testy/Taillard_20jobs_10machines_{}.txt".format(test_number))
        solver = Solver(user_input)

        start = time.time()
        a = solver.neh_algorithm()[1]
        end = time.time()
        neh.append(end - start)

        start = time.time()
        for iteration in range(0, 5):
            a = solver.random_search_approach(100)[1]
        end = time.time()

        random100.append((end - start) / 5.0)

        start = time.time()
        for iteration in range(0, 5):
            a = solver.tabu_search_approach(100, 5, 3)[1]
        end = time.time()

        tabu100.append((end - start) / 5.0)

        start = time.time()
        for iteration in range(0, 5):
            a = solver.tabu_search_with_neh(100, 5, 3)[1]
        end = time.time()

        nehtabu100.append((end - start) / 5.0)

    y = [naive, neh, random100, tabu100, nehtabu100]
    plot_graph(x, y, ox_label, oy_label, legend, title,
               "test_taillard_20_czasy.png")
Example #7
    def __init__(self):
        self.window = curses.initscr()
        size = self.window.getmaxyx()
        self.board = Board(size[1], size[0] - 1)
        self.snake = Snake((int(size[1] / 2), int(size[0] / 2)))
        self.apple = Apple(size[1], size[0] - 1)
        self.input = Input()
        curses.noecho()
        curses.cbreak()
        curses.curs_set(0)
        self.window.keypad(True)
        self.window.nodelay(True)
        self.window.scrollok(False)

        self.gameover = False
Example #8
    def __init__(self, params, logger):
        """
        :type params: Tester.Params
        :type logger: CustomLogger
        :rtype: None
        """

        self._params = params
        self._logger = logger

        self.input = Input(self._params.input, self._logger)

        self.annotations = None

        self._acc_dict = {}
Example #9
def train(config):
    # build input data
    train_file = 'data/train.txt'
    dev_file = 'data/dev.txt'
    '''KOR
    train_file = 'data/kor.train.txt'
    dev_file = 'data/kor.dev.txt'
    '''
    train_data = Input(train_file, config, build_output=True)
    dev_data = Input(dev_file, config, build_output=True)
    print('loading input data ... done')

    # set for bert optimization
    if config.emb_class == 'bert' and config.use_bert_optimization:
        config.num_train_steps = int(
            (len(train_data.sentence_tags) / config.batch_size) * config.epoch)
        config.num_warmup_steps = int(config.num_train_steps *
                                      config.warmup_proportion)

    # create model
    model = Model(config)

    # training
    do_train(model, config, train_data, dev_data)
Example #10
    def start(self):
        self.map.load()
        self._entity_map = {}
        self._position_map = {}
        self._entities = {}
        self._registered = {}
        self._enemySpawns = {}
        for x, y in self.map.getMap().keys():
            self._position_map[(x, y)] = []

        self._total_surface = Surface((self.map.w, self.map.h))
        tid = self.addEntity(register=True,
                             entity=MChar(self,
                                          self.map.getType(Tiles.Start)[0],
                                          inputStream=self.getInputStream()))
        self._camera = Viewport(
            tuple([s * const.res for s in const.screenSize]),
            lambda: self.map.getAttr("scale"), self.get(tid),
            (150, 200, 150, 200), self.map)
        self._background = Parallax(const.backgrounds)
        self.editor = Editor(self.map,
                             self._surface,
                             enabled=False,
                             inputStream=self.getInputStream())

        self._input = Input(inputStream=self.getInputStream())
        self._input.set(KEYDOWN, self.editor.toggleEnabled, K_e)
        self._input.set(KEYDOWN, self.start, K_r)

        # self._sound = Sound("assets\\music.ogg")
        # self._sound.play(-1)

        try:
            self._healthBar = HealthBar(10, 10, self.get(tid))
        except AssertionError:
            pass

        for (x, y), val in self.map.enemies.items():
            block = self.map.get(x, y)
            self._enemySpawns[block] = EnemySpawn(level=self,
                                                  anchor=Object(pos=(block.x,
                                                                     block.y)),
                                                  maxEmitted=val,
                                                  timeBetween=2)

        self._countdown = CountdownTimer(const.screenSize[0] * const.res - 50,
                                         10, self.map.getAttr("timeLim"))
Example #11
  def mine(self, blockChain, users, confirmedTransactions, unconfirmedTransactions, transactionsInOrder, difficulty):
    nonce = 0
    if len(blockChain) > 0:
      last = blockChain[-1]
      lastHash = last.get_hash()
    else:
      lastHash = "".join([ "0" for x in range(64) ])

    while True:
      while True:
        hasher = hashlib.sha256()
        base = f"{lastHash}{nonce}"
        hasher.update(base.encode('utf-8'))
        if int(hasher.hexdigest(), 16) < 2 ** difficulty:
          break
        nonce += 1
      if sum([ 1 if x.get_hash() == hasher.hexdigest() else 0 for x in blockChain ]) == 0:
        break
      nonce += 1
    for transaction in confirmedTransactions:
      if not transaction.verify(users):
        raise Exception("Fatal error transaction is invalid/modified.")
    print(f"Transactions are verified for block {len(blockChain) + 1} transaction.")
    for i in range(len(blockChain)):
      if not blockChain[i].validate(confirmedTransactions, transactionsInOrder[i]):
        raise Exception("Fatal error blockchain is in invalid state.")
    print(f"BlockChain validation successful for block {len(blockChain) + 1}.")
    unconfirmedTransactions.append(
      Transaction(
        inputs=[
          Input(
            coinbase=base,
            signature=self.sign("".join([ '0' for i in range(64) ]).encode('utf8')).hex(),
            publicKey=self.verifyingKey.to_string().hex()
          )
        ],
        outputs=[
          Output(
            amount=5000000000
          )
        ]
      )
    )
    blockChain.append(Block(lastHash, hasher.hexdigest(), nonce, unconfirmedTransactions))
    transactionsInOrder.append([ len(confirmedTransactions) + i for i in range(len(unconfirmedTransactions)) ])
    confirmedTransactions += list(unconfirmedTransactions)
    unconfirmedTransactions.clear()
Example #12
def inference_bulk(config):
    """Inference for test file
    """

    # Build input data
    test_file = 'data/test.txt'
    test_data = Input(test_file, config)
    print('max_sentence_length = %d' % test_data.max_sentence_length)
    print('loading input data ... done')

    # Create model
    model = Model(config)

    session_conf = tf.ConfigProto(allow_soft_placement=True,
                                  log_device_placement=False)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess, config.restore)
        print('model restored')
        feed_dict = {
            model.input_data_word_ids: test_data.sentence_word_ids,
            model.input_data_wordchr_ids: test_data.sentence_wordchr_ids,
            model.input_data_pos_ids: test_data.sentence_pos_ids,
            model.input_data_etc: test_data.sentence_etc,
            model.output_data: test_data.sentence_tag
        }
        logits, logits_indices, trans_params, output_data_indices, length, test_loss = \
            sess.run([model.logits, model.logits_indices, model.trans_params,
                      model.output_data_indices, model.length, model.loss],
                     feed_dict=feed_dict)
        print('test precision, recall, f1(token): ')
        TokenEval.compute_f1(config.class_size, logits, test_data.sentence_tag,
                             length)
        if config.use_crf:
            viterbi_sequences = viterbi_decode(logits, trans_params, length)
            tag_preds = test_data.logits_indices_to_tags_seq(
                viterbi_sequences, length)
        else:
            tag_preds = test_data.logits_indices_to_tags_seq(
                logits_indices, length)
        tag_corrects = test_data.logits_indices_to_tags_seq(
            output_data_indices, length)
        test_prec, test_rec, test_f1 = ChunkEval.compute_f1(
            tag_preds, tag_corrects)
        print('test precision, recall, f1(chunk): ', test_prec, test_rec,
              test_f1)
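
The module-level `viterbi_decode()` called above is not shown. One plausible sketch, assuming it wraps `tf.contrib.crf.viterbi_decode` sentence by sentence over the padded batch logits and the actual sentence lengths:

from tensorflow.contrib.crf import viterbi_decode as crf_viterbi_decode

def viterbi_decode(logits, trans_params, lengths):
    # Decode each sentence up to its real length and collect the tag id sequences.
    sequences = []
    for logit, length in zip(logits, lengths):
        seq, _ = crf_viterbi_decode(logit[:length], trans_params)
        sequences.append(seq)
    return sequences
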
Example #13
def _test():
    d = {
        'name': 'white',
        'location': '../test/white.csv',  # Ugly, but effective.
        'id': 'hash',
        'data': {
            'REMAP': {
                'name': 0,
                'hash': 1,
                'date.created': 2,
                'comment': 3,
            },
        },
    }
    csvin = Input(**d)
    from pprint import pprint as pp
    return csvin.get('41e25e514d90e9c8bc570484dbaff62b')['name'] == 'cmd.exe'
Example #14
    def __init__(self):
        space: Optional[str] = os.environ.get("PK_EXA_SPACE")
        if space is not None:
            pk.set_default_space(pk.ExecutionSpace(space))
            if space == "Cuda":
                pk.enable_uvm()

        self.system = System()
        self.system.init()

        self.input = Input(self.system)

        self.integrator: Integrator = None
        self.binning: Binning = None
        self.force: Force = None
        self.neighbor: Neighbor = None
        self.comm: Comm = None
Example #15
def scriptSliderProgram():
    input = Input()
    output = Output()
    process = Process()

    scriptText = input.speechScriptInputter()
    for sentence in scriptText:
        while True:
            print(sentence)
            # Function that recognizes speech and converts it to a file
            input.voiceRecognizer()
            userVoiceText = output.voiceApiCaller()
            print(userVoiceText)
            matchRate = output.speechScriptComparer(userVoiceText, sentence)
            print(matchRate)
            if matchRate > 0.6:
                break
Example #16
def main(stdscr):
    finished = False

    gui = CursesInterface(stdscr)
    inpt = Input(stdscr, 0)

    menu = MenuScreen()
    game = Game()

    gui.initialize()
    inpt.initialize()

    while not menu.finished:
        events = inpt.poll_events()
        gui.clear()
        menu.update(events)
        render_menu(gui, menu)
        gui.refresh()
        time.sleep(0.1)

    if menu.current_idx == 0:
        gamewrapper = LocalVsGameWrapper(inpt, game)
    elif menu.current_idx == 1:
        gamewrapper = LocalAiGameWrapper(inpt, game)
    elif menu.current_idx == 2:
        gamewrapper = NetworkedGameWrapper(inpt, game, Server(), 1, 2)
    else:
        gamewrapper = NetworkedGameWrapper(inpt, game,
                                           Client(menu.get_server_ip_str()), 2,
                                           1)

    gamewrapper.initialize()
    while not game.finished:
        gamewrapper.update()
        gui.clear()
        render_game(gui, game)
        time.sleep(0.05)

    gamewrapper.cleanup()

    while True:
        events = inpt.poll_events()
        if events:
            return
        time.sleep(0.1)
Example #17
 def __init__(self):
     self._running = True
     self.size = self.width, self.height = 1024, 768
     self.sprites = {}
     self.display = None
     self.map = TileMap(200, 100)
     self.input = Input()
     self.map_offset = [0, 0]
     self.last_coords = None
     self.font = None
     self.tip_label = None
     self.help_label = None
     self.version_label = None
     self.status = 0
     self.last_status = 0
     self.selected_tile_name = "Dirt Background"
     self.fps_label = None
     self.clock = None
Example #18
    def __init__(self, H, W, pattern):
        '''
            Explanation: Initialisation function for the class

            Parameters:
                start_X: Starting position of the paddle
                end_X: Ending position of the paddle
                thickness: Thickness of the paddle
                V: Velocity of the paddle
        '''
        self.H = H
        self.W = W
        self.pattern = pattern
        self.start_X = W // 2 - PADDLE_LENGTH // 2
        self.end_X = self.start_X + PADDLE_LENGTH
        self.thickness = PADDLE_THICKNESS
        self.V = PADDLE_V
        self.input = Input()
Example #19
def main():
    input = Input()
    trainX, trainY, testX, testY = input.init()
    trainNum = trainX.shape[0]
    testNum = testX.shape[0]

    clf = RandomForestClassifier(n_estimators=100,
                                 max_features=3,
                                 min_samples_split=3)
    clf = clf.fit(trainX, trainY)

    result = clf.predict(trainX)
    correct = compare(result, trainY, trainNum)
    print('train correct num = ', correct, ', ratio = ', correct / 32561)

    result = clf.predict(testX)
    correct = compare(result, testY, testNum)
    print('test correct num = ', correct, ', ratio = ', correct / 16281)
Example #20
def main():
    input = Input()
    trainX, trainY, testX, testY = input.init()

    tot = 0
    for times in range(5):
        print('times = ', times)

        kf = KFold(n_splits=5)
        cur = 0
        rocs = np.zeros(5)

        for train_index, test_index in kf.split(trainX, trainY):
            print('fold = ', cur)

            X_train, X_test = trainX[train_index], trainX[test_index]
            Y_train, Y_test = trainY[train_index], trainY[test_index]

            trainNum = X_train.shape[0]
            testNum = X_test.shape[0]

            T = 150
            clfs = list()
            for i in range(T):
                partX, partY = bootstrap(X_train, Y_train)
                clf = tree.DecisionTreeClassifier(min_samples_split=3,
                                                  max_features=3)
                clf = clf.fit(partX, partY)
                clfs.append(clf)

            testPredict = np.zeros((T, testNum))
            for i in range(T):
                testPredict[i, :] = clfs[i].predict(X_test)

            aveSum, result = vote(testPredict, T, testNum)

            rocs[cur] = roc_auc_score(Y_test, aveSum)

            print(compare(Y_test, result, testNum), testNum)
            cur += 1

        tot += sum(rocs) / 5

    print(tot / 5)
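
The `bootstrap` and `vote` helpers for this bagging loop are not shown. A minimal sketch, assuming binary 0/1 labels, could look like the following (illustrative only):

import numpy as np

def bootstrap(X, Y):
    # Sample with replacement up to the original training-set size.
    idx = np.random.randint(0, X.shape[0], size=X.shape[0])
    return X[idx], Y[idx]

def vote(predictions, T, n):
    # Average the T per-tree predictions; majority vote via a 0.5 threshold.
    ave = predictions[:T, :].mean(axis=0)
    return ave, (ave > 0.5).astype(int)
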
Example #21
    def __init__(self, game):
        GameState.__init__(self, game)

        self.input_map = {
            #Input(key='any'): lambda key, mod, unicode_key: self.any_key_pressed(key, mod, unicode_key),
            Input(key=pg.K_ESCAPE):
            lambda _: sys.exit()
        }

        self.list_menu = UI.ListMenu(
            items=('Start SP Game', 'Start MP Game', 'Host', 'Connect',
                   'Exit'),
            pos=(300, 300),
            align=('center', 'center'),
            text_align=('center'),
            font=fonts.main_menu_font,
            selected_font=fonts.main_menu_selected_font)

        self.ui_container.add_element(self.list_menu)
Example #22
 def create_win(self):
     self.frdls = FrdLs(
         self.stdscr,
         self._frdls_info.smaxcol - self._frdls_info.smincol + 1,
         self._frdls_info.sminrow, self._frdls_info.smincol,
         self._frdls_info.smaxrow, self._frdls_info.smaxcol, self.friends,
         Config['color']['frdls']['fgcolor'],
         Config['color']['frdls']['bgcolor'])
     self.msgls_set[self.current_receiver] = MsgLs(
         self.stdscr,
         self._msgls_info.smaxcol - self._msgls_info.smincol + 1,
         self._msgls_info.sminrow,
         self._msgls_info.smincol,
         self._msgls_info.smaxrow,
         self._msgls_info.smaxcol, [],
         fgcolor=Config['color']['msgls']['fgcolor'],
         bgcolor=Config['color']['msgls']['bgcolor'])
     self.input = Input(
         self.stdscr,
         self._input_info.smaxcol - self._input_info.smincol + 1,
         self._input_info.sminrow, self._input_info.smincol,
         self._input_info.smaxrow, self._input_info.smaxcol, self.text,
         Config['color']['input']['fgcolor'],
         Config['color']['input']['bgcolor'])
     self.command = Command(
         self.stdscr,
         self._command_info.smaxrow - self._command_info.sminrow + 1,
         self._command_info.sminrow, self._command_info.smincol,
         self._command_info.smaxrow, self._command_info.smaxcol,
         self.command, Config['color']['command']['fgcolor'],
         Config['color']['command']['bgcolor'])
     self.statusbar = StatusBar(self.stdscr,
                                1,
                                self._statusbar_info.smaxcol -
                                self._statusbar_info.smincol + 1,
                                self._statusbar_info.sminrow,
                                self._statusbar_info.smincol,
                                self._statusbar_info.smaxrow,
                                self._statusbar_info.smaxcol,
                                "NORMAL",
                                "xxxxxx",
                                fgcolor=curses.COLOR_BLACK,
                                bgcolor=curses.COLOR_WHITE)
Example #23
    def __init__(self, map, surface, **kwargs):
        super().__init__(**kwargs)

        self._map = map
        self._tool = None
        self._brush = None
        self._painting = None

        surf = Surface((map.w, map.h))
        self._display = Display(surf, klass=surf.get_rect(), transparent=True, alpha=75)
        for tile in self._map.getMap().values():
            tile.subscribe("editor", self._update)

        self._createMenu(surface)

        self._input = Input(inputStream=self.getInputStream())
        self._input.set(pygame.KEYDOWN, self.toggleShowing, pygame.K_o)
        self._input.set(pygame.KEYDOWN, self.menu.toggleEnabled, pygame.K_t)
        self._input.set(pygame.KEYDOWN, self._map.save, pygame.K_RETURN)
Example #24
def run():
    Censor = Filter(settings.limit, settings.ban, settings.topics)
    Starter = Censor.reTopic()
    Reader = Input(settings.timer)

    Bots = []
    for i in range(settings.NumBots):
        Bots.append(
            Bot(BotSettings.name[i], BotSettings.color[i],
                BotSettings.voice[i], BotSettings.voice_rate[i], Censor))

    tRandomBot = -1

    while True:
        if 'tReply' not in locals():
            tReply = Starter
        tRandomBot = Censor.randomBot(Bots, tRandomBot)
        tReply = Bots[tRandomBot].ask(tReply)
        tReply = Reader.Interupt(tReply, Censor)
Example #25
def test_taillard_20_wyniki():
    x = range(1, 11, 1)
    y = []
    ox_label = "Nr testu [1..10]"
    oy_label = "Czas zakonczenia ostatniego zadania na ostatniej maszynie [s]"
    legend = ["Taillard", "NEH", "Random100", "Tabu100", "NEHTabu100"]
    title = "Taillard 20 jobs 10 machines - porownanie wynikow"

    naive = [1582, 1659, 1496, 1378, 1419, 1397, 1484, 1538, 1593, 1591]
    neh = []
    random100 = []
    tabu100 = []
    nehtabu100 = []

    for test_number in range(1, 11):
        print "test = {}".format(test_number)
        user_input = Input()
        user_input.load_from_file(
            "testy/Taillard_20jobs_10machines_{}.txt".format(test_number))
        solver = Solver(user_input)

        # naive_sol = solver.naive_approach()[1]
        neh_sol = solver.neh_algorithm()[1]
        random100_sol = 0
        tabu100_sol = 0
        nehtabu100_sol = 0

        for iteration in range(0, 5):
            random100_sol += solver.random_search_approach(100)[1]
            tabu100_sol += solver.tabu_search_approach(100, 5, 3)[1]
            nehtabu100_sol += solver.tabu_search_with_neh(100, 5, 3)[1]

        # naive.append(naive_sol)
        neh.append(neh_sol)
        random100.append(random100_sol / 5.0)
        tabu100.append(tabu100_sol / 5.0)
        nehtabu100.append(nehtabu100_sol / 5.0)

    y = [naive, neh, random100, tabu100, nehtabu100]
    plot_graph(x, y, ox_label, oy_label, legend, title,
               "test_taillard_20_wyniki.png")
Example #26
def start():
    # Set up logger
    logger = logging.getLogger('decoder')
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler = logging.StreamHandler()
    handler.setLevel(logging.ERROR)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # Set up components
    preprocessor = Preprocessor()
    clustering = Clustering(5)
    decoder = Decoder(logger)

    _input = Input()
    filename = _input.read_file('audio/testfile3.wav')
    # filename = _input.read_file('audio/testfile4.wav')

    preprocessor.read_csv(filename)
    # preprocessor.read_csv('simulation_2018-09-27_17-13-19.csv')
    preprocessor.plot()
    preprocessor.plot(True)

    preprocessor.process_loudness()
    preprocessor.plot()
    preprocessor.plot(True)

    training_batch = preprocessor.get_batch()
    labels = clustering.train(training_batch)
    mapping = clustering.get_label_mapping()
    signals = list()

    for label in labels:
        signals.append(mapping.get(label))

    for signal in signals:
        decoder.decode(signal)

    print(decoder.message)
Example #27
def index():

    # Load the input for the calculations from the request
    input = Input(request)

    # Load the parameters for the calculation
    data = {
        'latitude': input.get_float('latitude', -90.0, 90.0),
        'longitude': input.get_float('longitude', -180.0, 180.0),
        'year': input.get_int('year', 1900, 2099),
        'month': input.get_int('month', 1, 12),
        'day': input.get_int('day', 1, 31),
    }

    # Perform the calculation - if possible
    if None not in data.values():
        data['sunrise'], data['sunset'] = sunrise_sunset(**data)

    # Inject the location and timezone if available
    location = input.get('location')
    if location is not None:
        data['location'] = location
    timezone = input.get('timezone')
    if timezone is not None:
        data['timezone'] = timezone

    # Add a few extra variables for the template renderers
    extra = dict(data)
    extra.update({
        'is_post': request.method == 'POST',
        'tz': input.get_timezone(),
    })

    # Write the response
    if input.get_mime() == Input.MIME_TEXT_HTML:
        return render_template('index.html', **extra)
    elif input.get_mime() == Input.MIME_JSON:
        return jsonify(data)
    else:
        return render_template('index.txt', **extra)
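
A hypothetical client call for this endpoint (the route path `/`, host/port, and JSON content negotiation via the `Accept` header are assumptions, not taken from the example):

import requests

resp = requests.get(
    'http://localhost:5000/',
    params={'latitude': 52.52, 'longitude': 13.405,
            'year': 2020, 'month': 6, 'day': 21},
    headers={'Accept': 'application/json'})
print(resp.json())  # expects keys such as 'sunrise' and 'sunset'
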
Example #28
def create_test_dataset(test_dir,
                        test_csv_file_path,
                        dataset_name,
                        header=False,
                        is_related_path=False):
    col_0 = 'col_0'
    col_1 = 'col_1'
    col_5 = 'col_5'
    if header:
        col_0 = 'col_0_h'
        col_1 = 'col_1_h'
        col_5 = 'col_5_h'
    schema = Schema.from_csv(csv_path=test_csv_file_path, header=header)
    schema.merge_columns_in_range('col_vector', (2, 4))
    input = Input(schema)
    input.add_categorical_column(col_0)
    input.add_numeric_column(col_1)
    input.add_vector_column('col_vector')
    img2d = Img2DColumn(is_related_path=is_related_path)
    input.add_column(col_5, img2d)
    return Dataset.Builder(input, dataset_name, test_dir,
                           parallelism_level=2).build()
Example #29
    def __init__(self, levels):
        super().__init__()

        # --- Inits
        os.environ["SDL_VIDEO_CENTERED"] = "1"
        pygame.mixer.pre_init(44100, 16, 2, 4096)
        pygame.init()
        pygame.font.init()

        pygame.display.set_caption(const.gameName)
        self._surface = pygame.display.set_mode(
            (const.screenSize[0] * const.res, const.screenSize[1] * const.res))
        self._clock = pygame.time.Clock()
        self._levAttr = levels
        self._world = None
        self._paused = False

        self._menu = Menu(surface=self._surface,
                          pos=(const.screenSize[0] * const.res / 2 - 50,
                               const.screenSize[1] * const.res / 2 - 100),
                          inputStream=self.getInputStream())
        self._menu.addItem("resume", (0, 0, 100, 48),
                           MColor((255, 0, 0), (0, 0, 255)),
                           MText("Resume", (0, 255, 0)), MAction(self._resume))
        self._menu.addItem("start", (0, 50, 100, 48),
                           MColor((255, 0, 0), (0, 0, 255)),
                           MText("Start", (0, 255, 0)), MAction(self.start))
        self._menu.addItem("editor", (0, 100, 100, 48),
                           MColor((255, 0, 0), (0, 0, 255)),
                           MText("Editor", (0, 255, 0)), MAction(self._editor))
        self._menu.addItem("exit", (0, 150, 100, 48),
                           MColor((255, 0, 0), (0, 0, 255)),
                           MText("Exit", (0, 255, 0)),
                           MAction(self.setFinished))

        self._input = Input(inputStream=self.getInputStream())
        self._input.set(pygame.KEYDOWN, self._menuToggle, pygame.K_m)
        self._input.set(pygame.KEYDOWN, self._pause, pygame.K_p)
Example #30
def run():
    params = dict()
    load_config(params)
    enrich_params(params)
    log_level = 'tf.logging.{}'.format(str(params.get('log_level')).upper())
    tf.logging.set_verbosity(eval(log_level))
    config = load_sess_config(params)

    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=params.get('model_dir'),
        config=config,
        params=params
    )

    input = Input(params)
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: input.input_fn(mode=tf.estimator.ModeKeys.TRAIN, vocabs=params.get('vocabs')),
        max_steps=params.get('max_steps', None),
    )

    eval_spec = tf.estimator.EvalSpec(
        input_fn=lambda: input.input_fn(mode=tf.estimator.ModeKeys.EVAL, vocabs=params.get('vocabs')),
        throttle_secs=params.get('throttle_secs'),
    )

    tf.estimator.train_and_evaluate(
        estimator=estimator,
        train_spec=train_spec,
        eval_spec=eval_spec
    )

    predictions = estimator.predict(
        input_fn=lambda: input.input_fn(mode=tf.estimator.ModeKeys.PREDICT, vocabs=params.get('vocabs')),
    )

    for index, prediction in enumerate(predictions):
        print('index {} : prediction {}'.format(index, prediction))