Example #1
    def stop(self):
        if self.running:
            util.timer('Stopping audio')
            self.running = False
            self.stream.stop_stream()
            self.stream.close()
            self.audio.terminate()
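Examples #1 through #4 call `util.timer` with a single message and ignore the return value. The implementation itself is not shown on this page; a minimal checkpoint-style sketch consistent with that usage (an assumption, not the repo's actual code) would print each message together with the time elapsed since the previous call:

import time

_last_checkpoint = None

def timer(message):
    # Hypothetical sketch: print the message, plus the time elapsed
    # since the previous checkpoint if there was one.
    global _last_checkpoint
    now = time.perf_counter()
    if _last_checkpoint is None:
        print(message)
    else:
        print('{} (+{:.3f}s)'.format(message, now - _last_checkpoint))
    _last_checkpoint = now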
Example #2
    def stop(self):
        util.timer('Closing GUI')

        self.closed = True
        self.view.close()
        self.debug_view.close()
        self.app.quit()
Example #3
    def start(self, show_debug_window=False):
        util.timer('Showing GUI')

        if show_debug_window:
            self.debug_view.show()
        self.view.show()
        self.app.processEvents()
        self.closed = False
Example #4
    def setup(self):
        util.timer('Initializing GUI')

        app = QtGui.QApplication([])

        view = pg.GraphicsView()
        view.resize(1000, 700)
        view.setWindowTitle('LED Music Visualizer')
        view.closeEvent = self._close_event
        self.on_close = None

        layout = pg.GraphicsLayout()
        view.setCentralItem(layout)

        spec_viewer_proxy = QtGui.QGraphicsProxyWidget(layout)
        spec_viewer = SpectrogramViewer()
        spec_viewer_proxy.setWidget(spec_viewer)
        layout.addItem(spec_viewer_proxy)

        layout.layout.setRowStretchFactor(0, 1)
        layout.nextRow()

        pixel_viewer_proxy = QtGui.QGraphicsProxyWidget(layout)
        pixel_viewer = PixelViewer()
        pixel_viewer_proxy.setWidget(pixel_viewer)
        layout.addItem(pixel_viewer_proxy)

        layout.layout.setRowStretchFactor(1, 0)
        layout.nextRow()

        labels_layout = pg.GraphicsLayout()
        fps_label = labels_layout.addLabel('FPS: ?')
        time_label = labels_layout.addLabel('Elapsed Time: ?')
        pause_label = labels_layout.addLabel('Resume')
        pause_label.mousePressEvent = self.__pause_pressed
        layout.addItem(labels_layout)

        layout.layout.setRowStretchFactor(2, 0)

        debug_view = pg.GraphicsView()
        debug_view.resize(800, 600)
        debug_view.setWindowTitle('Debug')

        debug_layout = pg.GraphicsLayout()
        debug_view.setCentralItem(debug_layout)

        self.app = app
        self.view = view
        self.spec_viewer = spec_viewer
        self.pixel_viewer = pixel_viewer
        self.fps_label = fps_label
        self.time_label = time_label
        self.pause_label = pause_label

        self.debug_view = debug_view
        self.debug_layout = debug_layout
Example #5
def test():
    with timer('image'):
        img = Image('train/372.png')

    print "image size (%d, %d)" % img.size()

    with timer('parse'):
        p = parse_frame(img)
        print '--------------'

    return p
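Examples #5 and #7 use `timer` as a context manager that takes a label. A minimal compatible sketch, assuming it simply reports wall-clock time for the enclosed block on exit (the real helper in these repos is not shown), could be:

import time
from contextlib import contextmanager

@contextmanager
def timer(label):
    # Hypothetical sketch: time the enclosed block and report on exit.
    start = time.perf_counter()
    try:
        yield
    finally:
        print('{}: {:.3f}s'.format(label, time.perf_counter() - start))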
Example #6
    def start(self):
        util.timer('Connecting to LEDs')
        self.running = True
        self.fadecandy = FadeCandy()
        self.fadecandy.connect()
        self.fadecandy.send_color_settings(
            gamma=2.5,
            r=self.brightness * 1.0,
            g=self.brightness * 0.8,
            b=self.brightness * 0.7
        )
Example #7
def run_day(day_num):
    """Solve and print the answers for a given day."""
    print(f"Day {day_num}")
    with timer("downloading input"):
        puzzle_input = get_input(day_num)
    day_module = importlib.import_module(day_module_name(day_num))
    day_state = {}
    with timer("parsing input"):
        parsed_input = apply_trim_args(day_module.parse, puzzle_input,
                                       day_state)
    with timer("running part 1"):
        part1 = apply_trim_args(day_module.part1, parsed_input, day_state)
    with timer("running part 2"):
        part2 = apply_trim_args(day_module.part2, parsed_input, day_state)
    print()
    print(f"part1:\n\n{part1}\n")
    print(f"part2:\n\n{part2}\n")
    print()
Example #8
def stg2ens_0602w1_delfdba_top300():
    fn_dba = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe_norerank_topk300.h5'
    fn_npy = 'data/working/exp45/stg2e45_ens0602w1_delfdbaqe_norerank_topk300.npy'

    with timer('Load ensemble descriptors'):
        ds = ensloader.load_desc('modelset_v0602',
                                 'modelset_v0602_weight_v1',
                                 mode='retrieval')
        ds_trn = ensloader.load_desc('modelset_v0602',
                                     'modelset_v0602_weight_v1',
                                     mode='retrieval_dbatrain')

    with timer('DELF-DBAQE'):
        # alpha DBA(index+test)
        ds = delfdbaqe(ds, ds_trn, qe_topk=5, thresh=90)
        loader.save_index_dataset(fn_dba, ds)

    with timer('Generate submission file'):
        euclidsearch.gpux4_euclidsearch_from_dataset(ds, fn_npy, topk=300)
Example #9
def __search(dataset, fn_out, dba_niters=2, qe_topk=10):
    Path(fn_out).parent.mkdir(parents=True, exist_ok=True)

    feats_test_and_train = np.concatenate(
        [dataset.feats_test, dataset.feats_train], axis=0)

    for i in range(dba_niters):
        print(f'[iter-{i}] start DBA...')

        with timer('Prep faiss index'):
            cpu_train = faiss.IndexFlatL2(feats_test_and_train.shape[1])
            gpu_train = faiss.index_cpu_to_all_gpus(cpu_train)
            gpu_train.add(feats_test_and_train)

        with timer('Search'):
            dists, topk_idx = gpu_train.search(x=feats_test_and_train,
                                               k=qe_topk)

        with timer('Weighting'):
            weights = np.logspace(0, -1.5,
                                  qe_topk).reshape(1, qe_topk,
                                                   1).astype(np.float32)
            feats_test_and_train = (feats_test_and_train[topk_idx] *
                                    weights).sum(axis=1)

    with timer('l2norm'):
        feats_test_and_train = l2norm_numpy(
            feats_test_and_train.astype(np.float32))

    with timer('Save search results'):
        feats_test, feats_train = np.split(feats_test_and_train,
                                           [len(dataset.feats_test)],
                                           axis=0)

        with h5py.File(fn_out, 'a') as f:
            f.create_dataset('feats_train', data=feats_train)
            f.create_dataset('feats_test', data=feats_test)
            f.create_dataset('ids_train',
                             data=dataset.ids_train.astype(dtype='S16'))
            f.create_dataset('ids_test',
                             data=dataset.ids_test.astype(dtype='S16'))
Example #10
def gpux4_allpair_similarity(ds, prefix):
    # Use cache
    cache_data = load_cached_result(prefix)
    if cache_data is not None:
        return cache_data

    # Search with GpuMultiple
    co = faiss.GpuMultipleClonerOptions()
    co.shard = True
    vres = []
    for _ in range(4):
        res = faiss.StandardGpuResources()
        vres.append(res)

    cpu_index = faiss.IndexFlatIP(ds.feats_index.shape[1])
    gpu_index = faiss.index_cpu_to_gpu_multiple_py(vres, cpu_index, co)
    gpu_index.add(ds.feats_index)

    # 177sec
    with timer('Prepare all-pair similarity on index dataset'):
        ii_sims, ii_ids = gpu_index.search(x=ds.feats_index, k=100)

    with timer('Save results (index-index)'):
        fn_out = Path(prefix) / "index19_vs_index19_ids.npy"
        fn_out.parent.mkdir(parents=True, exist_ok=True)
        np.save(str(fn_out), ii_ids)
        np.save(str(Path(prefix) / "index19_vs_index19_sims.npy"), ii_sims)

    with timer('Prepare all-pair similarity on test-index dataset'):
        ti_sims, ti_ids = gpu_index.search(x=ds.feats_test, k=100)

    with timer('Save results (test-index)'):
        np.save(str(Path(prefix) / "test19_vs_index19_ids.npy"), ti_ids)
        np.save(str(Path(prefix) / "test19_vs_index19_sims.npy"), ti_sims)

    return edict({
        'ti_sims': ti_sims,
        'ti_ids': ti_ids,
        'ii_sims': ii_sims,
        'ii_ids': ii_ids,
    })
Example #11
    def _active_learning_initial_training(
        self, semi_sup: bool, stats: Stats, data_for_plotting: List[Stats],
        learner: Optional[BaseEstimator], sampling_strategy: Callable,
        active_learning_data: ActiveLearningData, labeled_indices: List[int]
    ) -> Tuple[ActiveSemiSup, Stats, List[Stats]]:
        if semi_sup:
            clf = self.semi_supervised_class(**self.semi_supervised_class_args)
            x, y = self._construct_semi_supervised_data(
                active_learning_data.x_train_start,
                active_learning_data.y_train_start,
                active_learning_data.x_train_pool,
                active_learning_data.y_train_pool, labeled_indices)
            clf, elapsed_train = util.timer(clf.fit, **{'X': x, 'y': y})
        else:
            clf, elapsed_train = util.timer(
                ActiveLearner,
                **dict(estimator=learner,
                       query_strategy=sampling_strategy,
                       X_training=active_learning_data.x_train_start.values,
                       y_training=active_learning_data.y_train_start.values))
        predicted, elapsed_query = util.timer(
            clf.predict, **{'X': active_learning_data.x_dev})
        # [:, 1] to get positive class probabilities; semi-sup probabilities can be NaN, so skip
        scores = None if semi_sup else clf.predict_proba(
            active_learning_data.x_dev)[:, 1]
        metrics = self._get_metrics(actual=active_learning_data.y_dev,
                                    predicted=predicted,
                                    scores=scores)
        data_for_plotting.append(
            self._get_plotting_row(-1, metrics, elapsed_train, elapsed_query))
        metrics = util.add_prefix_to_dict_keys(metrics, 'initial_')
        stats = util.merge_dicts(stats, {
            'train time (s)': elapsed_train,
            'query time (s)': elapsed_query
        })
        stats = util.merge_dicts(stats, metrics)
        return clf, stats, data_for_plotting
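Example #11 relies on a different convention again: `util.timer(func, **kwargs)` calls the function and returns a `(result, elapsed)` tuple. A sketch matching that signature, as an assumption rather than the repo's actual helper, might be:

import time

def timer(func, **kwargs):
    # Hypothetical sketch: invoke func with the given keyword arguments
    # and return its result together with the elapsed wall-clock time.
    start = time.perf_counter()
    result = func(**kwargs)
    return result, time.perf_counter() - start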
Example #12
def __prep_faiss_search_results(block_id=1):
    # 1 ~ 32

    dataset = loader.load_train_dataset()
    with timer('Loading train19 landmark dict'):
        landmark_dict = load_train19_landmark_dict()

    size_train = dataset.feats_train.shape[0]
    part_size = int(size_train / 32)
    idx_train_start = (block_id - 1) * part_size
    idx_train_end = (block_id) * part_size
    if block_id == 32:
        idx_train_end = size_train

    cpu_index = faiss.IndexFlatL2(dataset.feats_train.shape[1])
    gpu_index = faiss.index_cpu_to_all_gpus(cpu_index)
    gpu_index.add(dataset.feats_train)
    dists, topk_idx = gpu_index.search(
        x=dataset.feats_train[idx_train_start:idx_train_end], k=1000)

    df = pd.DataFrame(dataset.ids_train[idx_train_start:idx_train_end],
                      columns=['id'])
    df['images'] = np.apply_along_axis(' '.join,
                                       axis=1,
                                       arr=dataset.ids_train[topk_idx])

    print('generate sub')
    rows = []
    for imidx, (_, r) in tqdm.tqdm(enumerate(df.iterrows()), total=len(df)):
        landmark_id = landmark_dict[r['id']]
        same_landmark_images = []
        for rank, imid in enumerate(r.images.split(' ')):
            if landmark_id == landmark_dict[imid]:
                same_landmark_images.append(
                    f'{rank}:{dists[imidx, rank]:.8f}:{imid}')
                if len(same_landmark_images) >= 100:
                    break

        rows.append({
            'id': r['id'],
            'landmark_id': landmark_id,
            'matched': ' '.join(same_landmark_images),
        })

    fn = ('data/working/exp12/'
          f'train19_train19_faiss_search_same_landmarks_blk{block_id}.csv.gz')
    Path(fn).parent.mkdir(parents=True, exist_ok=True)

    print('to_csv')
    pd.DataFrame(rows).to_csv(fn, index=False, compression='gzip')
Example #13
    def fit(self, samples, target, bfs_threshold=None):
        """Construce multiple trees in the forest.

    Parameters
    ----------
    samples:numpy.array of shape = [n_samples, n_features]
            The training input samples.

    target: numpy.array of shape = [n_samples]
            The training input labels.
    
    bfs_threshold: integer, optional (default= n_samples / 40)
            The n_samples threshold of changing to bfs
    
    Returns
    -------
    self : object
      Returns self
    """
        self.fit_init(samples, target)

        if bfs_threshold is not None:
            self.bfs_threshold = bfs_threshold

        if self.verbose:
            print("bfs_threshold : %d; bootstrap : %r; min_samples_split : %d" % (
                self.bfs_threshold, self.bootstrap, self.min_samples_split))
            print("n_samples : %d; n_features : %d; n_labels : %d; max_features : %d" % (
                self.stride, self.n_features, self.n_labels, self.max_features))

        self._trees = [
            RandomClassifierTree(self) for i in range(self.n_estimators)
        ]

        for i, tree in enumerate(self._trees):
            si, n_samples = self._get_sorted_indices(self.sorted_indices)

            if self.verbose:
                with timer("Tree %s" % (i, )):
                    tree.fit(self.samples, self.target, si, n_samples)
                print ""
            else:
                tree.fit(self.samples, self.target, si, n_samples)

        self.fit_release()
        return self
Example #15
def main():
    """Main entry point; runs a day based on command line input."""
    if len(sys.argv) < 2:
        print(f"usage: python {sys.argv[0]} [<day numbers> | all]")
        print("example:\n" + f"$ python {sys.argv[0]} 1 # solves day 1\n" +
              f"$ python {sys.argv[0]} 2 5 # solves days 2 and 5\n" +
              f"$ python {sys.argv[0]} all # solves all days in the repo\n")
        sys.exit(1)
    with timer("overall"):
        if sys.argv[1:] == ["all"]:
            day_num = 1
            while has_day(day_num):
                run_day(day_num)
                day_num += 1
        else:
            for day_num in (int(arg) for arg in sys.argv[1:]):
                run_day(day_num)
    sys.exit(0)
Example #16
def __scoring_with_top100_arcfacefish_v4():
    dataset = loader.load_train_dataset()

    fn_out = 'data/working/exp12/v7_fish_dba2qe10.h5'
    if not Path(fn_out).exists():
        __search(dataset, fn_out, dba_niters=2, qe_topk=10)
    dataset = loader.load_train_dataset_singlefile(fn_out)

    with timer('Loading train19 landmark dict'):
        landmark_dict = load_train19_landmark_dict()

    fn_sub = 'data/working/exp12/v7_fish_nodba_top40_train19_v4.csv.gz'

    cpu_index = faiss.IndexFlatL2(dataset.feats_train.shape[1])
    gpu_index = faiss.index_cpu_to_all_gpus(cpu_index)
    gpu_index.add(dataset.feats_train)
    dists, topk_idx = gpu_index.search(x=dataset.feats_test, k=100)

    df = pd.DataFrame(dataset.ids_test, columns=['id'])
    df['images'] = np.apply_along_axis(' '.join,
                                       axis=1,
                                       arr=dataset.ids_train[topk_idx])

    print('generate sub')
    rows = []
    max_value = sum([np.exp(np.sqrt(i)) for i in range(40)])
    for _, r in tqdm.tqdm(df.iterrows(), total=len(df)):
        image_ids = [name.split('/')[-1] for name in r.images.split(' ')]
        counter = Counter()
        for i, image_id in enumerate(image_ids[:40]):
            landmark_id = landmark_dict[image_id]
            counter[landmark_id] += np.exp(-np.sqrt(i + 1))
        landmark_id, score = counter.most_common(1)[0]
        score = score / max_value
        rows.append({
            'id': r['id'],
            'landmarks': f'{landmark_id} {score:.9f}',
        })

    print('to_csv')
    df = pd.DataFrame(rows)
    df_sub = pd.read_csv('data/recognition_sample_submission.csv')
    df_sub = df_sub[['id']].merge(df, how='left', on='id')
    df_sub[['id', 'landmarks']].to_csv(fn_sub, index=False, compression='gzip')
Example #17
def fit_lgbm(
    X,
    y,
    params: dict = None,
    fit_params: dict = None,
    cv: list = None,
    metric: Callable = MSE,
):
    if cv is None:
        cv = KFold(n_splits=5, shuffle=True).split(X)

    models = []
    oof_pred = np.zeros_like(y, dtype=np.float64)

    for i, (train_idx, valid_idx) in enumerate(cv):
        X_train, y_train = X[train_idx], y[train_idx]
        X_valid, y_valid = X[valid_idx], y[valid_idx]

        model = LGBMRegressor(**params)
        with timer(prefix=f"fit fold={i+1}\t", suffix="\n"):
            model.fit(
                X_train,
                y_train,
                eval_set=[(X_train, y_train), (X_valid, y_valid)],
                **fit_params,
            )

        fold_pred = model.predict(X_valid)
        oof_pred[valid_idx] = fold_pred

        models.append(model)

        fold_score = np.sqrt(metric(y_valid, fold_pred))

        print(f"fold {i+1} score: {fold_score:.2f}")
        print("=" * 40 + "\n")

    oof_score = np.sqrt(metric(y, oof_pred))

    print(f"FINISHED \ whole score: {oof_score:.2f}")
    print("\n" + "=" * 40 + "\n")
    return models, oof_pred
Example #18
    def __init__(self, path, audio_volume):
        self.path = path
        self.audio_volume = audio_volume

        if self.path is None:
            util.timer('Generating debug audio')
            self.sample_rate = 44100
            self.channels = 1
            self.sample_width = 2
            self.samples = np.arange(0.0, 10.0, 1 / self.sample_rate)
            self.samples = np.cos(2 * np.pi * (1000 * self.samples + 100 * np.sin(2 * np.pi * 0.5 * self.samples)))
            self.samples = (self.samples * ((1 << 15) - 1)).astype(np.int16)
        else:
            util.timer('Loading audio')
            print('Loading audio from {}'.format(path))
            seg = pydub.AudioSegment.from_mp3(path)
            self.sample_rate = seg.frame_rate
            self.channels = seg.channels
            self.sample_width = seg.sample_width
            self.samples = np.array(seg.get_array_of_samples())

        self.samples = np.reshape(self.samples, (-1, self.channels))
        self.sample_count = self.samples.shape[0]
        self.duration = self.sample_count / self.sample_rate
        print('Samples: {:,d}'.format(self.sample_count))

        util.timer('Creating audio stream')
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(
            rate=self.sample_rate,
            channels=self.channels,
            format=pyaudio.get_format_from_width(self.sample_width, unsigned=False),
            output=True,
            stream_callback=self._update_stream,
            start=False
        )
        self.stream_pos = 0

        self.running = False
Example #19
    def __init__(self, audio_samples, sample_rate):
        self.sample_rate = sample_rate
        self.sample_count = audio_samples.shape[0]
        self.duration = self.sample_count / self.sample_rate

        if os.path.exists(self.cache_path):
            util.timer('Loading cached spectrogram')
            spec_data = np.load(self.cache_path)
        else:
            util.timer('Creating spectrogram')
            spec_data = _make_spectrogram(audio_samples, self.sample_rate,
                                          1 << 9)
            util.timer('Saving spectrogram to cache')
            np.save(self.cache_path, np.array(spec_data))
        self.spec, self.spec_grad, self.spec_freqs = spec_data
        self.spec_idxs = np.arange(len(self.spec_freqs))

        self.frame_count = self.spec.shape[0]
        self.frame_rate = self.frame_count / self.duration

        self.canvas = QtGui.QImage(*config.DISPLAY_SHAPE,
                                   QtGui.QImage.Format_RGB32)
        self.lyric_pos_filter = ExpFilter(0, 0.1, 0.1)
        self.chorus_bg_pos_filter = ExpFilter(0, 0.1, 0.1)
Example #20
from collections import defaultdict

from util import timer

if __name__ == "__main__":
    with timer():
        with open("input") as file:
            init = [int(n) for n in file.readline().strip().split(",")]

        turn, seen, prev = 1, defaultdict(list), None
        while turn <= 2020:
            if turn < len(init) + 1:
                n = init[turn - 1]
            else:
                if len(seen[prev]) < 2:
                    n = 0
                else:
                    n = seen[prev][-1] - seen[prev][-2]

            seen[n].append(turn)
            prev = n
            turn += 1

        print(turn, prev)
Example #21
def plot(df, df_cum):
    import matplotlib.pyplot as plt
    import os

    fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)
    fig.set_size_inches(8, 3)
    df_cum.plot(ax=ax1)
    df.plot.bar(ax=ax2)
    ax1.yaxis.grid()
    ax2.yaxis.grid()
    # will error if the folder does not exist
    fig.savefig(os.path.join("images", "task1.png"))


# Solve model
with timer() as t:
    feasibility = model.solve()

# Report
print("Status:", pulp.LpStatus[feasibility])
if feasibility == 1:
    print("Solution found in %.4f seconds" % t.elapsed)
    # show results
    planned_x = [v.value() for v in x.values()]
    df = pd.DataFrame(
        dict(
            production=planned_x,
            purchases=purchases,
            inventory=cumsum(planned_x) - cum_purchases,
        ))
    df_cum = pd.DataFrame(
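Example #21 binds the context manager with `as t` and reads `t.elapsed` after the block, which implies a class-based timer. A hypothetical sketch that sets `elapsed` on exit:

import time

class timer:
    # Hypothetical sketch: context manager exposing the measured
    # duration through an `elapsed` attribute once the block exits.
    def __enter__(self):
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.elapsed = time.perf_counter() - self._start
        return False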
Example #22
    def stop(self):
        if self.running:
            util.timer('Disconnecting from LEDs')
            self.running = False
            self._send_off()
            self.fadecandy.disconnect()
Example #23
    from train_helper import to_sequence, prepare_emb, load_w2v
    from model.classification_model import NeuralNet
    from model.vector_transformer import embedding_expander
    from trainer import NNTrainer
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp", default="exp")
    parser.add_argument("--source_embedding")
    parser.add_argument("--target_embedding")
    parser.add_argument("--device", default="cpu")
    parser.add_argument("--n_epochs", default=10, type=int)

    args = parser.parse_args()
    assert args.source_embedding

    logger = get_logger(name="Main", tag=args.exp)
    with timer("Load Data", logger):
        loader = DataLoader("../input/text")

    with timer("tokenize", logger):
        loader.tokenize(tokenizer)

    train, test = loader.load()
    X = train["tokenized"]
    X_test = test["tokenized"]

    y = train["label"]
    y_test = test["label"]

    with timer("Convert to sequence", logger):
        X, X_test, word_index = to_sequence(
            X, X_test, max_features=95000, maxlen=1200)
Example #24
    def index(self):
        if 'content-length' in cherrypy.request.headers and \
           'content-type' in cherrypy.request.headers and \
           cherrypy.request.headers['content-type'] == 'application/json':
            length = int(cherrypy.request.headers['content-length'])
            json_string = cherrypy.request.body.read(length).decode('utf-8')
            update = telebot.types.Update.de_json(json_string)
            bot.process_new_updates([update])
            return ''
        else:
            raise cherrypy.HTTPError(403)


# TIMER THAT CHECKS HOW MUCH TIME HAS PASSED SINCE THE SPECIALIST'S REPLY

util.timer(util.check_user_noreply_interval, config.reply_checking_interval)

# USER MESSAGE HANDLERS


# Handler for the first message
@bot.message_handler(
    func=lambda message: db.get_user(message.chat.id, 'email_address') is None
    and message.chat.id not in ALL and message.text not in ALL_SPEC,
    content_types=['text'])
def first_message(message):

    user = db.get_user_namedtuple(message.chat.id)
    if not user:
        regcode = randint(1111, 9999)
        new_user = (['first_name', message.chat.first_name], [
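In Example #24, `util.timer` takes a callback and an interval, acting as a periodic scheduler rather than a stopwatch. A sketch built on `threading.Timer`, assuming the callback should fire every `interval` seconds, could be:

import threading

def timer(callback, interval):
    # Hypothetical sketch: run callback every `interval` seconds on a
    # daemon thread, rescheduling itself after each invocation.
    def tick():
        callback()
        timer(callback, interval)
    t = threading.Timer(interval, tick)
    t.daemon = True
    t.start()
    return t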
Example #25
def train(params):
    global_step = tf.contrib.framework.get_or_create_global_step()
    #initialize model, environment and experience---------
    env = md.Environment(params['frame_skip'], params['game_name'])
    mod = md.DQN(params)
    args = [
        params['load_prev'], params['input_size'], params['frame_stack'],
        params['max_epi'], params['replay_start'], params['exp_file']
    ]
    exp = md.Experience(*args)  #get all arguments from args list
    #-----------------------------------------------------

    #-----Part 1---------
    frame_stack_ph = tf.placeholder(tf.uint8, [params['frame_stack']] +
                                    params['orig_inp'])
    #frame stack placeholder
    preprocess = mod.preprocess(frame_stack_ph)
    #preprocessed input
    israndom_ph = tf.placeholder(tf.bool)
    #placeholder for getting random action
    action = mod.get_action([preprocess], israndom=israndom_ph)
    #keep in mind that action is of size [1]
    #here, should run action and store experience into Experience data

    #----Part 2----------
    #get batch of state,action,reward,new state,done
    state_shape = [params['batch_size']
                   ] + params['input_size'] + [params['frame_stack']]
    #shape that state (and new state) is in
    state_ph = tf.placeholder(tf.uint8, shape=state_shape)  #state placeholder
    action_ph = tf.placeholder(tf.int64, shape=[params['batch_size']])
    reward_ph = tf.placeholder(tf.float32, shape=[params['batch_size']])
    new_state_ph = tf.placeholder(tf.uint8, shape=state_shape)
    done_ph = tf.placeholder(tf.bool, shape=[params['batch_size']])
    batch_ph = [state_ph, action_ph, reward_ph, new_state_ph, done_ph]
    #batch_ph is not a placeholder itself, but a collection of placeholders
    train_opt = mod.train(global_step, batch_ph)
    assign_list = mod.switch_params()

    #------training session-----------
    if params['load_prev']:
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(params['checkpoint_dir'])
    with tf.train.MonitoredTrainingSession(
            checkpoint_dir=params['checkpoint_dir']) as sess:
        if params['load_prev']:
            saver.restore(sess, ckpt.model_checkpoint_path)
        #document steps
        eps_step = 0  #number of episodes that passed
        time_step = 0  #steps after experience replay has started
        total_start_time = time.time()
        total_step = 0  #steps in total
        while eps_step <= params['step_cap']:
            mod.init_frame_stack()
            #initialize frame stack
            x1 = env.reset()  # start the environment and get the initial observation
            eps_run_time = time.time()  # start the runtime of an episode
            step_in_ep = 0  # steps passed in current episode
            mod.add_frame(x1)  # add the initial observation to the stack
            total_r = 0
            while True:
                #part 1---------
                experience_dict = {}
                israndom_val = random.random() <= mod.rand_act_prob[0]
                #get a random bool
                experience_dict['state'], [experience_dict['action']
                                           ] = sess.run([preprocess, action],
                                                        feed_dict={
                                                            frame_stack_ph:
                                                            mod.get_stack(),
                                                            israndom_ph:
                                                            israndom_val
                                                        })
                #get state and action values
                if mod.rand_act_prob[0] > params['rand_action'][1]:
                    mod.rand_act_prob[0] -= mod.rand_act_prob[1]
                new_unprocessed_state_val, experience_dict[
                    'reward'], experience_dict['done'] = env.run(
                        experience_dict['action'])
                mod.add_frame(new_unprocessed_state_val)
                experience_dict['new_state'] = sess.run(
                    preprocess, feed_dict={frame_stack_ph: mod.get_stack()})
                exp.add_exp(experience_dict)
                #add experience
                #part 2---------
                batch_val = exp.get_batch(params['batch_size'])
                if batch_val is not None:
                    sess.run([train_opt],
                             feed_dict={
                                 batch_ph[i]: batch_val[i]
                                 for i in range(len(batch_ph))
                             })
                    if not time_step % params['target_update']:
                        sess.run(assign_list)
                    time_step += 1
                total_step += 1
                step_in_ep += 1
                total_r += experience_dict['reward']
                if experience_dict['done']:
                    cur_eps_run_time = ut.timer(time.time() - eps_run_time)
                    total_run_time = ut.timer(time.time() - total_start_time)
                    string = "episodes ran: %d,steps ran in episode: %d, Total steps taken: %d,reward: %.4f,episode run time:%s,total run time:%s"
                    print string % (eps_step, step_in_ep, total_step, total_r,
                                    cur_eps_run_time, total_run_time)
                    break
            eps_step += 1
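Example #25 uses `ut.timer(seconds)` purely as a duration formatter whose return value is interpolated into the log line. A sketch rendering seconds as h:mm:ss (the repo's exact format is unknown) might be:

def timer(seconds):
    # Hypothetical sketch: format a duration in seconds as h:mm:ss.
    seconds = int(seconds)
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return '{:d}:{:02d}:{:02d}'.format(hours, minutes, secs)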
Example #26
# util.timer(start_time)

model = xgb.XGBClassifier(
    max_depth=4,
    learning_rate=0.1,
    n_estimators=200,
    silent=False,
    objective='binary:logistic',
    subsample=1.0,
    min_child_weight=1,
    gamma=1.5,
    colsample_bytree=0.6,
    nthread=4,
)

start_time = util.timer(None)

model.fit(X,
          y,
          eval_metric='auc',
          eval_set=[(X_train, y_train), (X_test, y_test)],
          verbose=True)

# model = pickle.load(open('../xgb-model-bestparams.sav', 'rb'))

test_data = pd.read_csv('../data/test.csv')
id_codes = test_data['ID_code'].values
y_pred = model.predict(test_data.drop(['ID_code'], axis=1).values)

submission = pd.DataFrame(np.column_stack((id_codes, y_pred)),
                          columns=['ID_code', 'target'])
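Example #26 follows the two-call pattern common in Kaggle kernels: `util.timer(None)` returns a start marker, and `util.timer(start_time)` (the commented-out line at the top of the example) reports the elapsed time. A hypothetical sketch of that pattern:

from datetime import datetime

def timer(start_time=None):
    # Hypothetical sketch: with no argument, return the current time;
    # with a previous start time, print the elapsed duration.
    if start_time is None:
        return datetime.now()
    print('Time taken: {}'.format(datetime.now() - start_time))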
Example #27
from SimpleCV import Color, Display, Image
from Quartz.CoreGraphics import CGRectMake
import time

from brain import decide_left_or_right, get_start, find_path, smooth_path
from parse import parse_frame
from screenshot import get_frame
from util import timer, show_img
from viz import show_frame, show_frame2, draw_grid

wh = (w, h) = 768, 480
region = CGRectMake(672, 45, w, h)

with timer('frame'):
    frame = Image('train/12a.png')
    parsed_frame = parse_frame(frame)
    to_press = None
    if parsed_frame:
        to_press = decide_left_or_right(parsed_frame)
        print(to_press)
    current_pressed = to_press

    start = get_start(parsed_frame)
    path = find_path(start, 50, parsed_frame.rot_arr)
    path = smooth_path(path)

    dl = parsed_frame.rot_img.dl()
    w, h = parsed_frame.rot_img.size()

    for p in path:
        px = p[0] * w / 100
Example #28
        signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGINT, sigint)
    gui.on_close = lambda: audio.stop()

    gui.spec_viewer.on_scrub = lambda t: audio.skip_to(t)

    gui.spec_viewer.set_spectrogram(animation.spec, animation.spec_freqs, animation.frame_rate)

    gui.start(args.show_debug_window)

    if display: display.start()
    audio.start()

    frame_times = []

    util.timer('Running visualization')

    try:
        while audio.running:
            t = audio.elapsed_time

            frame_timer = time.time()
            frame_times.append(frame_timer)
            while frame_timer - frame_times[0] >= 1.0:
                frame_times.pop(0)

            pixels = animation.get_frame(t)

            if display: display.send_pixels(pixels)

            gui.update_fps(len(frame_times))
Example #29
    def _fit(self, X_train, y_train, n_epochs=50, eval_set=()):
        seed_torch()
        x_train = torch.tensor(X_train, dtype=torch.long).to(self.device)
        y = torch.tensor(y_train[:, np.newaxis],
                         dtype=torch.long).to(self.device)

        train = torch.utils.data.TensorDataset(x_train, y)

        train_loader = torch.utils.data.DataLoader(train,
                                                   batch_size=self.train_batch,
                                                   shuffle=True)
        if len(eval_set) == 2:
            x_val = torch.tensor(eval_set[0], dtype=torch.long).to(self.device)
            y_val = torch.tensor(eval_set[1][:, np.newaxis],
                                 dtype=torch.long).to(self.device)
            valid = torch.utils.data.TensorDataset(x_val, y_val)
            valid_loader = torch.utils.data.DataLoader(
                valid, batch_size=self.val_batch, shuffle=False)
        model = self.model(**self.kwargs)
        model.to(self.device)
        optimizer = optim.Adam(model.parameters())
        if self.anneal:
            scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                             T_max=n_epochs)
        best_score = -np.inf
        epoch_score = []
        epoch_f1 = []
        epoch_loss = []
        epoch_val_loss = []

        for epoch in range(n_epochs):
            with timer(f"Epoch {epoch+1}/{n_epochs}", self.logger):
                model.train()
                avg_loss = 0.
                for (x_batch, y_batch) in train_loader:
                    y_pred = model(x_batch)
                    loss = self.loss_fn(y_pred, y_batch.squeeze())
                    optimizer.zero_grad()

                    loss.backward()
                    optimizer.step()
                    avg_loss += loss.item() / len(train_loader)

                valid_preds, avg_val_loss = self._val(valid_loader, model)
                score = accuracy_score(eval_set[1],
                                       np.argmax(valid_preds, axis=1))
                f1 = f1_score(eval_set[1],
                              np.argmax(valid_preds, axis=1),
                              average="macro")
                epoch_score.append(score)
                epoch_f1.append(f1)
                epoch_loss.append(avg_loss)
                epoch_val_loss.append(avg_val_loss)
            self.logger.info(
                f"loss: {avg_loss:.4f} val_loss: {avg_val_loss:.4f}")
            self.logger.info(f"val_acc: {score} val_f1: {f1}")
            if self.anneal:
                scheduler.step()
            if f1 > best_score:
                torch.save(model.state_dict(),
                           self.path / f"best{self.fold}.pt")
                self.logger.info(f"Save model on epoch {epoch+1}")
                best_score = f1
        model.load_state_dict(torch.load(self.path / f"best{self.fold}.pt"))
        valid_preds, avg_val_loss = self._val(valid_loader, model)
        self.logger.info(f"Validation loss: {avg_val_loss}")
        self.scores[self.fold] = epoch_score
        self.f1s[self.fold] = epoch_f1
        self.loss[self.fold] = epoch_loss
        self.loss_val[self.fold] = epoch_val_loss
        return valid_preds
Example #30
    def start(self):
        util.timer('Starting audio')
        self.running = True