Example #1
    def __init__(self, exp_name, graph=None, std_out_period=50,
                 save_period=50):
        self._log_path = ("data/" + exp_name + "/" + exp_name + "_" +
                          datetime.now().strftime("%H_%M_%m_%d_%y"))
        self._writer = summ.FileWriter(self._log_path, graph)
        self._summary_mrg = None
        self._writer.flush()
        self._save_period = save_period
        self._std_out_period = std_out_period
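A minimal usage sketch (hypothetical names: the enclosing class is called Logger here, and `summ` is assumed to alias the TF 1.x tensorflow.summary module):

import tensorflow as tf

# Creates data/mnist_run/mnist_run_<timestamp> and writes the graph there
logger = Logger("mnist_run", graph=tf.get_default_graph())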
Example #2
    def get_writer(self):
        def do_summary(ql, al, ul, vl):
            nonlocal writer_step
            writer.add_summary(self.model.get_summary(ql, al, ul, vl),
                               writer_step)
            writer_step += 1

        if not self.writer_path:
            return None
        import tensorflow.summary as su
        writer = su.FileWriter(self.writer_path, self.model.sess.graph)
        writer_step = 0
        return do_summary
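A hedged usage sketch (assumes `agent` is an instance of the enclosing class with writer_path and model already set; the four loss values are hypothetical):

do_summary = agent.get_writer()
if do_summary is not None:
    do_summary(q_loss, a_loss, u_loss, v_loss)  # each call advances writer_step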
Example #3
  def train(self, FLAGS):
    # Set up the optimizer
    train_step = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.lr).minimize(self.loss)
    tf.global_variables_initializer().run()

    # Set up logging
    loss_summary = summary.scalar('loss', self.loss)
    writer = summary.FileWriter(FLAGS.output_dir + '/logs', self.session.graph)

    # Load MNIST dataset
    dataset = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    n_batches = int(dataset.train.num_examples / FLAGS.bs)

    # Learn
    for epoch in range(FLAGS.epochs):
      train_loss = None
      val_loss = None

      # Iterate over batches
      for i in range(n_batches):
        inputs, labels = dataset.train.next_batch(FLAGS.bs)
        _, loss = self.session.run([train_step, self.loss],
          feed_dict={
            self.inputs: inputs,
            self.labels: labels 
          })
        train_loss = loss

        # Log validation loss at every 150th minibatch or at the end of the epoch
        if (i % 150 == 0) or (i == n_batches - 1):
          # Evaluate on the validation split (no training step here)
          summary_str, val_loss = self.session.run([loss_summary, self.loss],
            feed_dict={
              self.inputs: dataset.validation.images,
              self.labels: dataset.validation.labels
            })

          # Log progress
          writer.add_summary(summary_str, epoch * n_batches + i)
          frac = (i + 1) / n_batches
          sys.stdout.write('\r')
          sys.stdout.write((
            col(CYAN, 'Epoch (%d/%d):') + col(BOLD, '\t[%-10s] %d%% \t') +
            col(YELLOW, 'Train Loss:') + col(None, ' %.8f ') + '\t' +
            col(YELLOW, 'Val Loss:') + col(None, ' %.8f')) %
            (epoch + 1, FLAGS.epochs, '='*int(frac*10), int(frac*100), train_loss, val_loss))
          sys.stdout.flush()

      sys.stdout.write('\n')
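A sketch of the flags this method expects, using the TF 1.x flags API (flag names are inferred from the code above; the defaults are illustrative):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_float('lr', 0.01, 'learning rate')
flags.DEFINE_integer('bs', 128, 'minibatch size')
flags.DEFINE_integer('epochs', 10, 'number of training epochs')
flags.DEFINE_string('data_dir', '/tmp/mnist', 'where MNIST is cached')
flags.DEFINE_string('output_dir', '/tmp/run', 'where event logs are written')
FLAGS = flags.FLAGS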
Example #4
def visual_mode(mode_type='netron',
                pattern_name=None,
                pattern_idx=0,
                graph_path=None):
    def get_graph_def(pattern_name=None, pattern_idx=0, graph_path=None):
        assert (pattern_name is not None) or (graph_path is not None), \
                "at least one of pattern_name or graph_path must be provided"
        if graph_path is not None:
            name = graph_path.split('/')[-1]
            with open(graph_path, "rb") as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
        else:
            name = pattern_name
            graph_def = RegistPattern.get_patterns(pattern_name)[pattern_idx]
        return graph_def, name

    graph_def, name = get_graph_def(pattern_name=pattern_name,
                                    pattern_idx=pattern_idx,
                                    graph_path=graph_path)
    if mode_type == 'netron':
        tmp_dir = tempfile.mkdtemp()
        model_path = tmp_dir + "/" + name
        with open(model_path, "wb") as f:
            f.write(graph_def.SerializeToString())
        netron.start(file=model_path,
                     host=get_ip_address(),
                     port=flags.FLAGS.pt)
        shutil.rmtree(tmp_dir)

    else:  # type == 'tf'
        with Session() as sess:
            tmp_dir = tempfile.mkdtemp()
            tf.import_graph_def(graph_def)
            train_writer = summary.FileWriter(tmp_dir)
            train_writer.add_graph(sess.graph)
            train_writer.flush()
            train_writer.close()
            tb = program.TensorBoard(default.get_plugins())
            tb.configure(argv=[
                None, '--logdir', tmp_dir, '--port', flags.FLAGS.pt, '--host',
                get_ip_address()
            ])
            tb.main()
            shutil.rmtree(tmp_dir)
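A hedged invocation sketch (the file path and pattern name are hypothetical; flags.FLAGS.pt is assumed to hold the serving port):

visual_mode(mode_type='netron', graph_path='./models/frozen_graph.pb')
visual_mode(mode_type='tf', pattern_name='conv_bn_relu', pattern_idx=0)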
Example #5
    def get_writer(self, sess):
        def summary_details(fd):
            writer.add_summary(sess.run(model_merge, feed_dict=fd),
                               self.writer_step)
            self.writer_step += 1

        def summary_scores(s2v):
            p2v = {s2p[s]: s2v[s] for s in s2p.keys()}
            writer.add_summary(sess.run(score_merge, feed_dict=p2v),
                               len(self.history))

        import tensorflow.summary as su
        writer = su.FileWriter(self.writer_path, sess.graph)
        model_merge = su.merge_all()
        s2p = {t: tf.placeholder(dtype=tf.float32) for t in au.eval_scores}
        score_merge = su.merge(
            [su.scalar(s, p, family='eval') for s, p in s2p.items()])
        return summary_details, summary_scores
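Usage sketch (hypothetical: `trainer` is an instance of the enclosing class, and `fd` is a feed_dict for the merged model summaries):

summary_details, summary_scores = trainer.get_writer(sess)
summary_details(fd)                               # logs merged model summaries
summary_scores({s: 0.0 for s in au.eval_scores})  # one value per eval score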
Example #6
    Such a model can be trained just like Keras Sequential models.

"""
#%% for tensorboard

import os
import glob

os.chdir("E:\\ROBOCZY\\Python\\Keras\\")
for file in glob.glob(".\\events*"):
    os.remove(file)

import tensorflow.summary as tfs
from tensorflow import get_default_graph as gdg  #! ImportError: cannot import name 'get_default_graph' from 'tensorflow'

writer = tfs.FileWriter(".")  #! AttributeError: module 'tensorflow.summary' has no attribute 'FileWriter'

writer.add_graph(gdg())  #! ...
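# A hedged workaround for the two errors above (assumption: TF 2.x, where the
# TF 1.x summary API survives under tf.compat.v1):
import tensorflow as tf

tf.compat.v1.disable_eager_execution()             # graph mode, as in TF 1.x
writer = tf.compat.v1.summary.FileWriter(".")
writer.add_graph(tf.compat.v1.get_default_graph())
writer.flush()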

#%%

# This returns a tensor

inputs = Input(shape=(100,))

# a layer instance is callable on a tensor and returns a tensor
x = Dense(64, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)

# the model including the Input and three Dense layers
model = Model(inputs=inputs, outputs=predictions)
Example #7
def _run(resolution=16,
         score_objects=True,
         mean_repeat=20,
         explorer='repeated',
         seen_weight=0.0,
         seen_power=1.0,
         chosen_weight=0.0,
         chosen_power=1.0,
         action_weight=0.0,
         action_power=1.0,
         horiz_weight=0.3,
         vert_weight=0.1,
         low_score_weight=0.5,
         high_score_weight=10.0,
         explore_steps=100,
         ignore_death=1,
         x_repeat=2,
         show=False,
         seed_path=None,
         base_path='./results/',
         clear_old_checkpoints=True,
         game="montezuma",
         chosen_since_new_weight=0,
         chosen_since_new_power=1,
         warn_delete=True,
         low_level_weight=0.1,
         objects_from_pixels=True,
         objects_remember_rooms=True,
         only_keys=True,
         optimize_score=True,
         use_real_pos=True,
         target_shape=(6, 6),
         max_pix_value=255,
         prob_override=0.0,
         reset_pool=False,
         pool_class='py',
         start_method='fork',
         path_postfix='',
         n_cpus=None,
         save_prob_pictures=False,
         save_item_pictures=False,
         keep_prob_pictures=False,
         keep_item_pictures=False,
         batch_size=100,
         reset_cell_on_update=False,
         actors=1,
         nexp=None,
         lr=1.0e-03,
         lr_decay=0.99999,
         cliprange=0.1,
         cl_decay=0.99999,
         n_tr_epochs=2,
         mbatch=4,
         gamma=0.99,
         lam=0.95,
         log_path="log",
         nsubs=8,
         timedialation=20,
         master_lr=0.01,
         lr_decay_master=0.99999,
         master_cl=0.1,
         cl_decay_master=0.99999,
         warmup=20,
         train=40,
         retrain_N=None,
         with_domain=False,
         load_model=None,
         reward_function='clip',
         ent_mas=0.01,
         ent_sub=0.01,
         pacmanScoreRes=None,
         render=None,
         render_frameskip=4,
         clean_up_grid=False):
    sess = None
    if game == "robot":
        explorer = RepeatedRandomExplorerRobot()
    elif explorer == "ppo":
        ncpu = multiprocessing.cpu_count()
        if sys.platform == 'darwin': ncpu //= 2
        config = ConfigProto(allow_soft_placement=True,
                             intra_op_parallelism_threads=ncpu,
                             inter_op_parallelism_threads=ncpu)
        config.gpu_options.allow_growth = True  # pylint: disable=E1101
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        sess = Session(config=config).__enter__()
        if nexp is None:
            nexp = explore_steps
        explorer = PPOExplorer(actors=actors,
                               nexp=nexp,
                               lr=lr,
                               lr_decay=lr_decay,
                               cliprange=cliprange,
                               cl_decay=cl_decay,
                               n_tr_epochs=n_tr_epochs,
                               nminibatches=mbatch,
                               gamma=gamma,
                               lam=lam,
                               ent_coef=ent_sub)
        # if game == 'nchain':
        # 	explorer.init_model(env="NChain-v0", policy=MlpPolicy)
        # else:
        # 	explorer.init_model(env="MontezumaRevengeDeterministic-v4", policy=CnnPolicy)
    elif explorer == 'mlsh':
        ncpu = multiprocessing.cpu_count()
        if sys.platform == 'darwin': ncpu //= 2
        config = ConfigProto(allow_soft_placement=True,
                             intra_op_parallelism_threads=ncpu,
                             inter_op_parallelism_threads=ncpu)
        config.gpu_options.allow_growth = True  # pylint: disable=E1101
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        sess = Session(config=config).__enter__()
        if nexp is None:
            nexp = explore_steps
        explorer = MlshExplorer(nsubs=nsubs,
                                timedialation=timedialation,
                                warmup_T=nexp * warmup,
                                train_T=nexp * train,
                                actors=actors,
                                nexp=nexp // timedialation,
                                lr_mas=master_lr,
                                lr_sub=lr,
                                lr_decay=lr_decay_master,
                                lr_decay_sub=lr_decay,
                                cl_decay=cl_decay_master,
                                cl_decay_sub=cl_decay,
                                n_tr_epochs=n_tr_epochs,
                                nminibatches=mbatch,
                                gamma=gamma,
                                lam=lam,
                                cliprange_mas=master_cl,
                                cliprange_sub=cliprange,
                                retrain_N=retrain_N,
                                ent_m=ent_mas,
                                ent_s=ent_sub)

    elif explorer == 'repeated':
        explorer = RepeatedRandomExplorer(mean_repeat)
    else:
        explorer = RandomExplorer()

    if game == "montezuma":
        game_class = MyMontezuma
        game_class.TARGET_SHAPE = target_shape
        game_class.MAX_PIX_VALUE = max_pix_value
        game_args = dict(score_objects=score_objects,
                         x_repeat=x_repeat,
                         objects_from_pixels=objects_from_pixels,
                         objects_remember_rooms=objects_remember_rooms,
                         only_keys=only_keys,
                         unprocessed_state=True)
        grid_resolution = (GridDimension('level', 1),
                           GridDimension('score', 1),
                           GridDimension('room', 1),
                           GridDimension('x', resolution),
                           GridDimension('y', resolution))
    elif game == "pitfall":
        game_class = pitfall_env.MyPitfall
        game_class.TARGET_SHAPE = target_shape
        game_class.MAX_PIX_VALUE = max_pix_value
        game_args = dict(score_objects=score_objects, x_repeat=x_repeat)
        grid_resolution = (GridDimension('level', 1),
                           GridDimension('score', 1),
                           GridDimension('room', 1),
                           GridDimension('x', resolution),
                           GridDimension('y', resolution))
    elif game == "nchain":
        game_class = MyNChain
        game_class.TARGET_SHAPE = target_shape
        game_class.MAX_PIX_VALUE = max_pix_value
        game_args = dict(N=10000)
        grid_resolution = (GridDimension('state', 1), )
    elif game == "pacman":
        game_class = MyMsPacman
        game_class.TARGET_SHAPE = target_shape
        game_class.MAX_PIX_VALUE = max_pix_value
        game_args = dict(x_repeat=x_repeat,
                         unprocessed_state=True,
                         render=render,
                         frameskip=render_frameskip)
        if pacmanScoreRes is None:
            grid_resolution = (GridDimension('level', 1),
                               GridDimension('x', resolution),
                               GridDimension('y', resolution))
        else:
            grid_resolution = (GridDimension('level', 1),
                               GridDimension('score', pacmanScoreRes),
                               GridDimension('x', resolution),
                               GridDimension('y', resolution))
    else:
        raise NotImplementedError("Unknown game: " + game)

    if game == "nchain":
        selector = NChainSelector(
            game_class,
            seen=Weight(seen_weight, seen_power),
            chosen=Weight(chosen_weight, chosen_power),
            action=Weight(action_weight, action_power),
            room_cells=Weight(0.0),
            dir_weights=DirWeights(horiz_weight, vert_weight, low_score_weight,
                                   high_score_weight),
            chosen_since_new_weight=Weight(chosen_since_new_weight,
                                           chosen_since_new_power),
            low_level_weight=low_level_weight,
            with_domain=use_real_pos)
    elif game == "pacman":
        selector = PacmanSelector(
            game_class,
            seen=Weight(seen_weight, seen_power),
            chosen=Weight(chosen_weight, chosen_power),
            action=Weight(action_weight, action_power),
            room_cells=Weight(0.0),
            dir_weights=DirWeights(horiz_weight, vert_weight, low_score_weight,
                                   high_score_weight),
            chosen_since_new_weight=Weight(chosen_since_new_weight,
                                           chosen_since_new_power),
            low_level_weight=low_level_weight)
    else:
        selector = WeightedSelector(
            game_class,
            seen=Weight(seen_weight, seen_power),
            chosen=Weight(chosen_weight, chosen_power),
            action=Weight(action_weight, action_power),
            room_cells=Weight(0.0),
            dir_weights=DirWeights(horiz_weight, vert_weight, low_score_weight,
                                   high_score_weight),
            chosen_since_new_weight=Weight(chosen_since_new_weight,
                                           chosen_since_new_power),
            low_level_weight=low_level_weight)

    pool_cls = multiprocessing.get_context(start_method).Pool
    if pool_class == 'torch':
        pool_cls = torch.multiprocessing.Pool
    elif pool_class == 'loky':
        pool_cls = LPool

    expl = Explore(explorer,
                   selector, (game_class, game_args),
                   grid_resolution,
                   explore_steps=explore_steps,
                   ignore_death=ignore_death,
                   optimize_score=optimize_score,
                   use_real_pos=use_real_pos,
                   prob_override=prob_override,
                   reset_pool=reset_pool,
                   pool_class=pool_cls,
                   n_cpus=n_cpus,
                   batch_size=batch_size,
                   reset_cell_on_update=reset_cell_on_update,
                   with_domain=with_domain,
                   load_model=load_model,
                   reduce_grid=clean_up_grid)

    if seed_path is not None:
        expl.grid = pickle.load(lzma.open(seed_path, 'rb'))
        print(random.sample(list(expl.grid.keys()), 10))
        print('Number at level > 0: ',
              len([e for e in expl.grid.keys() if e.level > 0]))

    n_digits = 12

    old = 0
    old_compute = 0

    with tqdm(desc='Time (seconds)', smoothing=0, total=MAX_TIME) as t_time, \
            tqdm(desc='Iterations', total=MAX_ITERATIONS) as t_iter, \
            tqdm(desc='Compute steps', total=MAX_FRAMES_COMPUTE) as t_compute, \
            tqdm(desc='Game step', total=MAX_FRAMES) as t:
        start_time = time.time()
        last_time = np.round(start_time)
        # TODO: make this more generic for each level switch
        seen_level_1 = False
        n_iters = 0
        prev_checkpoint = None

        def should_continue():
            if MAX_TIME is not None and time.time() - start_time >= MAX_TIME:
                return False
            if MAX_FRAMES is not None and expl.frames_true + old >= MAX_FRAMES:
                return False
            if MAX_FRAMES_COMPUTE is not None and expl.frames_compute + old_compute >= MAX_FRAMES_COMPUTE:
                return False
            if MAX_ITERATIONS is not None and n_iters >= MAX_ITERATIONS:
                return False
            if MAX_LEVEL is not None and len(
                    Counter(e.level for e in expl.grid).keys()) > MAX_LEVEL:
                return False
            if TERM_CONDITION and False:  # `and False` leaves this check disabled
                return False
            return True

        logDir = f'{log_path}/{game}_{explorer.__repr__()}/res_{resolution}_explStep_{explore_steps}' f'_cellbatch_{batch_size}'
        if explorer.__repr__() == 'ppo':
            logDir = f'{logDir}_actors_{actors}_exp_{nexp}_lr_{lr}_lrDec_{lr_decay}_cl_{cliprange}_clDec_{cl_decay}' \
             f'_mbatch_{mbatch}_trainEpochs_{n_tr_epochs}_gamma_{gamma}_lam_{lam}'
        if explorer.__repr__() == 'mlsh':
            logDir = f'{logDir}_subs_{nsubs}_td_{timedialation}_WU_{warmup}_tr_{train}_exp_{nexp}' \
             f'_lrM_{master_lr}_lrDM_{lr_decay_master}_clM_{master_cl}' \
             f'_clDM_{cl_decay_master}_lrS_{lr}_lrDS_{lr_decay}_clS_{cliprange}_clDS_{cl_decay}' \
             f'_rt_{retrain_N}' \
             f'_mb_{mbatch}_trEp_{n_tr_epochs}_gam_{gamma}_lam_{lam}'
        logDir = f'{logDir}_{time.time()}'
        global LOG_DIR
        LOG_DIR = logDir
        summaryWriter = summary.FileWriter(logdir=logDir, flush_secs=20)
        if sess is not None:
            summaryWriter.add_graph(graph=sess.graph)
        keys_found = []
        removed_cells = 0
        try:
            while should_continue():
                # Run one iteration
                old += expl.frames_true
                old_compute += expl.frames_compute

                expl.run_cycle()

                t.update(expl.frames_true)  #- old)
                t_compute.update(expl.frames_compute)  #- old_compute)
                t_iter.update(1)
                cur_time = np.round(time.time())
                t_time.update(int(cur_time - last_time))
                last_time = cur_time
                n_iters += 1

                if game == 'pacman':
                    entry = [
                        summary.Summary.Value(tag='Rooms_Found',
                                              simple_value=max(
                                                  e.level for e in expl.grid))
                    ]

                else:
                    entry = [
                        summary.Summary.Value(
                            tag='Rooms_Found',
                            simple_value=len(
                                Counter((e.room, e.level)
                                        for e in expl.grid).keys()))
                    ]
                entry.append(
                    summary.Summary.Value(tag='Cells',
                                          simple_value=len(expl.grid) +
                                          removed_cells))
                entry.append(
                    summary.Summary.Value(
                        tag='Top_score',
                        simple_value=max(e.score for e in expl.grid.values())))
                if game == "montezuma":
                    dist = Counter(e.score for e in expl.real_grid)
                    for key in dist.keys():
                        if key not in keys_found:
                            keys_found.append(key)
                    hist = makeHistProto(dist, bins=30, keys=keys_found)
                    entry.append(
                        summary.Summary.Value(tag="Key_dist", histo=hist))
                    leveldist = Counter(e.level for e in expl.real_grid)
                    histlvl = makeHistProto(leveldist, bins=5)
                    entry.append(
                        summary.Summary.Value(tag="Level_dist", histo=histlvl))

                entry.append(
                    summary.Summary.Value(
                        tag="Avg traj-len",
                        simple_value=(expl.frames_compute / batch_size) /
                        explore_steps))
                if sess is not None:
                    # avoid shadowing the builtin `bytes`
                    mem_bytes = sess.run(tf.contrib.memory_stats.MaxBytesInUse())
                    entry.append(
                        summary.Summary.Value(tag="Memory Use",
                                              simple_value=mem_bytes))

                entry.extend(expl.summary)
                summaryWriter.add_summary(summary=summary.Summary(value=entry),
                                          global_step=expl.frames_compute +
                                          old_compute)

                # summaryWriter.add_run_metadata(expl.explorer.master.metadata, 'master_metadata', global_step=expl.frames_compute + old_compute)
                # for sub in expl.explorer.subs:
                # 	summaryWriter.add_run_metadata(sub.model.metadata, f'{sub}_metadata',
                # 							   global_step=expl.frames_compute + old_compute)
                expl.summary = []

                # In some circumstances (see comments), save a checkpoint and some pictures
                if ((not seen_level_1 and expl.seen_level_1)
                        or  # We have solved level 1
                        old == 0 or  # It is the first iteration
                        old // THRESH_TRUE != expl.frames_true // THRESH_TRUE
                        or  # We just passed the THRESH_TRUE threshold
                        old_compute // THRESH_COMPUTE !=
                        expl.frames_compute // THRESH_COMPUTE
                        or  # We just passed the THRESH_COMPUTE threshold
                        not should_continue()):  # This is the last iteration

                    #Remove old grid entries:
                    if clean_up_grid:
                        to_remove = set()
                        max_level = max(e.level for e in expl.grid)
                        for cell_key in expl.grid:
                            if max_level - cell_key.level > 2:
                                to_remove.add(cell_key)
                        for cell_key in to_remove:
                            del expl.grid[cell_key]
                            removed_cells += 1

                        to_remove = set()
                        for cell_key in expl.real_grid:
                            if max_level - cell_key.level > 2:
                                to_remove.add(cell_key)
                        for cell_key in to_remove:
                            expl.real_grid.remove(cell_key)

                    # Quick bookkeeping, printing update
                    seen_level_1 = expl.seen_level_1
                    filename = f'{base_path}/{expl.frames_true:0{n_digits}}_{expl.frames_compute:0{n_digits}}'

                    tqdm.write(
                        f'Cells at levels: {dict(Counter(e.level for e in expl.real_grid))}'
                    )
                    tqdm.write(
                        f'Cells at objects: {dict(Counter(e.score for e in expl.real_grid))}'
                    )
                    tqdm.write(
                        f'Max score: {max(e.score for e in expl.grid.values())}'
                    )
                    tqdm.write(f'Compute cells: {len(expl.grid)}')

                    # Save pictures
                    if show or save_item_pictures or save_prob_pictures:
                        # Show normal grid
                        if show or save_item_pictures:
                            get_env().render_with_known(
                                list(expl.real_grid),
                                resolution,
                                show=False,
                                filename=filename + '.png',
                                get_val=lambda x: 1,
                                combine_val=lambda x, y: x + y)

                        if not use_real_pos:
                            object_combinations = sorted(
                                set(e.real_cell.score
                                    for e in expl.grid.values()
                                    if e.real_cell is not None))
                            for obj in object_combinations:
                                grid_at_obj = [
                                    e.real_cell for e in expl.grid.values()
                                    if e.real_cell is not None
                                    and e.real_cell.score == obj
                                ]
                                get_env().render_with_known(
                                    grid_at_obj,
                                    resolution,
                                    show=False,
                                    filename=filename + f'_object_{obj}.png',
                                    get_val=lambda x: 1,
                                    combine_val=lambda x, y: x + y)

                        # Show probability grid
                        if (use_real_pos and show) or save_prob_pictures:
                            expl.selector.set_ranges(list(expl.grid.keys()))
                            possible_scores = sorted(
                                set(e.score for e in expl.grid))
                            total = np.sum([
                                expl.selector.get_weight(
                                    x, expl.grid[x], possible_scores,
                                    expl.grid) for x in expl.grid
                            ])
                            get_env().render_with_known(
                                list(expl.grid.keys()),
                                resolution,
                                show=False,
                                filename=filename + '_prob.PNG',
                                combine_val=lambda x, y: x + y,
                                get_val=lambda x: expl.selector.get_weight(
                                    x, expl.grid[x], possible_scores, expl.grid
                                ) / total,
                            )
                        if prev_checkpoint and clear_old_checkpoints:
                            if not keep_item_pictures:
                                try:
                                    os.remove(prev_checkpoint + '.png')
                                except FileNotFoundError:
                                    # If it doesn't exist, we don't need to remove it.
                                    pass
                            if use_real_pos and not keep_prob_pictures:
                                try:
                                    os.remove(prev_checkpoint + '_prob.PNG')
                                except FileNotFoundError:
                                    # If it doesn't exist, we don't need to remove it.
                                    pass

                    with open(filename + ".csv", 'w') as f:
                        f.write(str(len(expl.grid)))
                        f.write(", ")
                        f.write(str(max([a.score
                                         for a in expl.grid.values()])))
                        f.write("\n")

                    # Save checkpoints
                    grid_copy = {}
                    for k, v in expl.grid.items():
                        grid_copy[k] = v
                    # TODO: is 7z still necessary now that there are other ways to reduce space?
                    try:
                        pickle.dump(
                            grid_copy,
                            lzma.open(filename + '.7z', 'wb', preset=0))
                    except MemoryError:
                        print('MemoryError when saving grid checkpoint')
                    # Clean up previous checkpoint.
                    if prev_checkpoint and clear_old_checkpoints:
                        try:
                            os.remove(prev_checkpoint + '.7z')
                        except FileNotFoundError:
                            pass
                    prev_checkpoint = filename

                    # A much smaller file that should be sufficient for view folder, but not for restoring
                    # the demonstrations. Should make view folder much faster.
                    grid_set = {}
                    for k, v in expl.grid.items():
                        grid_set[k] = v.score
                    try:
                        pickle.dump(
                            grid_set,
                            lzma.open(filename + '_set.7z', 'wb', preset=0))
                        pickle.dump(
                            expl.real_grid,
                            lzma.open(filename + '_set_real.7z',
                                      'wb',
                                      preset=0))
                    except MemoryError:
                        print('MemoryError when saving set and real_set checkpoint')

                    if PROFILER:
                        print("ITERATION:", n_iters)
                        PROFILER.disable()
                        PROFILER.dump_stats(filename + '.stats')
                        # PROFILER.print_stats()
                        PROFILER.enable()
                    # Save a bit of memory by freeing our copies.
                    grid_copy = None
                    grid_set = None
        finally:
            # TODO Insert model save here
            if SAVE_MODEL and isinstance(expl.explorer, MlshExplorer):
                expl.explorer.master.save(f'{base_path}/master')
                expl.explorer.master.save(f'{logDir}/master')
                for sub in expl.explorer.subs:
                    sub.save(f'{base_path}/{sub}')
                    sub.save(f'{logDir}/{sub}')
            #print(expl.explorer.__repr__())
            if sess is not None:
                sess.__exit__(None, None, None)
                tf.reset_default_graph()
            else:
                print('did not clear graph')
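A minimal invocation sketch (every argument shown is one of _run's own parameters; the values are illustrative):

_run(game='montezuma', resolution=16, explore_steps=100,
     base_path='./results/', log_path='log')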
Example #8
    logger.info(net)
    logger.info('model parameters:')
    n_params = 0
    for name, param in net.named_parameters():
        n_params += param.nelement()
        logger.info('%s | %r | %i', name, param.size(), param.nelement())
    logger.info('# parameters: %i', n_params)

    train_loader = problem.get_train_loader(args.batch_size)
    val_loader = problem.get_val_loader(args.batch_size)

    loss_fn = nn.CrossEntropyLoss()

    # Summary writer
    if args.write_summary:
        writer = summary.FileWriter(args.log_dir, flush_secs=10)

    if args.n_epochs <= epoch:
        logger.warning('too few epochs to train')

    # Training loop
    while epoch < args.n_epochs:
        logger.info('starting epoch %i', epoch + 1)
        for batch in tqdm.tqdm(train_loader):
            optimizer.zero_grad()
            # Build a mini-batch
            imgs, labels = batch
            if len(args.gpus) > 0:
                imgs, labels = imgs.cuda(), labels.cuda()
            imgs, labels = Variable(imgs), Variable(labels)
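            # A hedged continuation of the loop body (assumes `net`,
            # `optimizer`, and the `epoch` counter are defined earlier in the
            # script; float(loss) assumes torch >= 0.4):
            out = net(imgs)
            loss = loss_fn(out, labels)
            loss.backward()
            optimizer.step()
            if args.write_summary:
                # Write a raw TF1 Summary proto from a PyTorch scalar, the
                # same pattern used elsewhere on this page
                writer.add_summary(
                    summary.Summary(value=[
                        summary.Summary.Value(tag='train/loss',
                                              simple_value=float(loss))
                    ]))
        epoch += 1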
Example #9
    def __init__(self, logdir):
        if tb_summary is None:
            raise ValueError("You must install TensorFlow " +
                             "to use Tensorboard summary writer.")
        self._writer = tb_summary.FileWriter(logdir)
Example #10
def create_embeddings(corpusFilename,
                      outputFilename,
                      vocabSize,
                      batchSize,
                      embeddingDimension,
                      numNegativeExamples,
                      numSteps,
                      validationExamples,
                      skipWindow=1,
                      numSkips=2):
    '''
    :param corpusFilename: path to a whitespace-tokenized text corpus
    :param outputFilename: where to write one line per token ("token v1 v2 ...")
    :param vocabSize: int for an absolute vocabulary size; float for a fraction of the unique words; None for all unique words (+1 for UNK)
    :param batchSize:
    :type batchSize: int
    :param embeddingDimension:
    :param numNegativeExamples:
    :param validationExamples: if int, chosen at random from the 100 most common words; if list (of string tokens), used for validation directly
    :param skipWindow:
    :param numSkips:
    :return:
    '''

    print(
        'SETUP: batchSize: %d, embeddingDimension: %d, numNegativeExamples: %d, numSteps: %d, corpusFilename: %s'
        % (batchSize, embeddingDimension, numNegativeExamples, numSteps,
           corpusFilename))

    global data_index
    data_index = 0
    tf.reset_default_graph()

    ##### step 0: get tokens data (e.g. download data from http://mattmahoney.net/dc/text8.zip or make your own corpus)

    ##### step 1: read data
    with open(corpusFilename, encoding='utf8') as ifile:
        words = ifile.read().split()
    print('Data size: %d non-unique, %d unique words' %
          (len(words), len(np.unique(words))))

    ##### step 2: build the dictionary -> replace rare words with UNK token
    # VOCAB_SIZE = len(np.unique(words)) + 1  # +1 for the UNK token
    # vocabSize = int(len(np.unique(words)) * 0.8)  # don't use the rare words (most likely people's names in this case)

    if isinstance(vocabSize, float):  # a fraction of the unique words
        vocabSize = int(len(np.unique(words)) * vocabSize)
    elif vocabSize is None:  # max: keep all unique words (+1 for UNK)
        vocabSize = len(np.unique(words)) + 1

    data, count, dictionary, reverse_dictionary = build_dataset(
        words, vocabSize)
    del words  # save memory
    print('Most common words (+UNK)', count[:5])
    print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

    ##### step 3: generate a training batch for the skip-gram model
    batch, labels = generate_batch(data,
                                   batchSize=50,
                                   numSkips=5,
                                   skipWindow=5)

    print('---- sample target -> neighbor')
    for i in range(10):
        print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0],
              reverse_dictionary[labels[i, 0]])

    ##### step 4: Build and train a skip-gram model

    # We pick a random validation set to sample nearest neighbors. Here we limit the
    # validation samples to the words that have a low numeric ID, which by
    # construction are also the most frequent.

    if isinstance(validationExamples, int):
        validationExamples = np.random.choice(100,
                                              validationExamples,
                                              replace=False)
    else:
        for w in validationExamples:
            if w not in dictionary:
                print('>>>>> Validation word %s is NOT in the corpus!' % w)

        validationExamples = [
            dictionary[w] for w in validationExamples if w in dictionary
        ]

    # graph = tf.Graph()
    sess = tf.InteractiveSession()

    # define input data
    train_inputs = placeholder(tf.int32, shape=[batchSize])
    train_labels = placeholder(tf.int32, shape=[batchSize, 1])
    valid_dataset = tf.constant(validationExamples, dtype=tf.int32)

    # Ops and variables pinned to the CPU because of missing GPU implementation
    with tf.device('/cpu:0'):
        # initialize embeddings to uniform randoms, used for looking up embeddings for inputs
        embeddings = Variable(
            tf.random_uniform([vocabSize, embeddingDimension], -1.,
                              1.))  # ~Unif(-1, 1)
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)

        # construct the variables for the NCE loss
        # nce_weights ~ N(0, 1/sqrt(embedding size)) of size vocab_size x embedding_size
        nce_weights = Variable(
            tf.truncated_normal([vocabSize, embeddingDimension],
                                stddev=1. / math.sqrt(embeddingDimension)))
        # nce_biases ~ vector of zeros of size vocab_size
        nce_biases = Variable(tf.zeros([vocabSize]))

    # define loss function
    # tf.nn.nce_loss automatically draws a new sample of the negative labels each time we evaluate the loss
    loss = reduce_mean(
        tf.nn.nce_loss(weights=nce_weights,
                       biases=nce_biases,
                       inputs=embed,
                       labels=train_labels,
                       num_sampled=numNegativeExamples,
                       num_classes=vocabSize))
    summary.scalar('loss', loss)

    # define the optimizer
    optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

    # compute the cosine similarity between minibatch examples and all embeddings
    normalized_embeddings = embeddings / tf.sqrt(
        reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
                                              valid_dataset)
    similarity = matmul(valid_embeddings,
                        normalized_embeddings,
                        transpose_b=True)

    ##### step 5: train!
    # set up tensorboard
    merged_summaries = summary.merge_all()
    for f in glob.glob('./logs/word2vec/train/*'):
        os.remove(f)
    for f in glob.glob('./logs/word2vec/validation/*'):
        os.remove(f)
    train_writer = summary.FileWriter('./logs/word2vec/train', sess.graph)
    valid_writer = summary.FileWriter('./logs/word2vec/validation')
    sess.run(tf.global_variables_initializer())

    reportPeriod = max(int(0.01 * numSteps), 100)
    validation_period = min(
        5000, int(0.05 * numSteps)
    )  # Note that this is expensive (~20% slowdown if computed every 500 steps)
    average_loss = 0  # average loss per "reporting" period

    for step in range(numSteps):

        batch_inputs, batch_labels = generate_batch(data, batchSize, numSkips,
                                                    skipWindow)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

        # We perform one update step by evaluating the optimizer op
        _, loss_val, summaries = sess.run([optimizer, loss, merged_summaries],
                                          feed_dict=feed_dict)
        average_loss += loss_val

        if step % reportPeriod == 0:
            if step > 0: average_loss /= reportPeriod
            train_writer.add_summary(summaries, step)
            print('\nAverage loss at step', step, ':', average_loss)
            average_loss = 0

        # Note that this is expensive (~20% slowdown if computed every 500 steps)
        if step % validation_period == 0:
            sim = similarity.eval()
            for i, validationExample in enumerate(validationExamples):
                valid_word = reverse_dictionary[validationExample]
                top_k = 8  # number of nearest neighbors
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = 'Top %d Nearest to %s: %s' % (
                    top_k, valid_word, ' '.join(reverse_dictionary[nearest[k]]
                                                for k in range(top_k)))
                print(log_str)

    final_embeddings = normalized_embeddings.eval()
    tokens = [
        p[1] for p in sorted(reverse_dictionary.items(), key=lambda p: p[0])
        if p[1] != 'UNK'
    ]

    train_writer.flush()
    valid_writer.flush()

    ##### output to file
    if outputFilename:
        with open(outputFilename, 'w', encoding='utf8') as ofile:
            for i, token in enumerate(tokens):
                ofile.write(
                    '%s %s\n' %
                    (token, ' '.join(str(d) for d in final_embeddings[i, :])))
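A hedged invocation sketch (corpus and output paths are hypothetical; the hyperparameters are illustrative, not tuned):

create_embeddings('text8.txt', 'text8_embeddings.txt',
                  vocabSize=50000, batchSize=128, embeddingDimension=128,
                  numNegativeExamples=64, numSteps=100001,
                  validationExamples=16)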
Example #11
def main(x,
         y,
         training_fraction=0.80,
         learning_rate=0.001,
         epochs=1000,
         batch_size=1000,
         update_summary_at=100):
    """
    :param x: shape = m * 784
    :param y: shape = m * 10
    :param training_fraction:
    :param epochs:
    :param batch_size:
    :param update_summary_at:
    :return:
    """
    training_size = int(len(x) * training_fraction)

    # If the last batch would be smaller than half of the desired batch size,
    # raise an AssertionError. In the future, we may simply skip that last
    # batch instead of throwing.
    assert training_size % batch_size == 0 or training_size % batch_size > batch_size / 2
    last_batch_size = training_size % batch_size

    _data = train_test_split(x,
                             y,
                             train_size=training_fraction,
                             stratify=y.argmax(1),
                             random_state=0)

    # training_data_x, training_data_y = x[:training_size], y[:training_size]
    # testing_data_x, testing_data_y = x[training_size:], y[training_size:]

    training_data_x, training_data_y = _data[0], _data[2]
    testing_data_x, testing_data_y = _data[1], _data[3]

    feature_size = training_data_x.shape[1]
    hidden_nu = 20
    output_size = training_data_y.shape[1]

    x = placeholder(float32, [None, feature_size], name='x')
    y = placeholder(float32, [None, output_size], name='y')

    # also check xavier_initializer
    W1 = Variable(random_normal([feature_size, hidden_nu],
                                seed=1,
                                dtype=float32),
                  name='W1')
    b1 = Variable(random_normal([hidden_nu], dtype=float32, seed=2),
                  name='b1')  # use zeros also

    W2 = Variable(random_normal([hidden_nu, output_size],
                                seed=3,
                                dtype=float32),
                  name='W2')
    b2 = Variable(random_normal([output_size], dtype=float32, seed=4),
                  name='b2')

    L0_L1 = x @ W1 + b1
    L1_L1 = nn.relu(L0_L1)

    L1_L2 = L1_L1 @ W2 + b2
    L2_L2 = nn.softmax(L1_L2)

    # softmax_cross_entropy_with_logits_v2 expects pre-softmax logits, so pass
    # L1_L2 rather than the softmaxed L2_L2
    cost = reduce_mean(nn.softmax_cross_entropy_with_logits_v2(logits=L1_L2,
                                                               labels=y),
                       name='cost')

    optimization = train.AdamOptimizer(learning_rate=learning_rate).minimize(
        cost, name='optimization')

    init = global_variables_initializer()

    current_predictions = equal(argmax(L2_L2, axis=1), argmax(y, axis=1))

    accuracy = tf.round(
        10000 * reduce_mean(cast(current_predictions, float32))) / 100

    with Session() as sess:
        writer = summary.FileWriter('mnist/visualize', graph=sess.graph)

        cost_summary = summary.scalar('cost', cost)
        training_accuracy_summary = summary.scalar('training accuracy',
                                                   accuracy)
        testing_accuracy_summary = summary.scalar('testing accuracy', accuracy)

        sess.run(init)

        # ---------------------------------------------------------------------------------

        for e in range(epochs):

            _idx = RandomState(e).permutation(
                training_size)  # check how much adding uniformity of data
            # in each batch matters.

            total_cost = 0

            def mini_batch(start_idx, end_idx):
                curr_idx = _idx[start_idx:end_idx]

                _x = training_data_x[curr_idx]
                _y = training_data_y[curr_idx]

                _, c = sess.run([optimization, cost], feed_dict={x: _x, y: _y})

                return (end_idx - start_idx) * c

            for i in range(0, training_size, batch_size):
                total_cost += mini_batch(i, min(i + batch_size, training_size))

            if last_batch_size != 0:
                total_cost += mini_batch(training_size - last_batch_size,
                                         training_size)

            print('epoch:', e, 'total cost:', round(
                total_cost,
                3))  # check how this 'total_cost' can be fed into summary.

            if e % update_summary_at == 0:
                _total_cost, training_accuracy = sess.run(
                    [cost_summary, training_accuracy_summary],
                    feed_dict={
                        x: training_data_x,
                        y: training_data_y
                    })
                writer.add_summary(_total_cost, e)
                writer.add_summary(training_accuracy, e)

                testing_accuracy = sess.run(testing_accuracy_summary,
                                            feed_dict={
                                                x: testing_data_x,
                                                y: testing_data_y
                                            })
                writer.add_summary(testing_accuracy, e)

        writer.close()
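A hedged invocation sketch (assumes the TF 1.x MNIST tutorial helper; main splits x and y into train/test itself):

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
main(mnist.train.images, mnist.train.labels)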
Example #12
    def __init__(self, logdir):
        self.writer = summary.FileWriter(logdir)