Example #1
    def run(self):
        # Reload the most recent checkpoint, if one exists.
        if os.path.exists(self.model_dir):
            latest_step = utils.get_latest_file(self.model_dir)
            self.agent.load(self.model_dir, latest_step)

        self.evaluate()
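
None of these listings include get_latest_file itself. In Examples 1, 3, and 14 its return value is passed straight to agent.load(self.model_dir, latest_step), so the helper apparently yields a step identifier rather than a path. A minimal sketch of such a variant, assuming checkpoint file names embed the step number (the file-name convention here is a guess, not the project's actual code):

import os
import re

def get_latest_file(model_dir):
    """Hypothetical sketch: return the highest step number embedded in
    the checkpoint file names in model_dir (e.g. 'actor_1000.pt'),
    or None when the directory holds no numbered checkpoints."""
    steps = []
    for name in os.listdir(model_dir):
        match = re.search(r'(\d+)', name)
        if match:
            steps.append(int(match.group(1)))
    return max(steps) if steps else None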
Example #2
def convert_csv(cursor, mag_id, xml_template_file, output_file):
    logging.info("Converting CSV")
    latest_file = get_latest_file(DESTINATION)
    root, regels = get_xml_file_insert(xml_template_file)

    df = pd.read_csv(latest_file, sep=';')
    # Renamed from 'articles' so the dict of Article objects below does not
    # shadow this list of quoted codes.
    article_codes = ', '.join('\'{}\''.format(str(row['Uw artikelnr.']).strip()) for _, row in df.iterrows())
    sql_query = 'SELECT ArtCode, ArtIsPartijRegistreren, KingSystem.tabArtikelPartij.ArtPartijNummer as ArtPartijNummer \
        from KingSystem.tabArtikel LEFT JOIN KingSystem.tabArtikelPartij \
        ON KingSystem.tabArtikel.ArtGid=KingSystem.tabArtikelPartij.ArtPartijArtGid \
        WHERE (KingSystem.tabArtikelPartij.ArtPartijIsGeblokkeerdVoorVerkoop = 0 OR KingSystem.tabArtikel.ArtIsPartijRegistreren = 0) AND \
        KingSystem.tabArtikel.ArtCode in ({})'.format(article_codes)

    # Fetch partij information
    cursor.execute(sql_query)
    rows = cursor.fetchall()
    articles = {}
    for row in rows:
        articles[row.ArtCode] = Article(row.ArtCode, row.ArtPartijNummer, row.ArtIsPartijRegistreren)

    logging.info("Fetched %d partij rows for %d CSV rows", len(rows), len(df))
    for i, row in df.iterrows():
        art_id = str(row['Uw artikelnr.']).strip()
        article = articles[art_id]
        # amount = row['Totaal excl. inslag']
        amount = row['Aantal eenheden']

        if article.partijregistratie:
            add_xml(regels, art_id, str(amount).strip(), mag_id, article.partijnummer)
        else:
            add_xml(regels, art_id, str(amount).strip(), mag_id)

    write_xml(output_file, root)
    os.remove(latest_file)
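
The query above interpolates the article codes directly into the SQL string. A hedged alternative using driver-side placeholders, assuming a pyodbc-style cursor that accepts ? parameters (the helper name is ours):

def fetch_partij_rows(cursor, codes):
    # One '?' placeholder per code lets the driver do the escaping.
    placeholders = ', '.join('?' for _ in codes)
    sql = ('SELECT ArtCode, ArtIsPartijRegistreren, '
           'KingSystem.tabArtikelPartij.ArtPartijNummer AS ArtPartijNummer '
           'FROM KingSystem.tabArtikel '
           'LEFT JOIN KingSystem.tabArtikelPartij '
           'ON KingSystem.tabArtikel.ArtGid = KingSystem.tabArtikelPartij.ArtPartijArtGid '
           'WHERE (KingSystem.tabArtikelPartij.ArtPartijIsGeblokkeerdVoorVerkoop = 0 '
           'OR KingSystem.tabArtikel.ArtIsPartijRegistreren = 0) '
           'AND KingSystem.tabArtikel.ArtCode IN ({})'.format(placeholders))
    cursor.execute(sql, list(codes))
    return cursor.fetchall()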
Example #3
    def run(self):
        if os.path.exists(self.model_dir):
            latest_step = utils.get_latest_file(self.model_dir)
            self.agent.load(self.model_dir, latest_step)
        else:
            raise ValueError('Could not reload weights!')

        self.evaluate()
Example #4
def visualize_latest_from_visualization_folder(folder='./visualizations/', file=None):
  if file is None:
    file = ut.get_latest_file(folder, filter=r'.*\d+\.txt$')
    ut.print_info('Encoding file: %s' % file.split('/')[-1])
  data = np.loadtxt(file)  # [0:360]
  fig = plt.figure()
  # 'fast' is assumed to be a module-level flag (see Example 12).
  vi.visualize_encodings(data, fast=fast, fig=fig, interactive=True)
  fig.suptitle(file.split('/')[-1])
  fig.tight_layout()
  plt.show()
Example #5
def visualize_latest_from_visualization_folder(folder='./visualizations/',
                                               file=None):
    if file is None:
        file = ut.get_latest_file(folder, filter=r'.*\d+\.txt$')
        ut.print_info('Encoding file: %s' % file.split('/')[-1])
    data = np.loadtxt(file)  # [0:360]
    fig = plt.figure()
    vi.visualize_encodings(data, fast=fast, fig=fig, interactive=True)
    fig.suptitle(file.split('/')[-1])
    fig.tight_layout()
    plt.show()
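
Examples 4 through 7 pass a filter regex to the helper. A sketch of what such a variant might look like, assuming the pattern is matched against bare file names and the newest match by modification time wins (the signature mirrors the calls above but is not the projects' actual code):

import os
import re

def get_latest_file(folder, filter=r'.*'):
    """Hypothetical sketch: newest file in folder whose name matches the
    regex, by modification time; None when nothing matches."""
    pattern = re.compile(filter)
    candidates = [os.path.join(folder, name)
                  for name in os.listdir(folder) if pattern.match(name)]
    return max(candidates, key=os.path.getmtime) if candidates else None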
Example #6
def visualize_from_checkpoint(checkpoint, epoch=None):
  assert os.path.exists(checkpoint)
  FLAGS.load_from_checkpoint = checkpoint
  file_filter = r'.*\d+\.txt$' if epoch is None else r'.*e\|%d.*' % epoch
  latest_file = ut.get_latest_file(folder=checkpoint, filter=file_filter)
  print(latest_file)
  ut.print_info('Encoding file: %s' % latest_file.split('/')[-1])
  data = np.loadtxt(latest_file)
  fig = plt.figure()
  fig.set_size_inches(fig.get_size_inches()[0] * 2, fig.get_size_inches()[1] * 2)
  entity = EncodingVisualizer(fig, data)
  # fig.tight_layout()
  plt.show()
Example #7
def visualize_from_checkpoint(checkpoint, epoch=None):
    assert os.path.exists(checkpoint)
    FLAGS.load_from_checkpoint = checkpoint
    file_filter = r'.*\d+\.txt$' if epoch is None else r'.*e\|%d.*' % epoch
    latest_file = ut.get_latest_file(folder=checkpoint, filter=file_filter)
    print(latest_file)
    ut.print_info('Encoding file: %s' % latest_file.split('/')[-1])
    data = np.loadtxt(latest_file)
    fig = plt.figure()
    fig.set_size_inches(fig.get_size_inches()[0] * 2,
                        fig.get_size_inches()[1] * 2)
    entity = EncodingVisualizer(fig, data)
    # fig.tight_layout()
    plt.show()
Example #8
def process_file(
        output_handler,
        data_path,
        target,
        file_name=None,
        ndx=None
):
    """
    Function to open and process a file of CSV summary reports
    """
    if not file_name:
        file_name = get_latest_file(data_path)
    doc_gen = target_module.DocGenerator(file_name)
    data_file = join(data_path, file_name)
    if not ndx:
        ndx = target.DEFAULT_INDEX
    with open(data_file, encoding=target.DEFAULT_FILE_ENCODING) as csv_read:
        csv_reader = csv.DictReader(csv_read)
        output_handler(
            ES, doc_gen.generate(csv_reader, ndx)
        )
    return
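
A hedged usage sketch for process_file; bulk_index and reports are illustrative stand-ins for whatever handler and target module the enclosing script actually wires in:

# Index the newest CSV summary report found under the data path.
process_file(bulk_index, '/var/data/reports', reports)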
Example #9
        lock = lock.replace("@", " @LOCATION: ")
        funcs[func] = PathCall(lock)
    return funcs


if __name__ == '__main__':
    parser = optparse.OptionParser()
    parser.add_option("--promo", "-p", help="list of promo log", default=None)
    (opts, args) = parser.parse_args()

    # load other targets
    if opts.promo:
        BOOTSTRAP.update(load_promo_log(opts.promo).keys())

    utils.install_pdb()

    # pick the latest one from /tmp
    log_d = utils.get_latest_file("/tmp/fss-*/")
    if log_d is None:
        # second choice
        log_d = os.path.join(ROOT, "data", "sample-fss-output")
    # preferred one
    if len(args) >= 1:
        log_d = args[0]

    # enable trace output on unittest
    if not "unit" in log_d:
        dbg.quiet(["trace"])

    analyze_lock_promo(log_d)
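
Here the helper receives a glob pattern and may return None, so it presumably expands the pattern itself. A sketch of a glob-based variant under that assumption:

import glob
import os

def get_latest_file(pattern):
    """Hypothetical sketch: newest path matching the glob pattern, by
    modification time, or None when nothing matches."""
    matches = glob.glob(pattern)
    return max(matches, key=os.path.getmtime) if matches else None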
Example #10
def get_model(input_shape,
              weights_dir,
              resume,
              bayesian,
              vnet,
              prior_std,
              kernel_size,
              activation,
              padding,
              alpha,
              num_gpus,
              scale_factor=1,
              weights_path=None):
    """Loads or creates model.

    If a weights path is specified, loads from that path. Otherwise, loads
    the most recently modified model.
    """

    os.makedirs(weights_dir + "/bayesian", exist_ok=True)
    os.makedirs(weights_dir + "/dropout", exist_ok=True)

    # Sets variables for bayesian model.
    if bayesian:
        checkpoint_path = (weights_dir + "/bayesian/bayesian-{epoch:02d}"
                           "-{val_acc:.3f}-{val_loss:.0f}.h5")

        if weights_path:
            latest_weights_path = weights_path
        else:
            latest_weights_path = get_latest_file(weights_dir + "/bayesian")

        net = bayesian_vnet if vnet else bayesian_unet

    # Sets variables for dropout model.
    else:
        checkpoint_path = (weights_dir + "/dropout/dropout-{epoch:02d}"
                           "-{val_acc:.3f}-{val_loss:.2f}.h5")

        if weights_path:
            latest_weights_path = weights_path
        else:
            latest_weights_path = get_latest_file(weights_dir + "/dropout")

        net = dropout_vnet if vnet else dropout_unet

    # Loads or creates model.
    if latest_weights_path and resume:
        model = load_model(input_shape, latest_weights_path, net)
    else:
        model = net(input_shape,
                    kernel_size=kernel_size,
                    activation=activation,
                    padding=padding,
                    prior_std=prior_std)

    # Prints model summary.
    model.summary(line_length=127)

    # Converts to multi-gpu model if applicable.
    if num_gpus > 1:
        model = multi_gpu_model(model, gpus=num_gpus)

    # Sets loss function.
    if bayesian:
        loss = variational_free_energy_loss(model, scale_factor, alpha)
    else:
        loss = binary_crossentropy

    # Compiles model with Adam optimizer.
    model.compile(loss=loss, optimizer=Adam(), metrics=["accuracy"])

    return model, checkpoint_path
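
A hedged call sketch for get_model; the keyword names mirror the signature above, and every value is illustrative only:

model, checkpoint_path = get_model(input_shape=(128, 128, 128, 1),
                                   weights_dir='weights',
                                   resume=True,
                                   bayesian=True,
                                   vnet=True,
                                   prior_std=1.0,
                                   kernel_size=3,
                                   activation='relu',
                                   padding='same',
                                   alpha=0.5,
                                   num_gpus=1)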
Example #11
        lock = lock.strip()
        lock = lock.replace("@", " @LOCATION: ")
        funcs[func] = PathCall(lock)
    return funcs

if __name__ == '__main__':
    parser = optparse.OptionParser()
    parser.add_option("--promo", "-p", help="list of promo log", default=None)
    (opts, args) = parser.parse_args()

    # load other targets
    if opts.promo:
        BOOTSTRAP.update(load_promo_log(opts.promo).keys())

    utils.install_pdb()

    # pick the latest one from /tmp
    log_d = utils.get_latest_file("/tmp/fss-*/")
    if log_d is None:
        # second choice
        log_d = os.path.join(ROOT, "data", "sample-fss-output")
    # preferred one
    if len(args) >= 1:
        log_d = args[0]

    # enable trace output on unittest
    if not "unit" in log_d:
        dbg.quiet(["trace"])

    analyze_lock_promo(log_d)
Example #12
    data = np.loadtxt(latest_file)
    fig = plt.figure()
    fig.set_size_inches(fig.get_size_inches()[0] * 2,
                        fig.get_size_inches()[1] * 2)
    entity = EncodingVisualizer(fig, data)
    # fig.tight_layout()
    plt.show()


fast = True

if __name__ == '__main__':

    cwd = os.getcwd()
    # cwd = '/mnt/code/vd/TensorFlow_DCIGN/tmp/pred.16c3s2_32c3s2_32c3s2_16c3_f80_f8__i_grid.28c.4'
    latest = ut.get_latest_file(cwd, filter=r'.*_suf\.encodings\.npy$')
    print(latest)
    data = np.load(latest).item()
    # print(type(data))
    # i = data.item()
    # print(type(i))
    # print(i.shape)
    # print(data['enc'])

    # print(data)
    x = data['enc']

    # print(x)

    fig = vis.plot_encoding_crosssection(x,
                                         '',
Example #13
def get_evaluation(path):
    # Newest .npy file under path (dot escaped so the regex matches the
    # extension rather than any character).
    f = ut.get_latest_file(path, filter=r'.*\.npy$')
    print(f)
    return np.load(f).item()
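
Note that np.load(...).item() in Examples 12 and 13 deserializes a pickled object array; on NumPy 1.16.3 and later this raises an error unless pickling is opted into explicitly:

# Recent NumPy requires allow_pickle=True to load object arrays:
data = np.load(f, allow_pickle=True).item()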
Example #14
File: train.py  Project: melfm/ibit
    def run(self):
        init_env = None
        keys_to_sample = random.sample(list(self.train_envs),
                                       self.num_train_envs)
        sampled_train_envs = {
            key: self.train_envs[key]
            for key in keys_to_sample
        }
        # Grab the first sampled env to initialize the observation buffers.
        init_env = next(iter(sampled_train_envs.values()))
        episode = [0] * self.num_train_envs
        episode_reward = [0] * self.num_train_envs
        episode_step = [0] * self.num_train_envs
        done = [True] * self.num_train_envs
        obs = [init_env.reset()] * self.num_train_envs
        next_obs = [init_env.reset()] * self.num_train_envs
        start_time = time.time()

        train_recording = False
        env_to_rec = 0

        if self.reload_weights and os.path.exists(self.model_dir):
            # Continue training
            try:
                latest_step = utils.get_latest_file(self.model_dir)
                self.agent.load(self.model_dir, latest_step)
            except Exception:
                print('Could not reload weights!')
        while self.step[0] < self.cfg.num_train_steps:

            if (self.resample_envs and self.step[0] > 0
                    and self.step[0] % self.env_resample_rate == 0):
                keys_to_sample = random.sample(list(self.train_envs),
                                               self.num_train_envs)
                sampled_train_envs = {
                    key: self.train_envs[key]
                    for key in keys_to_sample
                }

            for env_idx, (env_tag,
                          env) in enumerate(sampled_train_envs.items()):
                episode_step[env_idx] = 0
                while (episode_step[env_idx] <= env._max_episode_steps - 1):
                    if (not train_recording and env_idx == env_to_rec and
                            self.step[env_idx] % self.train_vid_interval == 0):
                        train_recording = True
                        self.train_video_recorder.init(enabled=True)

                    if done[env_idx] or (episode_step[env_idx] >=
                                         env._max_episode_steps - 1):
                        if self.step[env_idx] > 0:
                            self.logger.log('train/duration',
                                            time.time() - start_time,
                                            self.step[env_idx])
                            start_time = time.time()

                        if (self.step[env_idx] > 0 and env_idx == env_to_rec
                                and train_recording):
                            file_name = str(self.step[env_idx]) + '_' + env_tag
                            self.train_video_recorder.save(f'{file_name}.mp4')
                            self.train_video_recorder.frames = []
                            train_recording = False
                            env_to_rec = random.randint(
                                0,
                                len(sampled_train_envs) - 1)

                        # Evaluate agent periodically
                        if (env_idx == 0 and
                                episode[env_idx] % self.cfg.eval_frequency == 0):
                            # Evaluate an env from training
                            self.logger.log('eval_seen/episode',
                                            episode[env_idx],
                                            self.step[env_idx])
                            eval_env = self.train_envs[random.sample(
                                list(self.train_envs), 1)[0]]
                            self.evaluate(phase='seen', eval_env=eval_env)
                            # Evaluate an unseen env
                            self.logger.log('eval_unseen/episode',
                                            episode[env_idx],
                                            self.step[env_idx])
                            eval_env = self.eval_envs[random.sample(
                                list(self.eval_envs), 1)[0]]
                            self.evaluate(phase='unseen', eval_env=eval_env)
                        if episode[env_idx] % self.cfg.ckpt_frequency == 0:
                            self.agent.save(self.model_dir, episode[env_idx])
                        self.logger.log('train/episode_reward',
                                        episode_reward[env_idx],
                                        self.step[env_idx])
                        obs[env_idx] = env.reset()
                        done[env_idx] = False
                        episode_reward[env_idx] = 0
                        episode[env_idx] += 1

                        self.logger.log('train/episode', episode[env_idx],
                                        self.step[env_idx])
                        self.logger.log('train/env_idx', env_tag,
                                        self.step[env_idx])

                    # sample action for data collection
                    if self.step[env_idx] < self.cfg.num_seed_steps:
                        action = env.action_space.sample()
                    else:
                        with utils.eval_mode(self.agent):
                            action = self.agent.act(obs[env_idx], sample=True)

                    next_obs[env_idx], reward, done[env_idx], _ = env.step(
                        action)
                    if train_recording and env_idx == env_to_rec:
                        self.train_video_recorder.record(env)

                    # allow infinite bootstrap
                    done[env_idx] = float(done[env_idx])
                    done_no_max = (0 if episode_step[env_idx] + 1 ==
                                   env._max_episode_steps - 1
                                   else done[env_idx])

                    episode_reward[env_idx] += reward

                    self.replay_buffer.add(env_idx, obs[env_idx], action,
                                           reward, next_obs[env_idx],
                                           done[env_idx], done_no_max)

                    obs[env_idx] = next_obs[env_idx]
                    episode_step[env_idx] += 1
                    self.step[env_idx] += 1

                    # Run training update
                    if self.step[env_idx] >= self.cfg.num_seed_steps:
                        #print('Running train update')
                        for _ in range(self.cfg.num_train_iters):
                            self.agent.update(self.replay_buffer,
                                              self.num_train_envs, self.logger,
                                              self.step[env_idx], env_tag,
                                              env_idx)
                # At the end of each episode, log
                self.logger.dump(
                    self.step[env_idx],
                    save=(self.step[env_idx] > self.cfg.num_seed_steps),
                    ty='train')