def __init__(self, loader, args, lr_scheduler, training=True, reg=True):
        self.widgets = [progressbar.ETA()]
        self.lr_scheduler = lr_scheduler
        if training:
            self.widgets.append(' training, ')
        else:
            self.widgets.append(' testing, ')
        self.widgets += [
            progressbar.Variable('loss'),
            ', ',
            progressbar.Variable('l2'),
            ', ',
            progressbar.Variable('recall'),
            ', ',
        ]
        if training:
            self.widgets.append(progressbar.Variable('lr'))

        self.template_points = torch.Tensor(loader.dataset.template_points).to(
            args.device)
        self.template_feats = torch.Tensor(loader.dataset.template_feats).to(
            args.device)
        self.total_loss = 0.0
        self.total_l2_loss_before_reg = torch.zeros(1)
        self.total_l2_loss_after_reg = torch.zeros(1)
        self.errors_before_reg = [0, 0, 0]
        self.errors_after_reg = [0, 0, 0]
        self.count = 0
        self.error_thresholds = [0.05, 0.1, 1e10]
        self.training, self.reg = (training, reg)
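For context, the constructor above only assembles the widget list and running totals; nothing creates a bar yet. A minimal self-contained sketch (dummy values, not the author's training loop) of how such a widget list is typically handed to progressbar.ProgressBar and fed through keyword arguments:

import progressbar

# Sketch only: a dummy loop standing in for the real training loop.
widgets = [progressbar.ETA(), ' training, ',
           progressbar.Variable('loss'), ', ',
           progressbar.Variable('recall')]
with progressbar.ProgressBar(max_value=100, widgets=widgets) as bar:
    for i in range(100):
        # each Variable is set via a keyword argument of the same name
        bar.update(i, loss=1.0 / (i + 1), recall=i / 100.0)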
def test_variable_widget_widget():
    widgets = [
        ' [',
        progressbar.Timer(),
        '] ',
        progressbar.Bar(),
        ' (',
        progressbar.ETA(),
        ') ',
        progressbar.Variable('loss'),
        progressbar.Variable('text'),
        progressbar.Variable('error', precision=None),
        progressbar.Variable('missing'),
        progressbar.Variable('predefined'),
    ]

    p = progressbar.ProgressBar(widgets=widgets,
                                max_value=1000,
                                variables=dict(predefined='predefined'))
    p.start()
    print('time', time, time.sleep)
    for i in range(0, 200, 5):
        time.sleep(0.1)
        p.update(i + 1, loss=.5, text='spam', error=1)

    i += 1
    p.update(i, text=None)
    i += 1
    p.update(i, text=False)
    i += 1
    p.update(i, text=True, error='a')
    p.finish()
    def validate_epoch(self):
        pwidgets = [
            progressbar.Percentage(), " ",
            progressbar.Counter(format='%(value)02d/%(max_value)d'), " ",
            progressbar.Bar(), " ",
            progressbar.Timer(), ", ",
            progressbar.Variable('Top1', width=2, precision=4), ", ",
            progressbar.Variable('Top5', width=2, precision=4), ", ",
            progressbar.Variable('Loss', width=2, precision=4)
        ]
        pbar = progressbar.ProgressBar(widgets=pwidgets,
                                       max_value=self.val_batch_num,
                                       prefix="Val: ").start()

        self.val_loss.reset_states()
        self.val_accuracy_top1.reset_states()
        self.val_accuracy_top5.reset_states()

        for batch, (images, labels) in enumerate(self.val_data):
            self.validate_step(images, labels)

            pbar.update(batch,
                        Top1=self.val_accuracy_top1.result().numpy(),
                        Top5=self.val_accuracy_top5.result().numpy(),
                        Loss=self.val_loss.result().numpy())

        pbar.finish()
    def train_epoch(self, curr_epoch):

        pwidgets = [
            progressbar.Percentage(), " ",
            progressbar.Counter(format='%(value)02d/%(max_value)d'), " ",
            progressbar.Bar(), " ",
            progressbar.Timer(), ", ",
            progressbar.Variable('LR', width=1, precision=4), ", ",
            progressbar.Variable('Top1', width=2, precision=4), ", ",
            progressbar.Variable('Top5', width=2, precision=4), ", ",
            progressbar.Variable('Loss', width=2, precision=4)
        ]
        pbar = progressbar.ProgressBar(widgets=pwidgets,
                                       max_value=self.train_batch_num,
                                       prefix="Epoch {}/{}: ".format(
                                           curr_epoch, self.epochs)).start()

        self.train_loss.reset_states()
        self.train_accuracy_top1.reset_states()
        self.train_accuracy_top5.reset_states()

        for batch, (images, labels) in enumerate(self.train_data):
            loss = self.train_step(images, labels)
            self.train_loss(loss)
            pbar.update(batch,
                        LR=self.optimizer.learning_rate.numpy(),
                        Top1=self.train_accuracy_top1.result().numpy(),
                        Top5=self.train_accuracy_top5.result().numpy(),
                        Loss=self.train_loss.result().numpy())
        pbar.finish()
    def split_item_abstract_mapping(self, current_polytope, abstract_mapping):
        to_split = [current_polytope]
        processed = []
        relevant_polytopes = self.find_relevant_polytopes(current_polytope, abstract_mapping)

        widgets = [progressbar.Variable('splitting_queue'), ", ", progressbar.Variable('frontier_size'), ", ", progressbar.widgets.Timer()]
        # with progressbar.ProgressBar(prefix=f"Splitting states: ", widgets=widgets, is_terminal=True, term_width=200, redirect_stdout=True).start() as bar_split:
        while len(to_split) > 0:
            current_polytope = to_split.pop()
            split_happened = False
            for x in relevant_polytopes:
                choose = self.check_intersection(x, current_polytope)
                if choose:
                    dimensions_volume = self.find_important_dimensions(current_polytope, x)
                    if len(dimensions_volume) > 0:
                        min_idx = dimensions_volume[np.argmin(np.array(dimensions_volume)[:, 1])][0]
                        splits = split_polyhedron_milp(self.analysis_template, current_polytope, min_idx, x[min_idx])
                        to_split.append(tuple(splits[0]))
                        to_split.append(tuple(splits[1]))
                        split_happened = True
                        break
                # bar_split.update(splitting_queue=len(to_split), frontier_size=len(processed))
            if not split_happened:
                processed.append(current_polytope)

        # colours = []
        # for x, ranges_probs in frontier:
        #     colours.append(np.mean(ranges_probs[0]))
        # print("", file=sys.stderr)  # new line
        # fig = show_polygons(template, [x[0] for x in frontier] + to_split + processed, self.template_2d, colours + [0.5] * len(to_split) + [0.5] * len(processed))
        return processed
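Note that the bar in this variant is disabled, and simply uncommenting the two lines would not work: the while loop is not indented under the with block. A standalone sketch of the re-enabled shape, with dummy queue contents (the real calls appear in the split_item example further down):

import progressbar

# Standalone sketch; to_split/processed are dummies here.
widgets = [progressbar.Variable('splitting_queue'), ", ",
           progressbar.Variable('frontier_size'), ", ",
           progressbar.widgets.Timer()]
to_split, processed = list(range(20)), []
with progressbar.ProgressBar(prefix="Splitting states: ", widgets=widgets,
                             redirect_stdout=True).start() as bar_split:
    while to_split:
        processed.append(to_split.pop())
        bar_split.update(splitting_queue=len(to_split),
                         frontier_size=len(processed))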
Example #6
def cURL_links(links: list, use_proxy=False, append=False):
    n = LINKS_PER_BATCH
    link_batches = [
        links[i * n:(i + 1) * n] for i in range((len(links) + n - 1) // n)
    ]
    num_batches = len(link_batches)
    print("Processing [{}] links in [{}] batches of [{}] links each...".format(
        len(links), num_batches, n))

    widgets_curl = [
        ' [',
        progressbar.Timer(), '] ',
        progressbar.RotatingMarker(),
        progressbar.Bar(),
        progressbar.Percentage(), ' (',
        progressbar.AdaptiveETA(samples=datetime.timedelta(
            seconds=ETA_SAMPLE_DELTA)), ') ', '[',
        progressbar.SimpleProgress(), ']', '[',
        progressbar.Variable('links_left',
                             format='Links left in batch: {formatted_value}',
                             width=2), '/',
        progressbar.Variable('links_todo', format='{formatted_value}',
                             width=2), ']'
    ]

    timeouts = 0
    link_unique_curl = {}
    fields = ('msgid', 'href', 'str', 'status', 'effective-url',
              'redirect-count', 'sha256_link')
    mode = 'w'
    if append:
        mode = 'a'
    with open(DEDUP_CURL_LINK_FILE, mode, newline='',
              encoding='utf-8') as curl_csv:
        writer = csv.DictWriter(curl_csv, fieldnames=fields)
        if not append:
            writer.writeheader()
        with progressbar.bar.ProgressBar(widgets=widgets_curl,
                                         redirect_stdout=True) as pbar:
            pbar.max_value = num_batches
            for b_num, batch in pbar(zip(range(num_batches), link_batches)):
                print('\nProcessing batch {} ...'.format(b_num + 1))
                cbatch = build_link_batch(batch, use_proxy)
                timeouts += process_curl_batch(cbatch, pbar, use_proxy)
                for link in batch:
                    link_unique_curl[link['sha256_link']] = link  # key by hash so repeated links collapse
                    writer.writerow({k: link[k] for k in link if k in fields})
        print("")

    print("Procceed links with [{}] timeout events".format(timeouts))

    return (links, link_unique_curl)
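The batch construction above is ceil-division chunking: (len(links) + n - 1) // n slices of size n, the last one possibly short. A quick self-contained check with dummy data:

# Self-contained check of the batching arithmetic; values are dummies.
n = 4
links = list(range(10))
batches = [links[i * n:(i + 1) * n] for i in range((len(links) + n - 1) // n)]
assert batches == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]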
Example #7
    def generate_allWeatherData(self, startDate, endDate):
        stationsData = self.csvHandler._loadData('stations.csv')[0]

        numberOfStations = len(stationsData)
        progressBarWidget = [
            progressbar.Percentage(),
            ' ',
            progressbar.Bar('#', '|', '|'),
            ' ',
            progressbar.Variable('FIPS', width=12, precision=12),
            ' ',
            progressbar.Variable('ID', width=12, precision=12),
        ]
        progressBar = progressbar.ProgressBar(max_value=numberOfStations,
                                              widgets=progressBarWidget,
                                              redirect_stdout=True)
        progressBar.start()

        step = 0
        try:
            with open('weather.log', 'r') as logFile:
                step = int(logFile.read(), 10)
        except (FileNotFoundError, ValueError):
            with open('weather.log', 'w') as logFile:
                logFile.write(str(step))

        for i in range(step, numberOfStations):
            with open('weather.log', 'w') as logFile:
                logFile.write(str(i))

            stationID = stationsData[i]['id'].split(':')[1]
            countyFips = stationsData[i]['county_fips']
            progressBar.update(i, FIPS=countyFips, ID=stationID)
            # First step, create weather.csv file
            if i == 0:
                self.downloadHandler.get_countyWeatherData(
                    countyFips, stationID, startDate, endDate, 'weather.csv')
            # Other steps, merge new data to weather.csv file
            else:
                self.downloadHandler.get_countyWeatherData(
                    countyFips, stationID, startDate, endDate, 'temp.csv')
                self.csvHandler.merge_csvFiles_addRows('weather.csv',
                                                       'temp.csv',
                                                       'weather.csv')

        progressBar.finish()
        debug.debug_print("SUCCESS: data extracted (weather data)", 2)
Example #8
    def split_item(self, template):
        '''Takes an abstract mapping saved previously and splits new input boundaries according to it.'''
        frontier = pickle.load(open("new_frontier3.p", "rb"))
        root = self.generate_root_polytope(self.input_boundaries)
        chosen_polytopes = []
        to_split = [root]
        processed = []
        new_frontier = []
        for x, probs in frontier:
            choose = self.check_contained(x, root)
            if choose:
                new_frontier.append((x, probs))
        frontier = new_frontier  # shrink the selection of polytopes to only the ones which are relevant

        # colours = []
        # for x, ranges_probs in frontier:
        #     colours.append(np.mean(ranges_probs[0]))
        # print("", file=sys.stderr)  # new line
        # fig = show_polygons(template, [x[0] for x in frontier], self.template_2d, colours)

        widgets = [progressbar.Variable('splitting_queue'), ", ", progressbar.Variable('frontier_size'), ", ", progressbar.widgets.Timer()]
        with progressbar.ProgressBar(prefix=f"Splitting states: ", widgets=widgets, is_terminal=True, term_width=200, redirect_stdout=True).start() as bar_split:
            while len(to_split) > 0:
                current_polytope = to_split.pop()
                split_happened = False
                for x, probs in frontier:
                    choose = self.check_contained(x, current_polytope)
                    if choose:
                        dimensions_volume = self.find_important_dimensions(current_polytope, x)
                        if len(dimensions_volume) > 0:
                            min_idx = dimensions_volume[np.argmin(np.array(dimensions_volume)[:, 1])][0]
                            splits = split_polyhedron_milp(self.analysis_template, current_polytope, min_idx, x[min_idx])
                            chosen_polytopes.append((x, probs))
                            to_split.append(tuple(splits[0]))
                            to_split.append(tuple(splits[1]))
                            split_happened = True
                            break
                    bar_split.update(splitting_queue=len(to_split), frontier_size=len(processed))
                if not split_happened:
                    processed.append(current_polytope)

        # colours = []
        # for x, ranges_probs in frontier:
        #     colours.append(np.mean(ranges_probs[0]))
        # print("", file=sys.stderr)  # new line
        # fig = show_polygons(template, [x[0] for x in frontier] + to_split + processed, self.template_2d, colours + [0.5] * len(to_split) + [0.5] * len(processed))
        return processed
Example #9
    def create_progress_bar(self, iterations):
        widgets = [
            progressbar.Variable("iter", format="{name}: {value}"),
            " | ",
            progressbar.Variable("loss", format="{name}: {value:0.3e}"),
            " ",
            progressbar.Bar(marker="■", fill="·"),
            " ",
            progressbar.Percentage(),
            " | ",
            progressbar.Timer(format='elapsed: %(elapsed)s'),
        ]
        ProgressBar = progressbar.NullBar if self.quiet else progressbar.ProgressBar
        return ProgressBar(max_value=iterations,
                           widgets=widgets,
                           variables={"loss": float("+inf")})
Example #10
def run(agent):

  config = agent.config
  episodes = config.get('episodes', 1)

  train_mode = config.get('train_mode', True)
  if train_mode:
    timer = pb.ProgressBar(
      widgets=[
        'Episode: ',
        pb.SimpleProgress(), ' ',
        pb.Variable('Score'), ' ',
        pb.AdaptiveETA()
      ],
      max_value=episodes
    ).start()

  scores = []
  for i in range(1, episodes+1):
    score = episode(agent)
    scores.append(score)
    if train_mode:
      timer.update(i, Score=score)
  if train_mode:
    timer.finish()
  return scores
Example #11
    def run(self, seed_img, critics):
        """Run the optimizer on the image according to the loss returned by the critics.
        """
        image = seed_img.to(self.device).requires_grad_(True)

        obj = MultiCriticObjective(self.encoder, critics)
        opt = SolverLBFGS(obj, image, lr=self.lr)

        widgets = [
            progressbar.SimpleProgress(),
            " | ",
            progressbar.Variable("loss", format="{name}: {value:0.3e}"),
            " ",
            progressbar.Bar(marker="■", fill="·"),
            " ",
            progressbar.ETA(),
        ]
        progress = progressbar.ProgressBar(
            max_value=self.max_iter, widgets=widgets, variables={"loss": float("+inf")}
        )

        try:
            for i, loss in self._iterate(opt):
                # Update the progress bar with the result!
                progress.update(i, loss=loss)
                # Constrain the image to the valid color range.
                image.data.clamp_(0.0, 1.0)
                # Return back to the user...
                yield loss, image

            progress.max_value = i
        finally:
            progress.finish()
    def check_split(self, t, x, x_label, nn, bar_main, stats, template, template_2d) -> List[Tuple[Any, Any]]:
        # -------splitting
        pre_nn = self.get_pre_nn()
        # self.find_direction_split(template,x,nn,pre_nn)
        ranges_probs = self.sample_probabilities(template, x, nn, pre_nn)  # sampled version
        if not is_split_range(ranges_probs, self.max_probability_split):  # refine only if the range is small
            ranges_probs = self.create_range_bounds_model(template, x, self.env_input_size, nn)
        new_frontier = []
        to_split = []
        n_splits = 0
        to_split.append((x, ranges_probs))
        print("", file=sys.stderr)  # new line
        widgets = [progressbar.Variable('splitting_queue'), ", ", progressbar.Variable('frontier_size'), ", ", progressbar.widgets.Timer()]
        with progressbar.ProgressBar(prefix="Splitting states: ", widgets=widgets, is_terminal=True, term_width=200, redirect_stdout=True).start() as bar_split:
            while len(to_split) != 0:
                bar_split.update(value=bar_split.value + 1, splitting_queue=len(to_split), frontier_size=len(new_frontier))
                to_analyse, ranges_probs = to_split.pop()
                action_to_split = np.nanargmax([x[1] - x[0] for x in ranges_probs])
                split_flag = is_split_range(ranges_probs, self.max_probability_split)
                can_be_split = self.can_be_splitted(template, to_analyse)
                if split_flag and can_be_split:
                    split1, split2 = sample_and_split(self.get_pre_nn(), nn, template, np.array(to_analyse), self.env_input_size, template_2d, minimum_length=self.minimum_length,
                                                      action=action_to_split)
                    n_splits += 1
                    if split1 is None or split2 is None:
                        split1, split2 = sample_and_split(self.get_pre_nn(), nn, template, np.array(to_analyse), self.env_input_size, template_2d)
                    ranges_probs1 = self.sample_probabilities(template, split1, nn, pre_nn)  # sampled version
                    if not is_split_range(ranges_probs1, self.max_probability_split):  # refine only if the range is small
                        ranges_probs1 = self.create_range_bounds_model(template, split1, self.env_input_size, nn)
                    ranges_probs2 = self.sample_probabilities(template, split2, nn, pre_nn)  # sampled version
                    if not is_split_range(ranges_probs2, self.max_probability_split):  # refine only if the range is small
                        ranges_probs2 = self.create_range_bounds_model(template, split2, self.env_input_size, nn)
                    to_split.append((tuple(split1), ranges_probs1))
                    to_split.append((tuple(split2), ranges_probs2))

                else:
                    new_frontier.append((to_analyse, ranges_probs))
                    # plot_frontier(new_frontier)

        # colours = []
        # for x, ranges_probs in new_frontier + to_split:
        #     colours.append(np.mean(ranges_probs[0]))
        # print("", file=sys.stderr)  # new line
        # fig = show_polygons(template, [x[0] for x in new_frontier + to_split], template_2d, colours)
        # fig.write_html("new_frontier.html")
        print("", file=sys.stderr)  # new line
        return new_frontier
Example #13
    def train_batch(self, dataset):
        if self.num_batches is not None:
            max_size = str(self.num_batches)
        else:
            max_size = '???'

        widgets = [
            '  ',
            progressbar.Counter(),
            '/',
            max_size,
            ' ',
            progressbar.Variable("Loss"),
            ' ',
            progressbar.Bar(),
            ' [',
            progressbar.Variable("TrainTime"),
            '] ',
            ' (',
            progressbar.ETA(),
            ') ',
        ]

        with progressbar.ProgressBar(widgets=widgets,
                                     max_value=self.num_batches) as bar:
            self.num_batches = 0
            t0 = time.time()
            for batch in dataset:
                self.num_batches += 1
                self.ckpt.step.assign_add(1)

                ret = self.model.train_step(batch)
                time_str = str(
                    datetime.timedelta(
                        seconds=int(self.ckpt.train_time.numpy())))
                bar.update(self.num_batches,
                           Loss=ret['loss'].numpy(),
                           TrainTime=time_str)
                self.write_summary(ret)
                self.ckpt.train_time.assign_add(time.time() - t0)
                t0 = time.time()

        save_path = self.manager.save()
        print("Saved checkpoint for step {}: {}".format(
            int(self.ckpt.step), save_path))
        print("loss {:1.3f}".format(ret['loss'].numpy()))
def get_training_callback(n_steps):
    widgets = [
        progressbar.Bar(),
        progressbar.Percentage(), ' |',
        progressbar.Timer(), '|',
        progressbar.ETA(), '|',
        progressbar.Variable('loss', width=6, precision=4), ', ',
        progressbar.Variable('lr', width=8, precision=3)
    ]
    bar = progressbar.ProgressBar(max_value=n_steps, widgets=widgets)

    def update_progressbar(i, n_steps, loss_history, opt):
        loss = np.mean(loss_history[-50:])
        lr = float(opt._decayed_lr(tf.float32))
        bar.update(i + 1, loss=loss, lr=lr)

    return update_progressbar
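The returned closure is meant to be called once per optimization step. A hypothetical driver (assuming TF 2.x, where the private optimizer._decayed_lr used above is available; the loss values are fake):

import numpy as np
import tensorflow as tf

n_steps = 50
opt = tf.keras.optimizers.Adam(
    tf.keras.optimizers.schedules.ExponentialDecay(1e-3, 10, 0.9))
update_cb = get_training_callback(n_steps)

loss_history = []
for step in range(n_steps):
    loss_history.append(np.exp(-step / 10.0))  # stand-in for a train step
    update_cb(step, n_steps, loss_history, opt)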
    def main_loop(self, nn, template, template_2d):
        root = self.generate_root_polytope()
        root_pair = (root, 0)  # label for root is always 0
        root_list = [root_pair]
        if not self.load_graph:
            stats = Experiment.LoopStats()
            stats.root = root_pair
            stats.start_time = datetime.datetime.now()
            if self.additional_seen_fn is not None:
                for extra in self.additional_seen_fn():
                    stats.seen.append(extra)
            stats.frontier = [(0, x) for x in root_list]
            if self.graph is not None:
                self.graph.add_node(root_pair)
            widgets = [
                progressbar.Variable('n_workers'), ', ',
                progressbar.Variable('frontier'), ', ',
                progressbar.Variable('seen'), ', ',
                progressbar.Variable('num_already_visited'), ", ",
                progressbar.Variable('max_t'), ", ",
                progressbar.widgets.Timer()
            ]
            if self.before_start_fn is not None:
                self.before_start_fn(nn)
            bar_cm = (progressbar.ProgressBar(widgets=widgets)
                      if self.show_progressbar else nullcontext())
            with bar_cm as bar_main:
                while len(stats.frontier) != 0 or len(stats.proc_ids) != 0:
                    self.inner_loop_step(stats, template_2d, template, nn,
                                         bar_main)
            self.plot_fn(stats.vertices_list, template, template_2d)
            # gateway, mc, mdp, mapping = recreate_prism_PPO(self.graph, root_pair)
            # inv_map = {v: k for k, v in mapping.items()}
            stats.end_time = datetime.datetime.now()
            networkx.write_gpickle(self.graph,
                                   os.path.join(self.save_dir, "graph.p"))
            pickle.dump(stats,
                        open(os.path.join(self.save_dir, "stats.p"), "wb"))
        else:
            self.graph = networkx.read_gpickle(
                os.path.join(self.save_dir, "graph.p"))
            stats: Experiment.LoopStats = pickle.load(
                open(os.path.join(self.save_dir, "stats.p"), "rb"))
            # gateway, mc, mdp, mapping = recreate_prism_PPO(self.graph, root_pair)
        return stats.max_t, stats.num_already_visited, stats.vertices_list, stats.is_agent_unsafe
def test(model, loader, args, epoch):
    model.eval()

    total_loss = total_l2dist = 0.0
    widgets = [
        progressbar.ETA(),
        ' testing, ',
        progressbar.Variable('avg_loss'),
        ', ',
        progressbar.Variable('avg_l2dist'),
        ', ',
        progressbar.Variable('count'),
        ', ',
    ]
    template_points = torch.Tensor(loader.dataset.template_points).to(
        args.device)
    #template_feats = torch.Tensor(loader.dataset.template_feats).to(args.device)

    with torch.no_grad():
        with progressbar.ProgressBar(max_value=len(loader),
                                     widgets=widgets) as bar:
            for i, data_list in enumerate(loader):
                out = model(data_list)
                y = torch.cat([data.y for data in data_list]).to(out.device)
                loss = F.nll_loss(out, y)
                with torch.no_grad():
                    pred = out.max(1)[1]
                    l2dist = ((
                        (template_points[pred, :] -
                         template_points[y, :])**2).sum(-1).sqrt().mean())
                    total_l2dist += l2dist.item()
                    total_loss += loss.item()
                if (i % 1000 == 0) or (i == len(loader) - 1):
                    bar.update(
                        i,
                        avg_loss=total_loss / (i + 1.0),
                        avg_l2dist=total_l2dist / (i + 1.0),
                        count=(i + 1) * args.batch_size,
                    )

    torch.cuda.empty_cache()
    return {
        'avg_l2dist': total_l2dist / len(loader),
        'avg_loss': total_loss / len(loader)
    }
Example #17
def variables():
    # Use progressbar.Variable to keep track of some parameter(s) during
    # your calculations
    widgets = [
        progressbar.Percentage(),
        progressbar.Bar(),
        progressbar.Variable('loss'),
        ', ',
        progressbar.Variable('username', width=12, precision=12),
    ]
    with progressbar.ProgressBar(max_value=100, widgets=widgets) as bar:
        min_so_far = 1
        for i in range(100):
            time.sleep(0.01)
            val = random.random()
            if val < min_so_far:
                min_so_far = val
            bar.update(i, loss=min_so_far, username='******')
Example #18
def test(model, loader, args, epoch):
    model.eval()

    total_loss = total_l2dist = 0.0
    widgets = [
        progressbar.ETA(),
        ' testing, ',
        progressbar.Variable('avg_loss'),
        ', ',
        progressbar.Variable('avg_l2dist'),
        ', ',
        progressbar.Variable('count'),
        ', ',
    ]
    template_points = torch.Tensor(loader.dataset.template_points).to(
        args.device)
    template_feats = torch.Tensor(loader.dataset.template_feats).to(
        args.device)

    with torch.no_grad():
        with progressbar.ProgressBar(max_value=len(loader),
                                     widgets=widgets) as bar:
            for i, data in enumerate(loader):
                data = data.to(args.device)
                out = model(data)
                loss = F.nll_loss(out, data.y)
                pred = out.max(1)[1]
                l2dist = (((template_points[pred, :] -
                            template_points[data.y, :])**2).sum(-1).sqrt().mean())
                total_l2dist += l2dist.item()
                total_loss += loss.item()
                bar.update(
                    i,
                    avg_loss=total_loss / (i + 1.0),
                    avg_l2dist=total_l2dist / (i + 1.0),
                    count=(i + 1) * args.batch_size,
                    #malloc=torch.cuda.memory_allocated(device=args.device)/1024.0**2,
                    #cache=torch.cuda.memory_cached(device=args.device)/1024.0**2,
                )

    return {
        'avg_l2dist': total_l2dist / len(loader),
        'avg_loss': total_loss / len(loader)
    }
Example #19
    def check_split(self, x, nn, template, template_2d) -> List:
        # -------splitting
        pre_nn = self.get_pre_nn()
        # self.find_direction_split(template,x,nn,pre_nn)
        ranges_probs = self.sample_probabilities(template, x, nn, pre_nn)  # sampled version
        if not is_split_range(ranges_probs, self.max_probability_split) and self.use_milp_range_prob:  # refine only if the range is small
            ranges_probs = self.create_range_bounds_model(template, x, self.env_input_size, nn)
        new_frontier = []
        to_split = []
        n_splits = 0
        to_split.append((x, ranges_probs))
        widgets = [progressbar.Variable('splitting_queue'), ", ", progressbar.Variable('frontier_size'), ", ", progressbar.widgets.Timer()]
        with progressbar.ProgressBar(prefix="Splitting states: ", widgets=widgets, is_terminal=True, term_width=200, redirect_stdout=True).start() as bar_split:
            while len(to_split) != 0:
                bar_split.update(value=bar_split.value + 1, splitting_queue=len(to_split), frontier_size=len(new_frontier))
                to_analyse, ranges_probs = to_split.pop()
                split_flag = is_split_range(ranges_probs, self.max_probability_split)
                can_be_split = self.can_be_split(template, to_analyse)
                action_to_split = np.nanargmax([x[1] - x[0] for x in ranges_probs])
                if split_flag and can_be_split:
                    split1, split2 = sample_and_split(pre_nn, nn, template, np.array(to_analyse), self.env_input_size, template_2d, action=action_to_split,
                                                      minimum_length=self.minimum_length, use_softmax=self.use_softmax)
                    n_splits += 1
                    if split1 is None or split2 is None:
                        split1, split2 = sample_and_split(pre_nn, nn, template, np.array(to_analyse), self.env_input_size, template_2d, action=action_to_split,
                                                          minimum_length=self.minimum_length, use_softmax=self.use_softmax)
                    ranges_probs1 = self.sample_probabilities(template, split1, nn, pre_nn)  # sampled version
                    if not is_split_range(ranges_probs1, self.max_probability_split) and self.use_milp_range_prob:  # refine only if the range is small
                        ranges_probs1 = self.create_range_bounds_model(template, split1, self.env_input_size, nn)
                    ranges_probs2 = self.sample_probabilities(template, split2, nn, pre_nn)  # sampled version
                    if not is_split_range(ranges_probs2, self.max_probability_split) and self.use_milp_range_prob:  # refine only if the range is small
                        ranges_probs2 = self.create_range_bounds_model(template, split2, self.env_input_size, nn)
                    to_split.append((tuple(split1), ranges_probs1))
                    to_split.append((tuple(split2), ranges_probs2))

                else:
                    new_frontier.append((to_analyse, ranges_probs))
                    # plot_frontier(new_frontier)

        pickle.dump(new_frontier, open("new_frontier3.p", "wb"))
        self.plot_frontier(new_frontier + to_split)
        print("", file=sys.stderr)  # new line
        return new_frontier
Example #20
    def __init__(self, max_value, log_dir=None, comment=None):

        if log_dir is not None:
            assert isinstance(log_dir, str)
            from tensorboardX import SummaryWriter
            time_string = time.strftime('%Y-%m-%d-%H-%M-%S',
                                        time.localtime(time.time()))
            if comment is None:
                comment = ""
            else:
                comment = comment + "-"
            self.board_writer = SummaryWriter(
                "{}/{}{}-lr-finder-tensorboard".format(log_dir, comment,
                                                       time_string))
        else:
            self.board_writer = None

        widgets = [
            progressbar.Percentage(format='%(percentage)3.2f%%'), " | ",
            "Iter:",
            progressbar.Variable('current_iter',
                                 format='{formatted_value}',
                                 width=0,
                                 precision=0), "/{0}".format(max_value), ", ",
            progressbar.Variable('snapshot',
                                 format='{formatted_value}',
                                 width=8,
                                 precision=0), " (",
            progressbar.Timer(format='ELA: %(elapsed)s'), ", ",
            progressbar.AdaptiveETA(), ")"
        ]

        self.bar = progressbar.ProgressBar(max_value=max_value,
                                           widgets=widgets,
                                           redirect_stdout=True)

        # Use multi-process for update.
        self.queue = Queue()
        self.process = Process(target=self._update, daemon=True)
        self.process.start()
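The _update worker that drains self.queue is not shown in this example. A plausible minimal version (an assumption, not the author's code) blocks on the queue and forwards messages to the bar until a sentinel arrives:

    def _update(self):
        # Hypothetical worker: drain (value, extras) messages from the queue
        # and forward them to the bar; a None message acts as the sentinel.
        while True:
            msg = self.queue.get()
            if msg is None:
                self.bar.finish()
                break
            value, extras = msg
            self.bar.update(value, **extras)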
Example #21
    def create_progress_bar(max_value):
        widgets = [
            progressbar.Percentage(),
            progressbar.Bar(), ' Encoded: ',
            progressbar.Counter(), ', ',
            progressbar.Variable('Dropped'), ', ',
            progressbar.AdaptiveETA(), ' ',
            progressbar.Timer()
        ]
        return progressbar.ProgressBar(max_value=max_value,
                                       widgets=widgets,
                                       max_error=False,
                                       redirect_stdout=True).start()
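A hypothetical driver for the bar above (treating create_progress_bar as a free function, since it takes no self; the drop pattern is a dummy):

bar = create_progress_bar(1000)
dropped = 0
for i in range(1000):
    if i % 7 == 0:
        dropped += 1  # dummy: every 7th frame counts as dropped
    bar.update(i, Dropped=dropped)
bar.finish()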
Example #22
    def create_progress_bar(self, iterations):
        widgets = [
            progressbar.SimpleProgress(),
            " | ",
            progressbar.Variable("loss", format="{name}: {value:0.3e}"),
            " ",
            progressbar.Bar(marker="■", fill="·"),
            " ",
            progressbar.ETA(),
        ]
        ProgressBar = progressbar.NullBar if self.quiet else progressbar.ProgressBar
        return ProgressBar(
            max_value=iterations, widgets=widgets, variables={"loss": float("+inf")}
        )
Example #23
    def __init__(self, max_value):
        widgets = [
            progressbar.Percentage(format='%(percentage)3.2f%%'), " | ",
            "Iter:",
            progressbar.Variable('current_iter',
                                 format='{formatted_value}',
                                 width=0,
                                 precision=0), "/{0}".format(max_value), ", ",
            progressbar.Variable('snapshot',
                                 format='{formatted_value}',
                                 width=8,
                                 precision=0), " (",
            progressbar.Timer(format='ELA: %(elapsed)s'), ", ",
            progressbar.AdaptiveETA(), ")"
        ]

        self.bar = progressbar.ProgressBar(max_value=max_value,
                                           widgets=widgets,
                                           redirect_stdout=True)

        # Use multi-process for update.
        self.queue = Queue()
        self.process = Process(target=self._update, daemon=True)
        self.process.start()
Example #24
    def run(self, seed_img, critics):
        """Run the optimizer on the image according to the loss returned by the critics.
        """
        image = seed_img.to("cuda").requires_grad_(True)

        obj = MultiCriticObjective(self.encoder, critics)
        opt = SolverLBFGS(obj, image, lr=self.lr)

        widgets = [
            progressbar.SimpleProgress(),
            " | ",
            progressbar.Variable("loss", format="{name}: {value:0.3e}"),
            " ",
            progressbar.Bar(marker="■", fill="·"),
            " ",
            progressbar.ETA(),
        ]
        progress = progressbar.ProgressBar(max_value=self.max_iter,
                                           widgets=widgets,
                                           variables={"loss": float("+inf")})

        previous = None
        for i in range(self.max_iter):
            # Perform one step of the optimization.
            loss = opt.step()
            # Update the progress bar with the result!
            progress.update(i, loss=loss)

            with torch.no_grad():
                # Constrain the image to the valid color range.
                image.data.clamp_(0.0, 1.0)

                # Return this iteration to the caller...
                yield loss.item(), image

                # See if we can terminate the optimization early.
                if previous is not None and abs(loss -
                                                previous) < self.precision:
                    assert i > 10, f"Optimization stalled at iteration {i}."
                    progress.max_value = i
                    break

                previous = loss

        progress.finish()
def scrape_data(browser, verbose=True):
    import progressbar
    import sys
    import time
    
    current_page = 1
    last_page = int(browser.find_element_by_css_selector('#ctl00_ContentPlaceHolder1_gridMessages_ctl00 > thead > tr.rgPager > td > div > div.rgWrap.rgInfoPart > strong:nth-child(2)').text)
    # Row follow the format of: '','Date','Agency','Subject','Message'
    AGENCY = 'NYC'

    data_rows = []

    if verbose:
        widgets = [progressbar.Percentage(), progressbar.Bar(), progressbar.Variable('date')]
        bar = progressbar.ProgressBar(widgets=widgets, max_value=last_page).start()
                                      
    while current_page != last_page:
        results_table = browser.find_element_by_xpath("//table[@class='rgMasterTable rgClipCells']")

        # Figure out where we are in the results
        page_list = results_table.find_element_by_class_name("rgNumPart")
        current_page = int(page_list.find_element_by_class_name("rgCurrentPage").text)
        
        #print("On page {} of {}".format(current_page, last_page))

        for row in results_table.find_element_by_tag_name('tbody').find_elements_by_tag_name('tr'):
            data = [c.text for c in row.find_elements_by_tag_name('td')]

            # Only collect data for the specific agency, drop SIR
            if data[2] == AGENCY and 'SIR' not in data[3]:
                data_rows.append(data[1:])

        #print("Last date processed {}".format(data[1]))
        if verbose:
            sys.stdout.flush()
            bar.update(current_page, date=data[1].split()[0])
        # Had some issues finding the next_button, so just use css_selector copied from chrome dev tools
        next_button = browser.find_element_by_css_selector('#ctl00_ContentPlaceHolder1_gridMessages_ctl00 > tfoot > tr.rgPager > td > div > div.rgWrap.rgArrPart2 > button.t-button.rgActionButton.rgPageNext')        
        next_button.click()

    if verbose:
        bar.finish()

    return data_rows
Example #26
def examine_shapenet_loading_behavior():
    dataset = data_tools.load_shapenet([shape_map["mug"]], shuffle=True)
    dataset = data_tools.simulate_input(dataset, 10, 10, 10)
    batched_ds = dataset.batch(16)

    widgets = [
        ' ',
        progressbar.Counter(), ' [',
        progressbar.Timer(), '] ', ' ',
        progressbar.Variable("shape"), ' '
    ]

    print()
    with progressbar.ProgressBar(widgets=widgets) as bar:
        for b, elem in enumerate(batched_ds):
            for i in range(16):
                shape_str = "{}:{}".format(elem['shape_category'][i].numpy(),
                                           elem['id'][i].numpy())
                bar.update(b, shape=shape_str)

    print()
Example #27
    def __init__(self, trainer):
        default_params = {
            "report_times_every_epoch": None,
            "report_interval_iters": 100,
            "record_file": "train.csv",
            "use_tensorboard": False
        }
        self.trainer = trainer
        default_params = utils.assign_params_dict(default_params,
                                                  self.trainer.params)

        if default_params["report_times_every_epoch"] is not None:
            self.report_interval_iters = max(
                1, self.trainer.training_point[2] //
                default_params["report_times_every_epoch"])
        else:
            self.report_interval_iters = default_params[
                "report_interval_iters"]

        if not self.trainer.params["debug"] and default_params[
                "use_tensorboard"]:
            # from tensorboardX import SummaryWriter
            from torch.utils.tensorboard import SummaryWriter
            model_name = os.path.basename(self.trainer.params["model_dir"])
            # time_string = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
            # time_string = self.trainer.params["time_string"]
            # self.board_writer = SummaryWriter("{}/log/{}-{}-tensorboard".format(self.trainer.params["model_dir"], model_name, time_string))
            # self.board_writer = SummaryWriter("{}/log/{}-{}-tensorboard".format(
            #     self.trainer.params["model_dir"], time_string, model_name))
            self.board_writer = SummaryWriter("{}/log/tensorboard".format(
                self.trainer.params["model_dir"]))
        else:
            self.board_writer = None

        self.epochs = self.trainer.params["epochs"]

        self.optimizer = self.trainer.elements["optimizer"]

        # For optimizer wrapper such as lookahead.
        # "None" is the default value
        if getattr(self.optimizer, "optimizer", None) is not None:
            self.optimizer = self.optimizer.optimizer

        self.device = "[{0}]".format(
            utils.get_device(self.trainer.elements["model"]))

        self.record_value = []

        self.start_write_log = False
        if not self.trainer.params["debug"] and default_params[
                "record_file"] != "" and default_params[
                    "record_file"] is not None:
            self.record_file = "{0}/log/{1}".format(
                self.trainer.params["model_dir"],
                default_params["record_file"])

            # The case to recover training
            if self.trainer.params["start_epoch"] > 0:
                # train.csv using append mode
                self.start_write_log = True
            elif os.path.exists(self.record_file):
                # Do backup to avoid clearing the loss log when re-running a same launcher.
                bk_file = "{0}.backup.{1}".format(
                    self.record_file,
                    time.strftime('%Y-%m-%d_%H:%M:%S',
                                  time.localtime(time.time())))
                shutil.move(self.record_file, bk_file)
        else:
            self.record_file = None

        # A format to show progress
        # Do not use progressbar.Bar(marker="\x1b[32m█\x1b[39m") and progressbar.SimpleProgress(format='%(value_s)s/%(max_value_s)s') to avoid too long string.
        widgets = [
            progressbar.Percentage(format='%(percentage)3.2f%%'), " | ",
            "Epoch:",
            progressbar.Variable('current_epoch',
                                 format='{formatted_value}',
                                 width=0,
                                 precision=0), "/{0}, ".format(self.epochs),
            "Iter:",
            progressbar.Variable('current_iter',
                                 format='{formatted_value}',
                                 width=0,
                                 precision=0),
            "/{0}".format(self.trainer.training_point[2]), " (",
            progressbar.Timer(format='ELA: %(elapsed)s'), ", ",
            progressbar.AdaptiveETA(), ")"
        ]

        # total num of iter
        max_value = self.trainer.params[
            "epochs"] * self.trainer.training_point[2]

        self.bar = progressbar.ProgressBar(max_value=max_value,
                                           widgets=widgets,
                                           redirect_stdout=True)

        # Use multi-process for update.
        self.queue = Queue()
        self.process = Process(target=self._update, daemon=True)
        self.process.start()
    def train(self, fname, trng_samples, training_iterations=250):

        widgets = [
            progressbar.Variable('accuracy', width=4, precision=4), ' ',
            progressbar.Variable('wg10', width=4), ' ',
            progressbar.Variable('wlng10', width=4), ' ',
            '[', progressbar.Timer(), ']',
            progressbar.Bar(),
            '(', progressbar.ETA(), ')'
        ]

        bar = progressbar.ProgressBar(max_value=training_iterations,
                                      widgets=widgets)

        # read in the training set
        trng_labels, trng_set = data_to_array(fname, self.features, trng_samples)

        # set t0, used for learning-rate updates
        t0 = dt.now()

        # convert to cupy arrays
        trng_labels = cp.asarray(trng_labels)
        trng_set = cp.asarray(trng_set)

        # loop over the requested number of training iterations
        for i in range(training_iterations):
            correct = 0

            # iterate through the training set
            for j in range(trng_set.shape[0]):

                # make a prediction
                pred_index, prediction, vth, hn = self.trng_prediction(trng_set[j])

                # check whether the prediction is incorrect
                if pred_index != self.labels.index(trng_labels[j]):
                    # squared-error loss on the summed output
                    loss = .5 * (trng_labels[j] - cp.sum(vth)) ** 2

                    # every `update_rate` examples, refresh the learning rate
                    # if the loss has moved by at least 5 since the last check
                    if j % self.update_rate == 0:
                        if abs(loss - self.last_loss) >= 5:
                            self.last_loss = loss
                            # average the accumulated loss over the window
                            self.last_loss /= self.update_rate
                            t_elapsed = dt.now() - t0
                            self.learning_rate = self.n0 / ((t_elapsed.total_seconds() / 3600) + 1)
                    else:
                        self.last_loss += loss

                    self.backpropogate(vth, hn, trng_set[j],
                                       self.labels.index(trng_labels[j]))
                else:
                    correct += 1

            # wg10 / wlng10: counts of weights above +10 and below -10
            bar.update(i,
                       accuracy=100. * float(correct) / trng_set.shape[0],
                       wg10=len(list(self.h_matrix[self.h_matrix > 10]) +
                                list(self.v_matrix[self.v_matrix > 10])),
                       wlng10=len(list(self.h_matrix[self.h_matrix < -10]) +
                                  list(self.v_matrix[self.v_matrix < -10])))
Example #29
    def run(self, accessor, opts, on_progress=None):
        """Run some repairs.

        See command.BaseCommand
        """
        accessor.connect()

        # TODO: use multiprocessing + progressbar here. Probably remove some
        # of the current arguments and generate them instead based on a number
        # of processes to do a full scan.
        self.metrics_file_path = opts.metrics_file_path

        if opts.storage_dir:
            settings = {"path": opts.storage_dir}
            with metadata_cache.DiskCache(accessor, settings) as cache:
                cache.repair(
                    shard=opts.shard,
                    nshards=opts.nshards,
                    start_key=opts.start_key,
                    end_key=opts.end_key,
                )
        else:
            logging.warning(
                "Skipping disk cache repair because storage_dir is empty")

        out_fd = sys.stderr
        if opts.quiet:
            out_fd = _DEV_NULL

        if self.pbar is None:
            start_key = -1 * 2**63
            end_key = 2**63 - 1

            if opts.start_key is not None:
                start_key = int(opts.start_key)
            if opts.end_key is not None:
                end_key = int(opts.end_key)

            widgets = [
                progressbar.Variable('token',
                                     format='(current: {formatted_value})'),
                ' ',
                progressbar.Percentage(),
                ' ',
                progressbar.SimpleProgress(
                    format='(%s)' % progressbar.SimpleProgress.DEFAULT_FORMAT),
                ' ',
                progressbar.Bar(),
                ' ',
                progressbar.Timer(),
                ' ',
                progressbar.AdaptiveETA(),
            ]

            # max_value = end_key - start_key
            self.pbar = progressbar.ProgressBar(widgets=widgets,
                                                fd=out_fd,
                                                redirect_stderr=False,
                                                min_value=0,
                                                max_value=end_key - start_key)

        self.pbar.start()

        if on_progress is None:

            def _on_progress(total, done, token):
                self.pbar.update(total, token=token)

                if self.metrics_file_path != "":
                    write_to_textfile(self.metrics_file_path, REGISTRY)

            on_progress = _on_progress

        accessor.repair(
            shard=opts.shard,
            nshards=opts.nshards,
            start_key=opts.start_key,
            end_key=opts.end_key,
            callback_on_progress=on_progress,
        )

        self.pbar.finish()

        # Final metric dump
        if self.metrics_file_path != "":
            write_to_textfile(self.metrics_file_path, REGISTRY)
Example #30
state_size = 2
agent = Agent(state_size, 2)
agent.load(
    os.path.join(
        utils.get_save_dir(),
        "Pendulum_Apr07_12-17-45_alpha=0.6, min_eps=0.01, eps_decay=0.2/checkpoint_final.pth"
    ))
action = None

render = False
n_trials = 10000
n_fail = 0
widgets = [
    progressbar.Percentage(),
    progressbar.Bar(),
    progressbar.Variable('fails'),
    ', ',
    progressbar.Variable('trials'),
]
with progressbar.ProgressBar(max_value=n_trials, widgets=widgets) as bar:
    for trial in range(n_trials):
        env.reset()
        env.state = start_state
        for i in range(4):
            # if action is None or np.random.rand() < 0.8:
            action = agent.act(env.state)
            next_state, reward, done, _ = env.step(action)
            if np.random.rand() > 0.8:
                next_state, reward, done, _ = env.step(action)  # sticky action
            if render:
                env.render()