Example #1
 def run(self, input_channel):
     """Implements the `*` builtin: numeric product, or string/array repetition."""
     for output in self.output_pairs(input_channel):
         if isinstance(output, tuple):
             left_output, right_output = output
         else:
             # A lone (unpaired) output is passed through unchanged.
             yield output
             continue
         if isinstance(left_output, jqsh.values.Number) and isinstance(right_output, jqsh.values.Number):
             yield jqsh.values.Number(left_output * right_output)
         elif isinstance(left_output, jqsh.values.String) and isinstance(right_output, jqsh.values.Number):
             if right_output % 1 == 0:
                 yield jqsh.values.String(left_output.value * int(right_output))
             else:
                 yield jqsh.values.JQSHException('integer')
         elif isinstance(left_output, jqsh.values.Array) and isinstance(right_output, jqsh.values.Number):
             if right_output % 1 == 0:
                 # Array repetition: ncycles yields the array's elements n times.
                 yield jqsh.values.Array(more_itertools.ncycles(left_output, int(right_output)))
             else:
                 yield jqsh.values.JQSHException('integer')
         elif isinstance(left_output, jqsh.values.Number) and isinstance(right_output, jqsh.values.String):
             # Mirror of the String * Number case, keeping multiplication commutative.
             if left_output % 1 == 0:
                 yield jqsh.values.String(right_output.value * int(left_output))
             else:
                 yield jqsh.values.JQSHException('integer')
         elif isinstance(left_output, jqsh.values.Number) and isinstance(right_output, jqsh.values.Array):
             if left_output % 1 == 0:
                 yield jqsh.values.Array(more_itertools.ncycles(right_output, int(left_output)))
             else:
                 yield jqsh.values.JQSHException('integer')
         else:
             # Any other operand combination is a type error.
             yield jqsh.values.JQSHException('type')
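The string and array branches above rely on `more_itertools.ncycles(iterable, n)`, which yields the elements of the iterable n times in order (an empty iterator when n <= 0). A minimal sketch of the repetition behavior this builtin depends on:

import more_itertools

# Array repetition: [1, 2] * 3 repeats the elements three times.
print(list(more_itertools.ncycles([1, 2], 3)))  # [1, 2, 1, 2, 1, 2]

# Zero repetitions yields nothing; the builtin above rejects non-integer
# counts itself (yielding a JQSHException('integer')) before calling ncycles.
print(list(more_itertools.ncycles([1, 2], 0)))  # []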
Example #2
 def test_happy_path(self):
     """cycle a sequence three times"""
     # `mi` is more_itertools (typically `import more_itertools as mi`).
     r = ["a", "b", "c"]
     n = mi.ncycles(r, 3)
     self.assertEqual(
         ["a", "b", "c", "a", "b", "c", "a", "b", "c"], list(n)
     )
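For reference, `ncycles` behaves as if built from itertools primitives: the input is materialized into a tuple once, then chained n times. A sketch of that equivalence (illustrative, not necessarily the library's exact source):

from itertools import chain, repeat

def ncycles_sketch(iterable, n):
    # Materialize once, then yield the saved elements n times.
    saved = tuple(iterable)
    return chain.from_iterable(repeat(saved, n))

assert list(ncycles_sketch("abc", 3)) == list("abc" * 3)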
Example #3
async def validator():
    # Helpers such as first_true, locate, one, nth, ncycles, split_before,
    # first, and last come from more_itertools.
    cache_data = validator_TTCache.get(VALIDATOR_CACHE_KEY)
    if cache_data:
        resp: ValidatorsResponse = cache_data
    else:
        async with lock:
            # Re-check the cache: another coroutine may have filled it while
            # we were waiting for the lock.
            cache_data = validator_TTCache.get(VALIDATOR_CACHE_KEY)
            if cache_data:
                # Match the return type of the cold path below.
                return cache_data.dict()
            else:
                latest_block_number_tasks = []
                for validator in setting.validator_list:
                    latest_block_number_tasks.append(get_latest_block(validator))
                latest_infos = await asyncio.gather(*latest_block_number_tasks, return_exceptions=True)
                # gather(..., return_exceptions=True) may hand back exception
                # objects, so filter those out along with the NO_LATEST_BLOCK sentinel.
                latest_infos_no_exception = [
                    x for x in latest_infos
                    if not isinstance(x, BaseException) and x.block_number != NO_LATEST_BLOCK
                ]
                latest_num_dict: Dict[str, LatestInfo] = {
                    i.validator.host: i
                    for i in latest_infos
                    if not isinstance(i, BaseException)
                }
                # If fetching the latest block failed for every validator,
                # fall back to a randomly chosen `nextToPropose`.
                if len(latest_infos_no_exception) == 0:
                    best = random.choice(setting.validator_list)
                    max_block_numbers = NO_LATEST_BLOCK
                else:
                    max_block_numbers = max(i.block_number for i in latest_infos_no_exception)
                    # Pass the predicate as `pred=`: first_true's second
                    # positional parameter is `default`, not the predicate.
                    latest = first_true(latest_infos_no_exception, pred=lambda x: x.block_number == max_block_numbers)
                    index = one(locate(setting.validator_list, lambda x: x.pub_key == latest.sender))

                    # Why index + 2? The validator at `index` proposed the
                    # latest block, and the next validator may already be
                    # proposing a new one, so the one after that (+2) is the
                    # more reliable choice. Cycling the list twice lets the
                    # index wrap past the end of the list.
                    best = nth(ncycles(setting.validator_list, 2), index + 2)
                # Rotate the validator list so that `best` comes first.
                split_validators = list(split_before(setting.validator_list, lambda x: x.host == best.host))
                if len(split_validators) == 1:
                    sorted_validators = one(split_validators)
                else:
                    sorted_validators = last(split_validators) + first(split_validators)

                validators = []
                for x in sorted_validators:
                    info = latest_num_dict.get(x.host)
                    # Hosts whose fetch raised are absent from latest_num_dict;
                    # fall back to the sentinel (assuming the model tolerates a
                    # missing timestamp).
                    validators.append(Validator(
                        host=x.host,
                        grpc_port=x.grpc_port,
                        http_port=x.http_port,
                        latestBlockNumber=info.block_number if info else NO_LATEST_BLOCK,
                        timestamp=info.timestamp if info else None,
                    ))

                nextToPropose = NextToPropose(host=best.host, grpcPort=best.grpc_port, httpPort=best.http_port,
                                              latestBlockNumber=max_block_numbers)
                resp = ValidatorsResponse(nextToPropose=nextToPropose, validators=validators)
                validator_TTCache[VALIDATOR_CACHE_KEY] = resp
    return resp.dict()
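The `nth(ncycles(..., 2), index + 2)` pattern above is a wrap-around lookup: cycling the list twice guarantees that index + 2 is still in range even when it points past the last element. A minimal sketch of both the lookup and the `split_before` rotation, using plain strings in place of validator objects:

from more_itertools import nth, ncycles, split_before, one, first, last

hosts = ["a", "b", "c", "d"]
index = 3  # last element; index + 2 wraps around to "b"
best = nth(ncycles(hosts, 2), index + 2)
assert best == "b"

# Rotate the list so that `best` comes first, as the endpoint above does.
parts = list(split_before(hosts, lambda h: h == best))
rotated = one(parts) if len(parts) == 1 else last(parts) + first(parts)
assert rotated == ["b", "c", "d", "a"]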
Example #4
 def test_pathological_case(self):
     """asking for negative cycles should return an empty iterator"""
     n = mi.ncycles(range(100), -10)
     self.assertRaises(StopIteration, lambda: next(n))
Example #5
 def test_null_case(self):
     """asking for 0 cycles should return an empty iterator"""
     n = mi.ncycles(range(100), 0)
     self.assertRaises(StopIteration, lambda: next(n))
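Both tests pass because `ncycles` repeats the materialized elements n times, and repeating zero or a negative number of times produces nothing. A quick interactive check:

import more_itertools as mi

assert list(mi.ncycles(range(100), 0)) == []
assert list(mi.ncycles(range(100), -10)) == []
assert next(mi.ncycles(range(3), 0), "empty") == "empty"  # next() with a default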
Example #6
labels = (
    np.asarray(chunk)
    for chunk in chunked(labels_unchunked(training_filenames), batch_size))


def full_gen():
    print("Running full gen...")
    for x in gen_imgs:
        y = next(labels)  # generators have no .next() method in Python 3
        if x.shape[0] != batch_size:
            # Skip incomplete final batches so image and label shapes stay aligned.
            continue
        yield np.squeeze(x), np.squeeze(y)


gen = ncycles(full_gen(), 10000)


def get_test_labels():
    print("Generating test labels...")
    test_labels = []
    # Note: Example only includes a subset of files
    # for the live demo. To use all files, comment
    # out the line below and uncomment the line two below.
    for i in range(0, 50):
        # for i in range(0, len(test_filenames)):
        output = test_filenames[i].split("/")[2]
        if int(output) == 0:
            out = np.array([1, 0])
        else:
            out = np.array([0, 1])
        test_labels.append(out)  # one-hot label per test file
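One caveat with this pattern: `ncycles` materializes its input into a tuple when called, so `ncycles(full_gen(), 10000)` runs the whole generator once up front and holds every batch in memory; the 10000 "epochs" then replay the stored batches. If re-running the pipeline lazily on each pass is what's wanted, a factory-based variant works (a hypothetical `lazy_ncycles` helper, not part of more_itertools):

import itertools

def lazy_ncycles(make_iterable, n):
    # make_iterable is a zero-argument factory such as full_gen;
    # each cycle re-invokes it instead of replaying cached elements.
    return itertools.chain.from_iterable(make_iterable() for _ in range(n))

# Usage sketch: lazy_ncycles(full_gen, 10000) -- note this only helps if the
# factory rebuilds its inputs (here, `labels` and `gen_imgs`) on every call.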
Example #7
    def evaluate(self, models, epoch=None):
        if self.cache and self.iterations > 1:
            warnings.warn(
                f'Evaluating with caching and {self.iterations} iterations. Evaluating more than once only makes sense without caching.'
            )

        if len(self.problems) == 0:
            # If there are no problems, return an empty log.
            return {}

        time_begin = tf.timestamp()

        print(
            f'Evaluating symbol cost model with solver on {len(self.problems)} problems after epoch {epoch}...',
            file=sys.stderr)

        df_data = {'problem': self.problems}

        get_precedences = None
        if not self.isolated and not self.baseline:
            data = {}
            for name, model in models.items():
                print(
                    f'Predicting {name} symbol costs on {len(self.problems)} problems...'
                )
                res = model.predict(self.problems_dataset, verbose=1)
                precedences, precedence_costs = self.precedences(res['costs'])
                data[name] = {
                    'valid': res['valid'],
                    'precedences': precedences
                }
                df_data[name, 'symbols'] = res['costs'].row_lengths()
                df_data[name, 'symbol_cost_valid'] = res['valid']
                df_data[name, 'precedence_cost'] = precedence_costs

            def get_precedences(problem_i):
                res = {}
                for name, vals in data.items():
                    if not vals['valid'][problem_i]:
                        return None
                    res[name] = vals['precedences'][problem_i]
                return res

        # ncycles repeats the full index sequence `iterations` times, so the
        # resulting records are grouped by iteration: all problems of
        # iteration 0 first, then iteration 1, and so on.
        cases = more_itertools.ncycles(range(len(self.problems)),
                                       self.iterations)
        print(
            f'Evaluating on {len(self.problems) * self.iterations} cases ({len(self.problems)} problems, {self.iterations} iterations)...',
            file=sys.stderr)
        records = self.parallel(
            joblib.delayed(self.solve_one)(models, problem_i, get_precedences)
            for problem_i in cases)

        problems_filtered = self.problems

        if self.isolated:
            for name in models:
                # We get the values only from the first iteration, assuming they don't differ across iterations.
                assert (name, 'precedence_cost') not in df_data
                df_data[name, 'precedence_cost'] = [
                    r.get((name, 'precedence_cost'), None)
                    for r in records[:len(problems_filtered)]
                ]
                assert (name, 'symbols') not in df_data
                df_data[name, 'symbols'] = [
                    r.get((name, 'symbols'), None)
                    for r in records[:len(problems_filtered)]
                ]

        iter_dfs = []
        field_series = {k: [] for k in self.columns}
        for i in range(self.iterations):
            # Each iteration occupies one contiguous slice of `records`
            # (see the ordering produced by ncycles above).
            iter_records = records[i * len(problems_filtered):(i + 1) *
                                   len(problems_filtered)]
            iter_df = dataframe_from_records(iter_records,
                                             dtypes=vampire.Result.pd_dtypes,
                                             index=problems_filtered)
            iter_dfs.append(iter_df)
            for k in self.columns:
                field_series[k].append(iter_df[k])
        for k, l in field_series.items():
            df = pd.DataFrame(dict(enumerate(l)))
            if k == 'returncode':
                df_data['success', 'rate'] = (df == 0).mean(axis=1)
            else:
                df_data.update({
                    (k, 'mean'): df.mean(axis=1),
                    (k, 'std'): np.std(df, axis=1),
                    # np.float was removed in NumPy 1.24; plain float is equivalent.
                    (k, 'variation'): scipy.stats.variation(
                        df.to_numpy(dtype=float, na_value=np.nan), axis=1),
                })
                if k == 'memory_used':
                    df_data[k, 'max'] = df.max(axis=1)
        main_iter_df = pd.concat(iter_dfs,
                                 axis='columns',
                                 keys=range(self.iterations))
        header_df = pd.DataFrame(df_data)
        header_df.set_index('problem', inplace=True)
        main_df = pd.concat([header_df, main_iter_df], axis='columns')
        main_df.index.name = 'problem'

        logs = self.evaluate_dataframe(main_df, 'symbol_cost', self.iterations,
                                       models.keys(), epoch)

        with self.tensorboard.train_writer.as_default():
            tf.summary.scalar('time/epoch/solver_eval',
                              tf.timestamp() - time_begin,
                              step=epoch)

        print(f'Solver evaluation after epoch {epoch}:\n{yaml.dump(logs)}')

        return logs
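Because `ncycles(range(len(problems)), iterations)` emits all problem indices for one iteration before any for the next, the flat `records` list can be cut into contiguous per-iteration slices, exactly as the loop above does. A minimal sketch of that ordering, with hypothetical stand-in values:

import more_itertools

problems = ['p0', 'p1', 'p2']
iterations = 2
cases = list(more_itertools.ncycles(range(len(problems)), iterations))
assert cases == [0, 1, 2, 0, 1, 2]

# A flat result list in the same order can be sliced per iteration:
records = [f'{problems[i]}-run{n}' for n in range(iterations) for i in range(len(problems))]
for n in range(iterations):
    chunk = records[n * len(problems):(n + 1) * len(problems)]
    assert chunk == [f'p{i}-run{n}' for i in range(len(problems))]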