Example #1
    def disperse_helper(b, part_seq_1):
        # Recursively spreads the b items of part_seq_1 apart by yielding the
        # middle element first and then interleaving the two halves.  Relies
        # on the enclosing module's split(), first() and concat() helpers.
        if b != 0:
            half_diff = float(b) / 2.0

            mid_1 = int(math.floor(half_diff))
            mid_2 = int(math.ceil(half_diff))

            if 0 < mid_1 and b > mid_2:
                part_seq_1, part_seq_2 = itertools.tee(part_seq_1)

                front_mid_1_seq, mid_1_val, _ = split(mid_1, part_seq_1)
                _, mid_2_val, back_mid_2_seq = split(mid_2, part_seq_2)
                del _

                mid_2_val, mid_2_val_copy = itertools.tee(mid_2_val)
                back_mid_2_seq = concat([mid_2_val_copy, back_mid_2_seq])

                yield first(mid_2_val)

                for _1, _2 in zip(disperse_helper(mid_1, front_mid_1_seq),
                                  disperse_helper(b - mid_2, back_mid_2_seq)):
                    yield _2
                    yield _1

                if mid_1 != mid_2:
                    yield first(mid_1_val)
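
The helper leans on a split() function that isn't shown. Below is a minimal sketch of a compatible implementation plus a driver, assuming split(i, seq) returns (items before index i, a one-item iterator at index i, the rest), that first and concat come from toolz, and that disperse_helper is lifted to module level. Note the helper yields b - 1 items and leaves the head element to its caller.

import itertools
import math
from toolz import concat, first

def split(i, seq):
    # Hypothetical stand-in for the project's own split() helper
    seq = iter(seq)
    front = list(itertools.islice(seq, i))  # items before index i
    mid = list(itertools.islice(seq, 1))    # the item at index i
    return iter(front), iter(mid), seq      # the rest stays lazy

list(disperse_helper(5, iter(range(5))))    # -> [3, 4, 1, 2]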
Example #2
    def extract_container_state(state_json):
        # state_json is a Kubernetes-style container state mapping such as
        # {"running": {"startedAt": "..."}}: the outer key names the state
        # and the inner key describes it
        if not state_json:
            return "N/A", "N/A"

        current_state = itertoolz.first(state_json)
        current_state_details = state_json.get(current_state)
        state_at = itertoolz.first(current_state_details)
        state_at_details = current_state_details.get(state_at)
        if state_at == "startedAt":
            state_at_details = arrow.get(state_at_details).humanize()

        # Return a (state, description) pair from both branches
        return current_state, f"{current_state.strip()} ({state_at.strip()}: {str(state_at_details).strip()})"
Example #3
def set_base_data(request):
    context = {
        'settings': settings,
    }

    path = request.path
    excluded = (
        '/%s' % settings.ADMIN_URL,
        '/%s/' % settings.API_URL.strip('/'),
    )
    if path.startswith(excluded):
        return context

    get_array_item_by_name = lambda name, collection: [item for item in collection if item.name == name]
    get_first = lambda collection: first(collection) if len(collection) else None
    get_by_name = lambda name, collection: get_first(get_array_item_by_name(name, collection))

    section_main = Section.get_main()

    context.update({
        'host': settings.SITE_HOST,
        'config': globals.config,
        'category_product': list(Category.get_main()),
        'top_menu': section_main,
        'catalog_section': globals.catalog,
        'cart_section': get_by_name('shopping-cart', section_main),
    })

    return context
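
Judging by its (request) -> context signature, set_base_data is a Django context processor; a hypothetical registration in settings.py would look like this (the module path is invented for illustration):

TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'DIRS': [],
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
            'myapp.context_processors.set_base_data',  # hypothetical module path
        ],
    },
}]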
Example #4
def test_print_table(labels, title):
    table = cli_inference.print_table(labels, title, print=False)
    assert isinstance(table, rich.table.Table)
    assert table.title == title
    unique = itertoolz.count(itertoolz.unique(labels))
    assert table.row_count == unique + 1
    assert all(label in getattr(itertoolz.first(table.columns), "_cells")
               for label in labels)
    table = cli_inference.print_table(labels, title, print=True)
Example #5
def test_super_learner():
    np.random.seed(0)
    X, y = load_boston(return_X_y=True)
    X = pandas.DataFrame(X, columns=['x%d' % i for i in range(X.shape[1])])
    model = CrossValidatingEstimator(
        SuperLearner(
            [('linear', LinearRegression()), ('earth', Earth(max_degree=2))],
            LinearRegression(),
            cv=5,
            n_jobs=1),
        cv=5)
    cv_pred = model.fit_predict(X, y)
    pred = model.predict(X)
    cv_r2 = r2_score(y, cv_pred)
    # Best CV R^2 among the individual component estimators
    best_component_cv_r2 = max([
        r2_score(y, est.cv_predictions_)
        for est in model.estimator_.cross_validating_estimators_.values()
    ])
    assert cv_r2 >= .9 * best_component_cv_r2

    code = sklearn2code(model, ['predict'], numpy_flat)
    module = exec_module('module', code)
    test_pred = module.predict(**X)
    try:
        assert_array_almost_equal(np.ravel(pred), np.ravel(test_pred))
    except AssertionError:
        # Show only the entries that disagree before re-raising
        idx = np.abs(np.ravel(pred) - np.ravel(test_pred)) > .000001
        print(np.ravel(pred)[idx])
        print(np.ravel(test_pred)[idx])
        raise
    print(r2_score(y, pred))
    print(r2_score(y, cv_pred))

    print(
        max([
            r2_score(y, est.cv_predictions_)
            for est in model.estimator_.cross_validating_estimators_.values()
        ]))
Example #6
def update_in(d, keys, fn, default=None):
    # Apply fn to the value at the nested key path `keys`, creating
    # intermediate dicts along the way and seeding a missing leaf with
    # `default` before fn is applied
    key = first(keys)
    if len(keys) == 1:
        if key not in d:
            d[key] = default
        d[key] = fn(d[key])
    else:
        if key not in d:
            d[key] = dict()
        update_in(d[key], keys[1:], fn, default)

    return d
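
A quick usage sketch for update_in (the dict and lambdas are purely illustrative; first is assumed to come from toolz):

from toolz import first

d = {'a': {'b': 1}}
update_in(d, ['a', 'b'], lambda x: x + 1)         # -> {'a': {'b': 2}}
update_in(d, ['a', 'c'], lambda x: x, default=0)  # -> {'a': {'b': 2, 'c': 0}}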
Example #7
def crimes_predict(hour, day_of_week, month, grid_id):
    if request.method == 'GET':
        try:
            # model.predict returns an array; first() pulls out the single prediction
            prediction = int(itertoolz.first(model.predict([[hour, day_of_week, month, grid_id]])))
            payload = {"group_id": prediction}
            success = True
            code = 200
        except Exception:
            payload = None
            success = False
            code = 400
        finally:
            return jsonify({"payload": payload, "success": success}), code
Example #8
    def _choose_best_candidate(self, score_candidates):
        # A score <= 0 means there is no evidence of a title, so keep
        # only the candidates with a positive score
        filtered_candidates = dicttoolz.valfilter(lambda x: x > 0,
                                                  score_candidates)
        if not filtered_candidates:
            return None

        if len(filtered_candidates) == 1:
            return itertoolz.first(filtered_candidates)

        # Longest candidate first
        c = sorted(filtered_candidates, key=len, reverse=True)
        if len(c) == 2:
            # Prefer the shorter candidate when the longer one contains it
            return c[1] if c[1] in c[0] else c[0]
        return c[0]
Example #9
def cached_tc_query(query, test_cases, multiple=False):
    def title_match(tc):
        return query in tc.title

    matches = list(filter(title_match, test_cases))
    retval = []
    if not multiple and len(matches) > 1:
        raise Exception("Cannot have more than one match; narrow your query")
    elif len(matches) == 0:
        if not multiple:
            retval = False
    else:
        if multiple:
            retval = matches
        else:
            retval = itz.first(matches)
    return retval
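
A hypothetical usage sketch (the test-case objects only need a .title attribute here; itz is assumed to be toolz.itertoolz):

from types import SimpleNamespace

cases = [SimpleNamespace(title='login works'), SimpleNamespace(title='login fails')]
cached_tc_query('fails', cases).title                # -> 'login fails'
len(cached_tc_query('login', cases, multiple=True))  # -> 2
cached_tc_query('logout', cases)                     # -> False (no match, multiple=False)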
Example #10
    def serializer_for(self, type: Type) -> Type[Serializer]:
        if self._serializer_map.get(type):
            return self._serializer_map[type]
        else:
            # Check whether the type is a subclass of any registered binding;
            # each binding is a (type, serializer) pair
            possible_bindings = funcy.lfilter(
                lambda t: issubclass(type, t), self._bindings
            )
            if len(possible_bindings) == 0:
                # No serializer found, so fall back to the default serializer
                if self._default_serializer:
                    return self._default_serializer
                else:
                    raise Exception(f"No serializer could be found for the type {type}")
            elif len(possible_bindings) == 1:
                return possible_bindings[0][1]
            else:
                logger.warning(
                    f"More than one serializer found for type {type}. "
                    "Choosing the first one."
                )
                return itertoolz.first(possible_bindings)[1]
Example #11
    def sequences():
        freq_threshold = args.frequencycutoff

        seq_id = 0
        for f_id, fn in enumerate(args.files, start=2):
            seq_count = 0
            set_id = first(os.path.basename(fn).split('.', 1))
            with open(fn) as in_fd:
                for seq in SeqIO.parse(in_fd, 'fasta'):
                    try:
                        freq = int(seq.description.rsplit('_', 1)[1])
                    except (ValueError, IndexError):
                        warnings.warn(
                            'Could not find frequency in sequence name')
                        freq = 1

                    if freq < freq_threshold:
                        continue

                    g.add_node(seq_id, sample=str(set_id), freq=freq)

                    seq_count += 1
                    seq_id += 1
                    yield seq
Example #12
    def si_dict():
        # Summarize the first special issue of the paper
        brain = first(paper_view.getSpecialIssues())
        return {'title': brain.Title, 'url': brain.getURL()}
Example #13
def test_get_all():
    store.put(trekkies)
    assert first(store.get_all()) == trekkies
    store.delete(trekkies.id)
Example #14
class OpenFileInZipAsCSV:
    # Context manager that opens a member of a zip archive as a CSV reader
    def __init__(self, filename, zipfile):
        self.filename = filename
        self.zipfile = zipfile
        self.stream = None

    def __enter__(self):
        self.stream = self.zipfile.open(self.filename)
        ustream = TextIOWrapper(self.stream, 'utf-8')
        return csv.reader(ustream, quotechar='"')

    def __exit__(self, *exc_info):
        self.stream.close()


open_as_csv = curry(OpenFileInZipAsCSV, gtf)
by_nth = curry(lambda nth, uid, obj: obj[nth] == str(uid))
list_first = curry(lambda it: [first(it)])
list_first_or_empty = excepts(StopIteration, list_first, lambda _: [])
filter_by_nth = curry(
    lambda nth, uid, stream: filter(by_nth(nth, uid), stream))
filter_by_uid = filter_by_nth(0)


@curry
def swap(i, j, arr):
    arr[i], arr[j] = arr[j], arr[i]
    return arr
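
A small usage sketch for the curried swap helper (the lists are just illustrations):

swap_first_two = swap(0, 1)   # partially applied thanks to @curry
swap_first_two([1, 2, 3])     # -> [2, 1, 3]
swap(0, 2, [1, 2, 3])         # -> [3, 2, 1]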


def list_file(name):
    with open_as_csv(name) as stream:
        return list(stream)
Example #15
def syms_super_learner(estimator):
    return syms(first(estimator.cross_validating_estimators_.values()))
Example #16
def test_first():
    assert first('ABCDE') == 'A'
    assert first((3, 2, 1)) == 3
    assert isinstance(first({0: 'zero', 1: 'one'}), int)
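
One edge case worth noting next to these asserts (a sketch, assuming toolz.first): on an empty iterable, first() raises StopIteration rather than returning None.

from toolz import first

try:
    first([])
except StopIteration:
    pass  # first() propagates the StopIteration from the underlying iterator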
Example #17
def test_interpose():
    assert "a" == first(rest(interpose("a", range(1000000000))))
    assert "tXaXrXzXaXn" == "".join(interpose("X", "tarzan"))
    assert list(interpose(0, itertools.repeat(1, 4))) == [1, 0, 1, 0, 1, 0, 1]
    assert list(interpose('.', ['a', 'b', 'c'])) == ['a', '.', 'b', '.', 'c']
Example #18
def crimes_predict(hour, day_of_week, month, grid_id):
    if request.method == 'GET':
        try:
            prediction = int(itertoolz.first(model.predict([[hour, day_of_week, month, grid_id]])))
            payload = {"group_id": prediction}
            success = True
            code = 200
        except Exception:
            payload = None
            success = False
            code = 400
        finally:
            return jsonify({"payload": payload, "success": success}), code


if __name__ == '__main__':

    base_path = path.dirname(path.abspath(__file__))
    # os.path.split() returns a (head, tail) pair, so first() keeps the parent directory
    path_one_step_back = itertoolz.first(path.split(base_path))
    path_two_step_back = itertoolz.first(path.split(path_one_step_back))

    crimes_dir_exists = path.exists(path.join(path_one_step_back, 'csv-files'))

    if crimes_dir_exists:
        df = pd.read_csv(path.join(path_one_step_back, 'csv-files', 'crimes.csv'))
    else:
        df = pd.read_csv(path.join(path_two_step_back, 'csv-files', 'crimes.csv'))

    group_ids = [57, 20, 41, 34, 36, 19, 42]
    columns_to_drop = ['id', 'offense_code', 'offense_group', 'longitude', 'latitude', 'year', 'tract_id']

    df = df[df['group_id'].isin(group_ids)].copy()
    df.drop(columns_to_drop, axis=1, inplace=True)
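
For reference, a tiny sketch of how the first() + os.path.split() combination above walks up the directory tree (the paths are purely illustrative):

from os import path
from toolz import itertoolz

base = '/srv/app/api'
itertoolz.first(path.split(base))  # path.split() -> ('/srv/app', 'api'), so this is '/srv/app'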