Code Example #1
File: format.py Project: Surpris/DataViewerBase
 def tags(beamline, runs):
     hi_tags = fromiter(map(read_hightagnumber(beamline), runs), 'int')
     if not (hi_tags == hi_tags[0]).all():  # every run must share the same high tag
         raise ValueError('Not all the runs have a single high tag!')
     hi_tag = hi_tags[0]
     low_tags = concat(map(read_taglist_byrun(beamline), runs))
     return hi_tag, low_tags
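The readers here are evidently curried by beamline, which is why map() can call read_hightagnumber(beamline) once per run; they appear to wrap the facility's dbpy API. A rough, self-contained sketch of the same pattern with hypothetical stand-in readers (not the real dbpy functions) might look like this:

import numpy as np
from toolz import concat, curry

@curry
def read_hightagnumber(beamline, run):
    # hypothetical stand-in: every run reports the same high tag
    return 201701

@curry
def read_taglist_byrun(beamline, run):
    # hypothetical stand-in: a small per-run tag list
    return range(run * 10, run * 10 + 3)

def tags(beamline, runs):
    # currying fixes beamline, so map() only has to supply each run
    hi_tags = np.fromiter(map(read_hightagnumber(beamline), runs), 'int')
    if not (hi_tags == hi_tags[0]).all():
        raise ValueError('Not all the runs have a single high tag!')
    return hi_tags[0], concat(map(read_taglist_byrun(beamline), runs))

hi_tag, low_tags = tags('BL3', [1, 2, 3])
print(hi_tag, list(low_tags))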
Code Example #2
File: train.py Project: cgarciae/irs2
    def data_generator_fn():
        data_generator = dataset.training_set.random_batch_arrays_generator(
            batch_size)
        data_generator = cz.map(
            Dict(image=P[0], labels=P[1], keras_training=False),
            data_generator)
        # data_generator = cz.map(utils.get_processed_image, data_generator)

        return data_generator
Code Example #3
File: format.py Project: Surpris/DataViewerBase
        def __getitem__(self, key: str) -> iter:
            if key not in self.map:
                raise ValueError(
                    dedent("""\
                        Key '{}' is invalid!
                        Valid keys: {}
                        """.format(
                        key,
                        reduce(lambda k1, k2: '{}, {}'.format(k1, k2),
                               map(lambda k: "'{}'".format(k), self.map)))))
            ref = self.map[key]
            if 'api' not in ref:
                ref['api'] = 'dbpy'  # default api
            api = ref['api']

            # load reader
            if key not in self.cache:
                print("Loading '{}' reader...".format(key))
                if api not in ('dbpy', 'stpy'):
                    raise ValueError("Invalid api type '{}'!".format(api))
                if 'id' not in ref:
                    ref['id'] = key  # default id
                id = ref['id']
                if api == 'dbpy':
                    self.cache[key] = fromiter(
                        read_syncdatalist_float(id, self.hi_tag,
                                                tuple(map(int,
                                                          self.low_tags))),
                        'float')
                if api == 'stpy':
                    self.cache[key] = StorageWrapper(*map(int, self.runs),
                                                     beamline=self.beamline,
                                                     id=id)
                if 'deco' not in ref:
                    ref['deco'] = identity  # default deco
                print('Loaded!')

            data = self.cache[key]
            deco = ref['deco'] if hasattr(ref['deco'], '__call__') else eval(
                ref['deco'])
            if api == 'dbpy':
                return map(deco, data)
            if api == 'stpy':
                return map(compose(deco, data.__getitem__), self.low_tags)
Code Example #4
def make_dict_and_corpus(tokensets):

    dictionary = corpora.Dictionary(tokensets)

    dictionary.filter_extremes(no_below=5, no_above=0.5, keep_n=100000)
    dictionary.compactify()

    corpus = list(tlz.map(dictionary.doc2bow, tokensets))

    return (dictionary, corpus)
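A minimal usage sketch, assuming corpora is gensim.corpora and tlz is toolz (or cytoolz, whose older releases re-export the builtin map); the token sets are made up. Note that filter_extremes(no_below=5, no_above=0.5) keeps only tokens that occur in at least five documents but in no more than half of them:

from gensim import corpora
import toolz as tlz

# twelve made-up documents: 'map'/'lazy' occur in 5 of 12 (kept),
# 'reduce'/'eager' occur in 7 of 12 (> 50%, dropped by no_above)
tokensets = [['map', 'lazy']] * 5 + [['reduce', 'eager']] * 7

dictionary, corpus = make_dict_and_corpus(tokensets)
print(dictionary.token2id)   # e.g. {'lazy': 0, 'map': 1}
print(corpus[:2])            # bag-of-words vectors, one per token set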
Code Example #5
def csv(specs):
    """Converts a spec dict to a csv entry

    Args:
        specs (sequence): spec dicts

    Returns:
        sequence: csv records
    """

    return map(lambda spec: '{x},{y},{value}\n'.format(**spec), specs)
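csv() returns a lazy map object, so nothing is formatted until the records are consumed. With a couple of made-up spec dicts:

specs = [
    {'x': 0, 'y': 0, 'value': 42},
    {'x': 1, 'y': 0, 'value': 7},
]
print(list(csv(specs)))   # ['0,0,42\n', '1,0,7\n']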
Code Example #6
def write(values, filepath):
    """Writes a sequence of values to a filepath

    Args:
        values (sequence): Values to write
        filepath (str): Full path of raster file to write

    Returns:
        tuple: (filepath, bytes written)
    """

    with open(filepath, 'w+') as handle:
        return (filepath, reduce(add, map(handle.write, values)))
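reduce(add, map(handle.write, values)) forces the lazy map and totals the counts returned by each write call; reduce and add are assumed to come from functools and operator. A small usage sketch with an arbitrary output path:

from functools import reduce
from operator import add

records = ['0,0,42\n', '1,0,7\n']
path, count = write(records, '/tmp/records.csv')
print(path, count)   # /tmp/records.csv 13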
Code Example #7
File: carve.py Project: jondot/carve
def treemap(tree, mapfn, key=None, path=()):
    res = tree
    if isinstance(tree, dict):
        res = keyfilter(
            identity,
            itemmap(
                lambda item: treemap(item[1], mapfn, item[0], path +
                                     (item[0], )), tree),
        )

    elif isinstance(tree, list):
        res = list(map(lambda t: treemap(t, mapfn, None, path), tree))

    return xform(key, res, mapfn, path)
Code Example #8
def locations(spec):
    """Generator for all locations represented by a spec

    Args:
        spec (dict): xsize, ysize, pixel_x, pixel_y, ulx, uly keys

    Returns:
        Generator yielding spec plus x, y, x_index and y_index keys
    """

    locator = partial(locate, spec=spec)
    indices = ((y, x) for y in range(spec['ysize'])
               for x in range(spec['xsize']))
    return map(locator, indices)
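locate() is not part of this snippet; a plausible stand-in consistent with the docstring (purely an assumption, not the project's actual implementation) derives coordinates from the upper-left corner and the pixel size:

from functools import partial

def locate(index, spec):
    # hypothetical helper: the real locate() may differ
    y_index, x_index = index
    return dict(spec,
                x_index=x_index, y_index=y_index,
                x=spec['ulx'] + x_index * spec['pixel_x'],
                y=spec['uly'] + y_index * spec['pixel_y'])

spec = {'xsize': 2, 'ysize': 2, 'pixel_x': 30, 'pixel_y': -30, 'ulx': 0, 'uly': 0}
print(list(locations(spec)))   # four dicts, one per pixel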
Code Example #9
def map_delayed(func, mps, **kwargs):
    func = partial(func, **kwargs)
    delayed_func = delayed(func)
    return map(delayed_func, mps)
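delayed here is presumably dask.delayed (or joblib.delayed); assuming dask, the lazy map of Delayed objects can be forced with dask.compute:

from functools import partial
from dask import compute, delayed

def scale(x, factor=1):
    return x * factor

tasks = list(map_delayed(scale, [1, 2, 3], factor=10))
print(compute(*tasks))   # (10, 20, 30)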
Code Example #10
def main(device, epochs, batch_size):

    # seed: reproducible results
    seed = 32
    np.random.seed(seed=seed)
    random.seed(seed)

    # dataget
    dataset = data("german-traffic-signs").get()

    # data_generator
    data_generator = dataset.training_set.random_batch_arrays_generator(
        batch_size)
    data_generator = utils.batch_random_image_rotation(data_generator, 15.0)
    data_generator = cz.map(Dict(features=P[0], labels=P[1]), data_generator)

    graph = tf.Graph()
    sess = tf.Session(graph=graph)

    # inputs
    inputs = SupervisedInputs(
        name=network_name + "_inputs",
        graph=graph,
        sess=sess,
        # tensors
        features=dict(shape=(None, 32, 32, 3)),
        labels=dict(shape=(None, ), dtype=tf.uint8))

    # create model template
    template = Model(
        n_classes=43,
        name=network_name,
        model_path=model_path,
        graph=graph,
        sess=sess,
        seed=seed,
        optimizer=tf.train.AdamOptimizer,
    )

    # model

    with tf.device(device):
        inputs = inputs()
        model = template(inputs)

    # initialize variables
    model.initialize()

    # fit
    print("training")
    model.fit(
        data_generator=data_generator,
        epochs=epochs,
        log_summaries=True,
        log_interval=10,
        print_test_info=True,
    )

    # save
    print("saving model")
    model.save()
Code Example #11
File: pynhd.py Project: jsta/pynhd
    def stage_data(self) -> pd.DataFrame:
        """Stage the NHDPlus Attributes database and save to nhdplus_attrs.feather."""
        r = self.get_children(self.nhd_attr_item)

        titles = tlz.pluck("title", r["items"])
        titles = tlz.concat(
            tlz.map(tlz.partial(re.findall, "Select(.*?)Attributes"), titles))
        titles = tlz.map(str.strip, titles)

        main_items = dict(zip(titles, tlz.pluck("id", r["items"])))

        files = {}
        soil = main_items.pop("Soil")
        for i, item in main_items.items():
            r = self.get_children(item)

            titles = tlz.pluck("title", r["items"])
            titles = tlz.map(
                lambda s: s.split(":")[1].strip() if ":" in s else s, titles)

            child_items = dict(zip(titles, tlz.pluck("id", r["items"])))
            files[i] = {t: self.get_files(c) for t, c in child_items.items()}

        r = self.get_children(soil)
        titles = tlz.pluck("title", r["items"])
        titles = tlz.map(lambda s: s.split(":")[1].strip()
                         if ":" in s else s, titles)

        child_items = dict(zip(titles, tlz.pluck("id", r["items"])))
        stat = child_items.pop("STATSGO Soil Characteristics")
        ssur = child_items.pop("SSURGO Soil Characteristics")
        files["Soil"] = {t: self.get_files(c) for t, c in child_items.items()}

        r = self.get_children(stat)
        titles = tlz.pluck("title", r["items"])
        titles = tlz.map(lambda s: s.split(":")[1].split(",")[1].strip(),
                         titles)
        child_items = dict(zip(titles, tlz.pluck("id", r["items"])))
        files["STATSGO"] = {
            t: self.get_files(c)
            for t, c in child_items.items()
        }

        r = self.get_children(ssur)
        titles = tlz.pluck("title", r["items"])
        titles = tlz.map(lambda s: s.split(":")[1].strip(), titles)
        child_items = dict(zip(titles, tlz.pluck("id", r["items"])))
        files["SSURGO"] = {
            t: self.get_files(c)
            for t, c in child_items.items()
        }

        chars = []
        types = {"CAT": "local", "TOT": "upstream_acc", "ACC": "div_routing"}
        for t, dd in files.items():
            for d, fd in dd.items():
                for f, u in fd.items():
                    chars.append({
                        "name": f,
                        "type": types.get(f[-3:], "other"),
                        "theme": t,
                        "description": d,
                        "url": u[0],
                        "meta": u[1],
                    })
        char_df = pd.DataFrame(chars, dtype="category")
        char_df.to_feather(self.char_feather)
        return char_df
Code Example #12
File: server.py Project: lemonade512/KLSBadgeHack
            # instead of modifying/using global state, choosing to pass in
            # the updated request as a param means that the handler functions
            # are all pure functions of their input params.
            #
            # This should make testing them easier - it's one less thing to mock.
            return req_fun(t.merge(opts, r), *args, **kwargs)

        return requirejson_wrapper
    return reqjson

# converts a dictionary to a flat list of key/value pairs.
# each key can have multiple values and they will all be unpacked accordingly.
multipairs=lambda d: list(t.concat(t.map(
                          lambda i: (lambda k,v: t.concat((k,e) for e in v)
                                                if isinstance(v,list)
                                                else (k,v))(i[0],i[1]),
                      d.items())))
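
# usage sketch (assuming t is toolz/cytoolz, matching the other t.* calls here):
#   >>> multipairs({'tag': ['red', 'blue'], 'id': 7})
#   ['tag', 'red', 'tag', 'blue', 'id', 7]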

# --------------------------------------------------------------------------
#                                      REST API
# --------------------------------------------------------------------------
@app.get('/')
def default(message=''):
  return template('signin', message=message)

@app.post('/signin')
@params(keys=['barcode'])
def signin(p):
  u = list(filter(lambda v: v.id == p['barcode'], data['users'].values()))  # list() so len() below works on Python 3
  if len(u) > 0: