Example #1
def test_cost_guided_traversal():
    tree = LazyTree(root=(0, 1), child_map=split, view=lambda x: x[1] - x[0])
    assert_contracting(tree.cost_guided_traversal(lambda x: -x))

    sizes1 = fn.take(5, tree.cost_guided_traversal(lambda x: x))
    sizes2 = fn.take(5, tree.bfs())
    assert sizes1 == sizes2
Example #2
def test_gen_dist_orth():
    p1 = mbp.from_threshold(lambda x: x[1] >= 0.8, 2)
    p2 = mbp.from_threshold(lambda x: x[0] >= 0.6, 2)

    for d12 in fn.take(10, mdth.gen_directed_dists(p1, p2)):
        assert d12.bot - 1e-3 <= 0.6 <= d12.top + 1e-3

    for d21 in fn.take(10, mdth.gen_directed_dists(p2, p1)):
        assert d21.bot - 1e-3 <= 0.8 <= d21.top + 1e-3

    for d in fn.take(10, mdth.gen_dists(p1, p2)):
        assert d.bot - 1e-3 <= 0.8 <= d.top + 1e-3
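
Both tests above lean on funcy's take returning at most n items of a lazy iterator as a plain list; a minimal sketch of that behaviour (illustrative only, assuming nothing beyond funcy itself):

import funcy as fn

assert fn.take(3, iter(range(10))) == [0, 1, 2]   # first n items, materialised as a list
assert fn.take(3, iter(range(2))) == [0, 1]       # shorter input: returns whatever exists
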
Example #3
File: mode.py Project: spaceone/charla
def process_channel_modes(user, channel, modes):
    op = None
    modes = iter(modes)
    while True:
        try:
            mode = next(modes)

            if mode and mode[0] == u("+"):
                op = u("+")
                mode = mode[1:]
            elif mode and mode[0] == u("-"):
                op = u("-")
                mode = mode[1:]

            if mode not in channel_modes:
                yield False, ERR_UNKNOWNMODE(mode)
            else:
                nargs, f = channel_modes[mode]
                for notify, message in f(user,
                                         channel,
                                         mode,
                                         *take(nargs, modes),
                                         op=op):
                    yield notify, message
        except StopIteration:
            break
Example #4
    def get(self):
        """
        Retrieve up to 20 queries modified in the last 7 days.

        Responds with a list of :ref:`query <query-response-label>` objects.
        """

        if settings.FEATURE_DUMB_RECENTS:
            results = models.Query.by_user(self.current_user).order_by(
                models.Query.updated_at.desc()).limit(10)
            queries = [
                q.to_dict(with_last_modified_by=False, with_user=False)
                for q in results
            ]
        else:
            queries = models.Query.recent(self.current_user.group_ids,
                                          self.current_user.id)
            recent = [
                d.to_dict(with_last_modified_by=False, with_user=False)
                for d in queries
            ]

            global_recent = []
            if len(recent) < 10:
                global_recent = [
                    d.to_dict(with_last_modified_by=False, with_user=False)
                    for d in models.Query.recent(self.current_user.group_ids)
                ]

            queries = take(
                20,
                distinct(chain(recent, global_recent), key=lambda d: d['id']))

        return queries
Example #5
    def curation_stats(self):
        trailing_24hr_t = time.time() - datetime.timedelta(
            hours=24).total_seconds()
        trailing_7d_t = time.time() - datetime.timedelta(
            days=7).total_seconds()

        reward_24h = 0.0
        reward_7d = 0.0

        for reward in take(5000,
                           self.history_reverse(filter_by="curation_reward")):

            timestamp = parse_time(reward["timestamp"]).timestamp()
            if timestamp > trailing_7d_t:
                reward_7d += Amount(reward["reward"]).amount

            if timestamp > trailing_24hr_t:
                reward_24h += Amount(reward["reward"]).amount

        reward_7d = self.converter.vests_to_sp(reward_7d)
        reward_24h = self.converter.vests_to_sp(reward_24h)
        return {
            "24hr": reward_24h,
            "7d": reward_7d,
            "avg": reward_7d / 7,
        }
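
The take(5000, ...) above merely caps how far back the reverse history scan goes before the timestamp filters run; a minimal sketch of that bounding pattern (the generator is a stand-in, not the real account-history API):

from itertools import count
from funcy import take

history_reverse = ({"index": i, "reward": "0.1 VESTS"} for i in count())  # stand-in, unbounded
scanned = take(5000, history_reverse)   # inspect at most 5000 entries back
assert len(scanned) == 5000
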
Example #6
    def get(self):
        """
        Lists dashboards modified in the last 7 days.
        """
        if settings.FEATURE_DUMB_RECENTS:
            dashboards = models.Dashboard.all(
                self.current_org, self.current_user.group_ids,
                self.current_user.id).order_by(
                    models.Dashboard.updated_at.desc()).limit(10)
            dashboards = [d.to_dict() for d in dashboards]
        else:
            recent = [
                d.to_dict()
                for d in models.Dashboard.recent(self.current_org,
                                                 self.current_user.group_ids,
                                                 self.current_user.id,
                                                 for_user=True)
            ]

            global_recent = []
            if len(recent) < 10:
                global_recent = [
                    d.to_dict() for d in models.Dashboard.recent(
                        self.current_org, self.current_user.group_ids,
                        self.current_user.id)
                ]

            dashboards = take(
                20,
                distinct(chain(recent, global_recent), key=lambda d: d['id']))

        return dashboards
Example #7
    def take(self, limit=5):
        """ Take up to n (n = limit) posts/comments at a time.

        You can call this method as many times as you want. Once
        there are no more posts to take, it will return [].

        Returns:
            List of posts/comments in a batch of size up to `limit`.
        """
        # get main posts only
        comment_filter = is_comment if self.comments_only else complement(
            is_comment)
        hist = filter(comment_filter, self.history)

        # filter out reblogs
        hist2 = filter(lambda x: x["author"] == self.account.name, hist)

        # post edits will re-appear in history
        # we should therefore filter out already seen posts
        def ensure_unique(post):
            if post["permlink"] not in self.seen_items:
                self.seen_items.add(post["permlink"])
                return True

        unique = filter(ensure_unique, hist2)

        serialized = filter(bool, map(silent(Post), unique))

        batch = take(limit, serialized)
        return batch
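
Because self.history is consumed lazily, repeated calls to take(limit, ...) drain successive batches until the stream runs dry, which is exactly what the docstring promises; a small illustration with an integer stream standing in for the filtered history:

from funcy import take

stream = iter(range(7))
assert take(3, stream) == [0, 1, 2]
assert take(3, stream) == [3, 4, 5]
assert take(3, stream) == [6]   # last, partial batch
assert take(3, stream) == []    # exhausted
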
Example #8
def assert_contracting(elems):
    max_size = 1
    for size in fn.take(7, elems):
        assert 0 <= size <= max_size
        max_size = size

    assert max_size < 0.5
Example #9
File: commands.py Project: sirex/databot
 def run(self, args):
     key = parse_expression(args.key, args.expression)
     exclude = args.exclude.split(',') if args.exclude else None
     source = self.pipe(args.source)
     target = self.pipe(args.target)
     errors = target(source).errors(key, reverse=True)
     errors = funcy.take(args.limit, errors)
     self.bot.output.errors(errors, exclude)
Example #10
File: queries.py Project: yokotty/redash
    def get(self):
        recent = [d.to_dict() for d in models.Query.recent(current_user.id)]

        global_recent = []
        if len(recent) < 10:
            global_recent = [d.to_dict() for d in models.Query.recent()]

        return take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
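
The take(20, distinct(chain(...))) expression merges personal and global results, drops duplicate ids while keeping the first (personal) occurrence, and caps the output at 20; a minimal sketch with plain dicts standing in for the to_dict() results:

from itertools import chain
from funcy import distinct, take

recent = [{'id': 1}, {'id': 2}]
global_recent = [{'id': 2}, {'id': 3}]
merged = take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
assert merged == [{'id': 1}, {'id': 2}, {'id': 3}]
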
Example #11
File: commands.py Project: sirex/databot
 def run(self, args):
     key = parse_expression(args.key, args.expression)
     exclude = args.exclude.split(',') if args.exclude else None
     source = self.pipe(args.source)
     target = self.pipe(args.target)
     errors = target(source).errors(key, reverse=True)
     errors = funcy.take(args.limit, errors)
     self.bot.output.errors(errors, exclude)
Example #12
File: dashboards.py Project: zoiew/redash
    def get(self):
        recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.groups, self.current_user.id, for_user=True)]

        global_recent = []
        if len(recent) < 10:
            global_recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.groups, self.current_user.id)]

        return take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
Example #13
def _mygene_fetch(queries, scopes, specie):
    # To retry or ignore only one chunk on error
    @ignore(requests.HTTPError, default=[])
    @log_errors(lambda msg: cprint(msg, 'red'), stack=False)
    @retry(10, errors=requests.HTTPError, timeout=lambda n: 5 * 1.4**n)
    @log_errors(lambda msg: cprint(msg, 'yellow'), stack=False)
    def querymany(qs):
        try:
            return mg.querymany(qs,
                                scopes=scopes,
                                fields=['entrezgene', 'symbol'],
                                species=specie,
                                email='*****@*****.**',
                                verbose=False)
        except requests.HTTPError as e:
            # Do not retry on Bad Request
            if e.response.status_code == 400:
                return []
            raise

    cprint('> Going to query %d genes in %s...' % (len(queries), scopes),
           'cyan')
    cprint('>     sample queries: %s' % ', '.join(take(8, queries)), 'cyan')
    # Read cache
    prefix = '%s-%s:' % (SPECIE_PREFIXES[specie], PREFIXES[scopes])
    keys = [prefix + q for q in queries]
    res = {
        k: pickle.loads(v) if v else ''
        for k, v in zip(queries, mget(keys)) if v is not None
    }
    if res:
        queries = set(queries) - set(res)
        print(
            ('Got %d from cache, %d queries left' % (len(res), len(queries))))

    if queries:
        mg = mygene.MyGeneInfo()
        # Looks like sorting groups bad queries
        data = cat(
            querymany(qs)
            for qs in chunks(500, tqdm(sorted(queries), leave=False)))
        new = {
            str(item['query']): (item['entrezgene'], item['symbol'])
            for item in data if not item.get('notfound')
            and 'entrezgene' in item and 'symbol' in item
        }
        res.update(new)
        # Cache results and fails
        pipe = redis_client.pipeline(transaction=False)
        for k, v in new.items():
            pipe.setex(prefix + k, CACHE_TIMEOUT, pickle.dumps(v, -1))
        for k in queries - set(new):
            pipe.setex(prefix + k, CACHE_TIMEOUT, '')
        pipe.execute()

    res = {k: v for k, v in res.items() if v != ''}
    cprint('-> Got %d matches' % len(res), 'yellow')
    return res
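
The cache-miss path above splits the sorted queries into batches with chunks(500, ...), flattens the per-batch results with cat, and uses take(8, queries) only to sample a few names for the log line; a small, self-contained illustration of those helpers:

from funcy import cat, chunks, take

queries = ['q%d' % i for i in range(1200)]
assert take(3, queries) == ['q0', 'q1', 'q2']        # sample for logging
batches = list(chunks(500, queries))                 # batched requests
assert [len(b) for b in batches] == [500, 500, 200]
assert len(list(cat(batches))) == 1200               # flattened responses
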
Example #14
    def get(self):
        queries = models.Query.recent(self.current_user.groups, self.current_user.id)
        recent = [d.to_dict(with_last_modified_by=False) for d in queries]

        global_recent = []
        if len(recent) < 10:
            global_recent = [d.to_dict(with_last_modified_by=False) for d in models.Query.recent(self.current_user.groups)]

        return take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
Example #15
File: queries.py Project: 5t111111/redash
    def get(self):
        queries = models.Query.recent(self.current_user.groups, self.current_user.id)
        recent = [d.to_dict(with_last_modified_by=False) for d in queries]

        global_recent = []
        if len(recent) < 10:
            global_recent = [d.to_dict(with_last_modified_by=False) for d in models.Query.recent(self.current_user.groups)]

        return take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
Example #16
    def get(self):
        """
        Lists dashboards modified in the last 7 days.
        """
        recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.group_ids, self.current_user.id, for_user=True)]

        global_recent = []
        if len(recent) < 10:
            global_recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.group_ids, self.current_user.id)]

        return take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
Example #17
def shuffle(bot, nodes, args) -> Node:

    max = args.get('max')  # None works as inf
    batch = args.get('batch', max)  # None works as inf
    nodes = take(batch, nodes)
    if max and max <= len(nodes):
        nodes = random.sample(nodes, k=max)
    else:
        nodes = random.sample(nodes, k=len(nodes))

    return nodes, {}
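
shuffle first bounds how many nodes are even read with take(batch, ...) and then samples at most max of them; a minimal sketch of the same idea, using min() in place of the explicit branch:

import random
from funcy import take

nodes = take(6, range(100))                          # read at most `batch` nodes
picked = random.sample(nodes, k=min(3, len(nodes)))  # keep at most `max` of them
assert len(picked) == 3 and set(picked) <= set(nodes)
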
Example #18
def main():
    (x_train, y_train), (x_validation, y_validation) = load_data()

    model = Model(*juxt(identity, computational_graph(y_train.shape[1]))(Input(
        shape=x_train.shape[1:])))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(momentum=0.9),
                  metrics=['accuracy'])  # The paper said nesterov=True, but the code had False...

    model.summary()
    # plot_model(model, to_file='./results/model.png')

    train_data = ImageDataGenerator(featurewise_center=True,
                                    featurewise_std_normalization=True,
                                    width_shift_range=0.125,
                                    height_shift_range=0.125,
                                    horizontal_flip=True)
    validation_data = ImageDataGenerator(featurewise_center=True,
                                         featurewise_std_normalization=True)

    for data in (train_data, validation_data):
        data.fit(x_train)  # Practically speaking, I don't think fitting the featurewise statistics on x_validation is feasible...

    batch_size = 128
    epoch_size = 200

    results = model.fit_generator(
        train_data.flow(x_train, y_train, batch_size=batch_size),
        steps_per_epoch=x_train.shape[0] // batch_size,
        epochs=epoch_size,
        callbacks=[
            LearningRateScheduler(
                partial(
                    getitem,
                    tuple(
                        take(
                            epoch_size,
                            concat(repeat(0.1, 60), repeat(0.02, 60),
                                   repeat(0.004, 40), repeat(0.0008))))))
        ],
        validation_data=validation_data.flow(x_validation,
                                             y_validation,
                                             batch_size=batch_size),
        validation_steps=x_validation.shape[0] // batch_size)

    with open('./results/history.pickle', 'wb') as f:
        pickle.dump(results.history, f)

    save_model(model, './results/model.h5')

    del model
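
The LearningRateScheduler above indexes into a tuple built by capping an infinite schedule with take: 0.1 for 60 epochs, 0.02 for 60, 0.004 for 40, then 0.0008 for the rest. A self-contained sketch of that construction, substituting itertools.chain/repeat for the concat/repeat helpers the script imports:

from itertools import chain, repeat
from funcy import take

epoch_size = 200
schedule = tuple(take(epoch_size, chain(repeat(0.1, 60), repeat(0.02, 60),
                                        repeat(0.004, 40), repeat(0.0008))))
assert len(schedule) == epoch_size
assert (schedule[0], schedule[60], schedule[120], schedule[160]) == (0.1, 0.02, 0.004, 0.0008)
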
Example #19
def update_account_ops_quick(mongo, username, batch_size=200, steemd_instance=None):
    """ Only update the latest history, limited to 1 batch of defined batch_size. """
    start_index = account_operations_index(mongo, username)

    # fetch latest records and update the db
    history = \
        Account(username,
                steemd_instance=steemd_instance).history_reverse(batch_size=batch_size)
    for event in take(batch_size, history):
        if event['index'] < start_index:
            return
        with suppress(DuplicateKeyError):
            mongo.AccountOperations.insert_one(json_expand(typify(event)))
Example #20
File: queries.py Project: appfolio/redash
    def get(self):
        """
        Retrieve up to 20 queries modified in the last 7 days.

        Responds with a list of :ref:`query <query-response-label>` objects.
        """
        queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)
        recent = [d.to_dict(with_last_modified_by=False) for d in queries]

        global_recent = []
        if len(recent) < 10:
            global_recent = [d.to_dict(with_last_modified_by=False) for d in models.Query.recent(self.current_user.group_ids)]

        return take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
Example #21
    def get(self):
        """
        Lists dashboards modified in the last 7 days.
        """
        if settings.FEATURE_DUMB_RECENTS:
            dashboards = models.Dashboard.all(self.current_org, self.current_user.group_ids, self.current_user.id).order_by(models.Dashboard.updated_at.desc()).limit(10)
            dashboards = [d.to_dict() for d in dashboards]
        else:
            recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.group_ids, self.current_user.id, for_user=True)]

            global_recent = []
            if len(recent) < 10:
                global_recent = [d.to_dict() for d in models.Dashboard.recent(self.current_org, self.current_user.group_ids, self.current_user.id)]

            dashboards = take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))

        return dashboards
Example #22
 def get_statistical_data(self, drug: str) -> Optional[Tuple[float, float]]:
     # print(list(self.blood_level_factors.keys()))
     # print(drug)
     if drug not in self.blood_level_factors:
         return None
     blood_levels = list(
         drop(
             7 * 24,
             take(
                 self.real_duration,
                 map(
                     lambda x: x[0] * x[1][0],
                     zip(self.drugs_timeline[drug],
                         self.factor_timeline[drug])))))
     levels_avg = sum(blood_levels) / len(blood_levels)
     sq_delta = list(map(lambda x: (x - levels_avg)**2, blood_levels))
     levels_std_dev = math.sqrt(sum(sq_delta) / len(blood_levels))
     return levels_avg, levels_std_dev
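
blood_levels composes take and drop to cut a window out of the timeline: take(self.real_duration, ...) caps the series and drop(7 * 24, ...) discards the first week of hourly samples. A minimal sketch of that windowing, with a plain range standing in for the zipped timeline:

from funcy import drop, take

hourly = range(24 * 30)                                # a month of hourly samples
window = list(drop(7 * 24, take(9 * 24, hourly)))
assert window[0] == 7 * 24 and len(window) == 2 * 24   # only days 8 and 9 remain
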
Example #23
def run_iterable(iterable, name, query):
    try:
        items = list(fn.take(20, iterable))
    except Exception as e:
        return dict(text=f"Query failed:{e}")
    resp = "\n".join("".join(l.val) for l in items[:20])
    count = len(items)
    if count < 20:
        cstr = f'{count}'
    else:
        cstr = 'First 20'
    return {
        "text": f"{cstr} results for `{name}({query})`.",
        "response_type": "in_channel",
        "attachments": [{
            "text": resp
        }]
    }
Example #24
    def get(self):
        """
        Retrieve up to 20 queries modified in the last 7 days.

        Responds with a list of :ref:`query <query-response-label>` objects.
        """
        queries = models.Query.recent(self.current_user.group_ids,
                                      self.current_user.id)
        recent = [d.to_dict(with_last_modified_by=False) for d in queries]

        global_recent = []
        if len(recent) < 10:
            global_recent = [
                d.to_dict(with_last_modified_by=False)
                for d in models.Query.recent(self.current_user.group_ids)
            ]

        return take(
            20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
Example #25
File: queries.py Project: kitsuyui/redash
    def get(self):
        """
        Retrieve up to 20 queries modified in the last 7 days.

        Responds with a list of :ref:`query <query-response-label>` objects.
        """

        if settings.FEATURE_DUMB_RECENTS:
            results = models.Query.by_user(self.current_user).order_by(models.Query.updated_at.desc()).limit(10)
            queries = [q.to_dict(with_last_modified_by=False, with_user=False) for q in results]
        else:
            queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)
            recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in queries]

            global_recent = []
            if len(recent) < 10:
                global_recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in models.Query.recent(self.current_user.group_ids)]

            queries = take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))

        return queries
Example #26
File: mode.py Project: MrSwiss/charla
def process_channel_modes(user, channel, modes):
    op = None
    modes = iter(modes)
    while True:
        try:
            mode = next(modes)

            if mode and mode[0] == u("+"):
                op = u("+")
                mode = mode[1:]
            elif mode and mode[0] == u("-"):
                op = u("-")
                mode = mode[1:]

            if mode not in channel_modes:
                yield False, ERR_UNKNOWNMODE(mode)
            else:
                nargs, f = channel_modes[mode]
                for notify, message in f(user, channel, mode, *take(nargs, modes), op=op):
                    yield notify, message
        except StopIteration:
            break
Example #27
    def get(self):
        """
        Lists dashboards modified in the last 7 days.
        """
        recent = [
            d.to_dict()
            for d in models.Dashboard.recent(self.current_org,
                                             self.current_user.group_ids,
                                             self.current_user.id,
                                             for_user=True)
        ]

        global_recent = []
        if len(recent) < 10:
            global_recent = [
                d.to_dict() for d in models.Dashboard.recent(
                    self.current_org, self.current_user.group_ids,
                    self.current_user.id)
            ]

        return take(
            20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
Example #28
    def get(self):
        """
        Lists dashboards modified in the last 7 days.
        """
        if settings.FEATURE_DUMB_RECENTS:
            dashboards = models.Dashboard.all(
                self.current_org, self.current_user.group_ids,
                self.current_user.id).order_by(
                    models.Dashboard.updated_at.desc()).limit(10)
            dashboards = [d.to_dict() for d in dashboards]
        else:
            recent = [
                d.to_dict()
                for d in models.Dashboard.recent(self.current_org,
                                                 self.current_user.group_ids,
                                                 self.current_user.id,
                                                 for_user=True)
            ]
            # Fetch this user's most recent dashboards

            global_recent = []
            # If there are too few, also pull in the global recent dashboards
            if len(recent) < 10:
                global_recent = [
                    d.to_dict() for d in models.Dashboard.recent(
                        self.current_org, self.current_user.group_ids,
                        self.current_user.id)
                ]

            # chain joins a group of iterables into one larger iterable

            # Merge the two kinds of dashboards, then take at most the first 20
            dashboards = take(
                20,
                distinct(chain(recent, global_recent), key=lambda d: d['id']))

        return dashboards
Example #29
def main():

	#
	# CIFAR-10
	#
	cifar = CIFAR_10()

	#
	# x_train.shape		= (50000, 32, 32, 3)
	# y_train.shape		= (50000, 10)
	# x_validation.shape= (10000, 32, 32, 3)
	# y_validation.shape= (10000, 10)
	#
	data			= cifar.load_data()
	x_train			= data['training_data']
	y_train			= data['training_label']
	x_validation	= data['validation_data']
	y_validation	= data['validation_label']
	print("x_train.shape=", x_train.shape)
	print("y_train.shape=", y_train.shape)
	print("x_validation.shape=", x_validation.shape)
	print("y_validation.shape=", y_validation.shape)


	#
	# SqueezeNet
	#
	squeeze = SqueezeNet()
	i = Input(shape=x_train.shape[1:])
	o = squeeze.make_graph(y_train.shape[1])(i)

	#
	# model
	#
	model = Model(inputs=i, outputs=o)

	#
	# compile model
	#
	model.compile(
			loss='categorical_crossentropy',
			optimizer=SGD(momentum=0.9),
			metrics=['accuracy']
			)

	#
	# generator in ImageDataGenerator by keras
	#
	train_data = ImageDataGenerator(
			featurewise_center=True,
			featurewise_std_normalization=True,
			width_shift_range=0.125,
			height_shift_range=0.125,
			horizontal_flip=True
			)
	validation_data = ImageDataGenerator(
			featurewise_center=True,
			featurewise_std_normalization=True
			)
	for data in (train_data, validation_data):
		data.fit(x_train)  # Practically speaking, I don't think fitting the featurewise statistics on x_validation is feasible...

	#
	# check pickle
	#
	# file_pickle = "./results/history.pickle"
	model_path		= "./results"
	model_file  	= model_path + "/model.h5"
	model_weights	= model_path + "/weights.h5"
	print(f"models: model={model_file}, weight={model_weights}" )
	# print(f"models: arch  =", options['file_arch'])
	# print(f"models: weight=", options['model_weights'])
	if not path.exists(model_path):
		os.mkdir(model_path)

	#
	# print model
	#
	from lib_utils import print_model_summary
	print_model_summary(model, "./results/network.txt", "model.png")


	#
	# check model, if not exist trained model, we have to make trained parameters for model.
	#
	if not path.exists(model_file):

		#
		# fit generator
		#
		batch_size = 1000	# 100
		epochs     = 1		# 200
		results = model.fit_generator(
			#
			# generate train data (ImageDataGenerator by keras)
			#
			train_data.flow(x_train, y_train, batch_size=batch_size),

			#
			# steps/epoch
			#
			steps_per_epoch=x_train.shape[0] // batch_size,

			#
			# epoch
			#
			epochs=epochs,

			#
			# callbacks
			#
			callbacks = [
				LearningRateScheduler(
					partial(
						getitem,
						tuple(take(epochs, concat(repeat(0.010, 1), repeat(0.100, 99), repeat(0.010, 50), repeat(0.001))))
						)
					)
				],
			#
			# generate validation data (ImageDataGenerator by keras)
			#
			validation_data=validation_data.flow(x_validation, y_validation, batch_size=batch_size),

			#
			# validation step
			#
			validation_steps=x_validation.shape[0] // batch_size,

			#
			# max_queue_size
			#
			max_queue_size=4
			)

		#
		# save keras model
		#
		from lib_utils import save_model_by_keras
		save_model_by_keras(model, model_file, model_weights)

		# del model

	else:
		#
		# load keras model
		#
		if path.exists(model_file):
			print("load model...")
			from lib_utils import load_model_by_keras
			model = load_model_by_keras(model_file, model_weights)
			print("load model...done")
		else:
			print("load model...: not found=", model_file, model_weights )

	#
	# check version
	#
	from lib_utils import get_version
	get_version(model_file)

		
	#
	# evaluate
	#
	"""
	print("model evaluate...")
	score = lmodel.evaluate(x_validation, y_validation, verbose=1)
	print("model evaluate: loss=", score[0])
	print("model evaluate: accuracy=", score[1])
	"""

	#
	# prediction
	#
	print("model prediction...")
	# lmodel.predict(y_validation.shape[1])
	# lmodel.predict(x_train.shape[1:])
	print("x_validation.shape=", x_validation.shape)
	print("x_validation.shape[0]=", x_validation.shape[0])
	print("x_validation.shape[1]=", x_validation.shape[1])
	print("x_validation.shape[2]=", x_validation.shape[2])
	print("x_validation.shape[3]=", x_validation.shape[3])
	i0 = x_validation[0:1]
	i1 = x_validation.reshape(10000,32,32,3)
	i2 = i1[0]
	print("i0.shape=", i0.shape)
	print("i1.shape=", i1.shape)
	print("i2.shape=", i2.shape)
	# lmodel.predict(i0, verbose=1)
	predo = model.predict(x_validation, verbose=1)[0]
	print(predo)

	"""
	"""
	preds = model.predict(x_validation, verbose=1)

	# for pre in preds:
	# 	y = pre.argmax()
	# 	print("label: ", y_validation[y])

	print('done')
Example #30
def main():
    import os
    with tf.device("/cpu:0"):
        (x_train, y_train), (x_validation, y_validation) = load_data()

    batch_size = 32
    epochs = 200
    input_shape = Input(shape=x_train.shape[1:])
    model_file = './results/model.h5'
    if os.path.exists(model_file):
        model = load_model(model_file)
        # with tf.device("/cpu:0"):
        #     validation_data = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
    else:
        model = Model(*juxt(identity, computational_graph(y_train.shape[1]))(
            input_shape))
        model.compile(loss='categorical_crossentropy',
                      optimizer=SGD(momentum=0.9),
                      metrics=['accuracy'])

        with tf.device("/cpu:0"):
            train_data = ImageDataGenerator(featurewise_center=True,
                                            featurewise_std_normalization=True,
                                            width_shift_range=0.125,
                                            height_shift_range=0.125,
                                            horizontal_flip=True)
            validation_data = ImageDataGenerator(
                featurewise_center=True, featurewise_std_normalization=True)

        for data in (train_data, validation_data):
            data.fit(x_train)  # Practically speaking, I don't think fitting the featurewise statistics on x_validation is feasible...

        results = model.fit_generator(
            train_data.flow(x_train, y_train, batch_size=batch_size),
            steps_per_epoch=x_train.shape[0] // batch_size,
            epochs=epochs,
            callbacks=[
                LearningRateScheduler(
                    partial(
                        getitem,
                        tuple(
                            take(
                                epochs,
                                concat(repeat(0.01, 1), repeat(0.1, 99),
                                       repeat(0.01, 50), repeat(0.001))))))
            ],
            validation_data=validation_data.flow(x_validation,
                                                 y_validation,
                                                 batch_size=batch_size),
            validation_steps=x_validation.shape[0] // batch_size)

        with open('./results/history.pickle', 'wb') as f:
            pickle.dump(results.history, f)
        save_model(model, model_file)

    try:
        with tf.device("/cpu:0"):
            # model.summary()
            # print("=== AFTER POPPING THE LAST ===")
            model.layers.pop()
            # model.summary()
            # generate_confusion_matrix(model, x_validation, y_validation, batch_size)
            # plot_model(model, to_file='./results/model.png')
    except Exception as ex:
        print("plot_model failed with error:", repr(ex), "\nMoving on...")

    siamese(input_shape, model)
Example #31
def dhead(d, n=5):
    return funcy.project(d, funcy.take(n, d.keys()))
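
dhead combines funcy.project with take over the dict's keys to keep only the "first" n entries (insertion order on modern Python dicts); a short usage sketch, redefining the helper so the snippet runs on its own:

import funcy

def dhead(d, n=5):
    return funcy.project(d, funcy.take(n, d.keys()))

assert dhead({'a': 1, 'b': 2, 'c': 3}, 2) == {'a': 1, 'b': 2}
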
Example #32
def test_mpc_smoke():
    from magnum.examples.feasible_example import feasible_example as g

    results = list(fn.take(10, echo_env_mpc(g)))
    assert len(results) == 10
Example #33
def take(n, seq=None):
    return F.take(n,seq) if seq \
    else lambda xs: F.take(n,xs)
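
This wrapper lets F.take be called directly or partially applied; a short usage sketch of both forms (redefined here so it runs standalone; note that the `if seq` test also sends an empty sequence down the curried branch):

import funcy as F

def take(n, seq=None):
    return F.take(n, seq) if seq \
        else lambda xs: F.take(n, xs)

assert take(2, [1, 2, 3]) == [1, 2]     # direct call
assert take(2)([1, 2, 3]) == [1, 2]     # curried form, handy in pipelines
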
Example #34
    def get_plot_data(self,
                      plot_delta: timedelta = timedelta(days=1),
                      adjusted: bool = False,
                      stddev_multiplier: float = 1.0,
                      offset: float = 0.0,
                      color: bool = False,
                      use_x_date: bool = False) -> \
            Tuple[np.ndarray, Dict[str, plot_data_type]]:
        t_arr = take(
            self.duration,
            map(
                lambda x: x * (self.step.total_seconds() / plot_delta.
                               total_seconds()) + offset, count()))
        if use_x_date:
            t_arr = map(
                lambda x: datetime.combine(self.starting_date, time()) +
                plot_delta * x, t_arr)
        t_arr = np.array(list(t_arr))

        # print(f't_arr.size()={len(t_arr)}')
        out = {}
        drugs = sorted(list(self.drugs_timeline.keys()),
                       key=lambda x: self.drugs[x].name)

        step_time_d = int(self.step.total_seconds())

        steps = (
            int(
                math.ceil(
                    int(timedelta(days=self.step_days[0]).total_seconds()) /
                    step_time_d)),
            int(
                math.ceil(
                    int(timedelta(days=self.step_days[1]).total_seconds()) /
                    step_time_d)),
            int(
                math.ceil(
                    int(timedelta(days=self.step_days[2]).total_seconds()) /
                    step_time_d)))

        self.running_average = {}
        self.running_stddev = {}

        for n, drug in enumerate(drugs):
            # print(f"{n}: {drug}")
            timeline = self.drugs_timeline[drug]
            drug_name = self.drugs[drug].name_blood
            # print(f"{steps}/{len(timeline)}")

            if self.drugs[drug].factor != 1.0:
                drug_name += f" (x{self.drugs[drug].factor})"

            if adjusted and drug in self.blood_level_factors and len(
                    self.blood_level_factors[drug]) > 0:
                # print(self.blood_level_factors)
                factor_timeline = []
                ev_num = 0
                for t in range(len(timeline)):
                    t_time = datetime.combine(self.starting_date,
                                              time()) + t * self.step
                    if len(self.events) > 0:
                        if len(self.blood_level_factors[drug]) > ev_num + 1:
                            if datetime.combine(self.events[ev_num][0], time())+self.events[ev_num][1] > \
                                    t_time > datetime.combine(self.events[ev_num][0], time()):
                                factor: timedelta = t_time - datetime.combine(
                                    self.events[ev_num][0], time())
                                factor: float = factor / self.events[ev_num][1]
                                factor_timeline.append((
                                    self.blood_level_factors[drug][ev_num +
                                                                   1][0] *
                                    factor +
                                    self.blood_level_factors[drug][ev_num][0] *
                                    (1 - factor),
                                    self.blood_level_factors[drug][ev_num +
                                                                   1][1] *
                                    factor +
                                    self.blood_level_factors[drug][ev_num][1] *
                                    (1 - factor)))
                            elif datetime.combine(
                                    self.events[ev_num][0],
                                    time()) + self.events[ev_num][1] <= t_time:
                                ev_num += 1
                                factor_timeline.append(
                                    self.blood_level_factors[drug][ev_num])
                            else:
                                factor_timeline.append(
                                    self.blood_level_factors[drug][ev_num])
                        else:
                            factor_timeline.append(
                                self.blood_level_factors[drug][ev_num])
                    else:
                        factor_timeline.append(
                            self.blood_level_factors[drug][0])
                self.factor_timeline[drug] = factor_timeline

                list_avg = lmap(lambda x: x[0] * x[1][0],
                                zip(timeline, factor_timeline))
                arr_avg = np.array(list_avg)
                arr_min = np.array(
                    lmap(
                        lambda x: x[0] * x[1][0] - x[1][1] * stddev_multiplier,
                        zip(timeline, factor_timeline)))
                arr_max = np.array(
                    lmap(
                        lambda x: x[0] * x[1][0] + x[1][1] * stddev_multiplier,
                        zip(timeline, factor_timeline)))

                mp_ctx = mp.get_context('fork')
                statistics_data: List[Tuple[Sequence[int], List[float], int]]
                statistics_data = [(steps, list_avg, i) for i in range(3)]
                with mp_ctx.Pool(3) as mp_pool:
                    statistics_results = mp_pool.map(
                        calculate_running_statistics, statistics_data)

                running_average, running_std_dev = tuple(
                    map(list, list(zip(*statistics_results))))

                self.running_average[drug_name] = tuple(
                    map(np.array, running_average))
                self.running_stddev[drug_name] = tuple(
                    map(np.array, running_std_dev))

                if color:
                    # print(f"{drug}: {n} => {get_color(n)}")
                    out[drug_name] = (arr_avg, arr_min, arr_max, get_color(n))
                else:
                    out[drug_name] = (arr_avg, arr_min, arr_max)
            else:
                arr = np.array(timeline) * self.drugs[drug].factor
                if color:
                    out[drug_name] = (arr, arr, arr, get_color(n))
                else:
                    out[drug_name] = (arr, arr, arr)
            # print(f't_arr.size({drug.name})={len(out[drug.name])}')
        return t_arr, out
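
The t_arr construction at the top of get_plot_data caps an infinite count() stream with take(self.duration, ...) while rescaling step indices into plot units; a minimal sketch of that pattern, with a 6-hour step expressed in days:

from itertools import count
from funcy import take

step_fraction = 6 / 24                   # one simulation step, expressed in days
t_arr = take(8, map(lambda i: i * step_fraction, count()))
assert t_arr == [0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
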
Example #35
def upload_post(bot: Bot, nodes,  args):

    def download_media(url):
        data = urllib.request.urlopen(url).read()
        print(fleep.get(data[:300]).extension)
        return data

    def binary_data(node):
        is_url = isinstance(node, Arg) and 'http' in node.value
        is_media = isinstance(node, Media)
        is_path = isinstance(node, Arg) and not 'http' in node.value
        switch = {
            is_url: lambda: download_media(node.value),
            is_path: lambda: load(node.value),
            is_media: lambda: download_media(node.images[0]),
        }
        return switch[True]()


    def get_geotag_data(name):
        return bot.api.location_search(bot.latitude, bot.longitude, query=name,)['venues'][0]

    max = args.get('max') or 1
    caption = args.get('caption') or ''
    geotag = args.get('geotag')
    disable_comments = bool(args.get('disable_comments'))

    nodes = take(max, nodes)

    if len(nodes) == 0:
        bot.logger.error('no medias to upload')

    if max == 1: # only 1 media


        node = nodes[0]
        # print(node.carousel_media)
        # print(node.sources[0])
        kwargs = dict(
              caption=caption,
              to_reel=False,
        )

        if geotag:
              kwargs.update(dict(location=get_geotag_data(geotag),))

        if disable_comments is not None:
              kwargs.update(dict(disable_comments=disable_comments))

        data = binary_data(node)
        with temporary_write(data) as path:
                extensions = fleep.get(data[:128]).extension
                ext = extensions[0] if len(extensions) > 0 else None
                if ext in SUPPORTED_IMAGE_EXT:
                    bot.logger.info('uploading img')
                    kwargs.update(make_photo_args(data, path))
                    res = bot.api.post_photo(**kwargs)
                    uploaded_media = Media(**res.get('media',{}))
                    # print(json.dumps(res, indent=4))

                elif ext in SUPPORTED_VIDEO_EXT:
                    bot.logger.info('uploading video')
                    kwargs.update(make_video_args(data, path))
                    res = bot.api.post_video(**kwargs)
                    uploaded_media = Media(**res.get('media',{}))

                else:
                      raise RuntimeError(f'unsupported media type {fleep.get(data[:128]).extension} for {node}')

                bot.logger.info(f'uploaded media {uploaded_media}')
                return [uploaded_media], {}



    else: # album upload

        uploads= []

        for node in nodes:

            data = binary_data(node)

            with temporary_write(data) as path:
                extensions = fleep.get(data[:128]).extension
                ext = extensions[0] if len(extensions) > 0 else None
                if ext in SUPPORTED_IMAGE_EXT:
                    uploads.append(make_photo_args(data, path))

                elif ext in SUPPORTED_VIDEO_EXT:
                    uploads.append(make_video_args(data, path))

                else:
                    raise RuntimeError(f'unsupported media type {fleep.get(data[:128]).extension} for {node}')

        kwargs = dict(
            caption=caption,
            medias=uploads,
        )

        if geotag:
              kwargs.update(dict(location=get_geotag_data(geotag),))

        # if disable_comments is not None:
        #       kwargs.update(dict(disable_comments=bool(disable_comments)))

        res = bot.api.post_album(**kwargs)
        uploaded_media = Media(**res.get('media',{}))
        bot.logger.info(f'uploaded album {uploaded_media}')

        return [uploaded_media], {}
Example #36
def run():
    batch_size = 32
    num_classes = 10
    epochs = 200

    with tf.device("/cpu:0"):
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        # Convert class vectors to binary class matrices.
        y_train = to_categorical(y_train, num_classes)
        y_test = to_categorical(y_test, num_classes)

        x_train = x_train.astype('float32', copy=False)
        x_test = x_test.astype('float32', copy=False)
        x_train /= 255
        x_test /= 255

    optimizer = Adam(lr=0.001)

    model = SqueezeNet(classes=num_classes)
    squeezenet_model_file = './sqz_log/model.h5'
    if os.path.exists(squeezenet_model_file):
        model.layers.pop()
        model = Model(name="sqzn_no_softmax",
                      inputs=model.input,
                      outputs=model.layers[-1].output)
        model.load_weights(squeezenet_model_file, by_name=True)
        # model.load_weights(squeezenet_model_file, by_name=True)
    else:
        # train a new SqueezeNet
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

        # train_data = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True,
        #                                 width_shift_range=0.125, height_shift_range=0.125, horizontal_flip=True)
        # validation_data = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
        train_data = ImageDataGenerator()
        validation_data = ImageDataGenerator()
        for data in (train_data, validation_data):
            data.fit(x_train)

        callbacks = [
            LearningRateScheduler(
                partial(
                    getitem,
                    tuple(
                        take(
                            epochs,
                            concat(repeat(0.01, 1), repeat(0.1, 99),
                                   repeat(0.01, 50), repeat(0.001)))))),
            ModelCheckpoint(filepath=squeezenet_model_file),
            TensorBoard(log_dir="./sqz_log", batch_size=batch_size)
        ]
        results = model.fit_generator(
            train_data.flow(x_train, y_train, batch_size=batch_size),
            steps_per_epoch=x_train.shape[0] // batch_size,
            epochs=epochs,
            callbacks=callbacks,
            validation_data=validation_data.flow(x_test,
                                                 y_test,
                                                 batch_size=batch_size),
            validation_steps=x_test.shape[0] // batch_size)

        with open('./sqz_log/history.pickle', 'wb') as f:
            pickle.dump(results.history, f)
        save_model(model, squeezenet_model_file)

    # Build the siamese architecture
    # model_cut = Model(name="sqzn_no_softmax", inputs=model.input, outputs=model.layers[-1].output)
    # model_cut.load_weights(squeezenet_model_file, by_name=True)
    # with tf.device("/cpu:0"):
    #     model_cut.summary()

    input_shape = x_train.shape[1:]

    im_in1 = Input(shape=input_shape)
    im_in2 = Input(shape=input_shape)
    feat_x1 = model(im_in1)
    feat_x2 = model(im_in2)
    lambda_merge = Lambda(euclidean_distance,
                          output_shape=(1, ))([feat_x1, feat_x2])

    siamese = Model(name="siamese",
                    inputs=[im_in1, im_in2],
                    outputs=lambda_merge)
    with tf.device("/cpu:0"):
        siamese.summary()

    optimizer = RMSprop()  # SGD(momentum=0.9)
    siamese.compile(optimizer=optimizer,
                    loss=contrastive_loss,
                    metrics=[accuracy])

    def make_img_pair(identical, from_train):
        """Select the image pairs"""
        label = np.random.randint(0, num_classes)
        if identical:
            if from_train:
                idx = np.nonzero(y_train[:, label] == 1)[0]
            else:
                idx = np.nonzero(y_test[:, label] == 1)[0]

            # pick any two indexes randomly
            id1 = np.random.randint(0, idx.shape[0])
            id2 = np.random.randint(0, idx.shape[0])
            while id1 == id2:
                id2 = np.random.randint(0, idx.shape[0])
        else:
            if from_train:
                idx1 = np.nonzero(y_train[:, label] == 1)[0]
                idx2 = np.nonzero(y_train[:,
                                          (label + 1) % num_classes] == 1)[0]
            else:
                idx1 = np.nonzero(y_test[:, label] == 1)[0]
                idx2 = np.nonzero(y_train[:,
                                          (label + 1) % num_classes] == 1)[0]

            # pick any two indexes randomly
            id1 = np.random.randint(0, idx1.shape[0])
            id2 = np.random.randint(0, idx2.shape[0])

        if from_train:
            return np.array([x_train[id1], x_train[id2]])
        else:
            return np.array([x_test[id1], x_test[id2]])

    def generator(from_train):
        while True:
            X = [[None, None]] * batch_size
            y = [[None]] * batch_size
            indexes = np.arange(batch_size)
            identical = True
            for i in indexes:
                X[i] = make_img_pair(identical, from_train)
                y[i] = [1 if identical else 0]
                identical = not identical
            np.random.shuffle(indexes)
            X = np.asarray(X)[indexes]
            y = np.asarray(y)[indexes]
            # print("generator: from_train:", from_train, " - X:", X.shape, "- y:", y.shape)
            yield [X[:, 0], X[:, 1]], y

    siamese_model_file = "./siam_log/siamese.h5"
    epochs = 100
    callbacks = [
        LearningRateScheduler(
            partial(
                getitem,
                tuple(
                    take(
                        epochs,
                        concat(repeat(0.01, 1), repeat(0.1, 99),
                               repeat(0.01, 50), repeat(0.001)))))),
        ModelCheckpoint(filepath=siamese_model_file),
        TensorBoard(log_dir="./siam_log", batch_size=batch_size)
    ]
    outputs = siamese.fit_generator(
        generator(from_train=True),
        initial_epoch=0,
        steps_per_epoch=x_train.shape[0] // batch_size,
        epochs=epochs,
        validation_data=generator(from_train=False),
        validation_steps=x_test.shape[0] // batch_size,
        callbacks=callbacks)

    with open('./siam_log/history.pickle', 'wb') as f:
        pickle.dump(outputs.history, f)
    save_model(siamese, siamese_model_file)