def main():
    (x_train, y_train), (x_validation, y_validation) = load_data()

    model = Model(*juxt(identity, computational_graph(y_train.shape[1]))(Input(
        shape=x_train.shape[1:])))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(momentum=0.9),
                  metrics=['accuracy'])  # The paper says nesterov=True, but the code used False...

    model.summary()
    # plot_model(model, to_file='./results/model.png')

    train_data = ImageDataGenerator(featurewise_center=True,
                                    featurewise_std_normalization=True,
                                    width_shift_range=0.125,
                                    height_shift_range=0.125,
                                    horizontal_flip=True)
    validation_data = ImageDataGenerator(featurewise_center=True,
                                         featurewise_std_normalization=True)

    for data in (train_data, validation_data):
        data.fit(x_train)  # Considering practical use, I don't think fitting the featurewise statistics on x_validation is feasible...

    batch_size = 128
    epoch_size = 200

    results = model.fit_generator(
        train_data.flow(x_train, y_train, batch_size=batch_size),
        steps_per_epoch=x_train.shape[0] // batch_size,
        epochs=epoch_size,
        callbacks=[
            LearningRateScheduler(
                partial(
                    getitem,
                    tuple(
                        take(
                            epoch_size,
                            concat(repeat(0.1, 60), repeat(0.02, 60),
                                   repeat(0.004, 40), repeat(0.0008))))))
        ],
        validation_data=validation_data.flow(x_validation,
                                             y_validation,
                                             batch_size=batch_size),
        validation_steps=x_validation.shape[0] // batch_size)

    with open('./results/history.pickle', 'wb') as f:
        pickle.dump(results.history, f)

    save_model(model, './results/model.h5')

    del model
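# The LearningRateScheduler above is driven by a plain tuple lookup: concat/repeat/take
# (from funcy) lay out a step schedule, and partial(getitem, ...) turns indexing into an
# epoch -> learning-rate function. A minimal standalone sketch of the same idiom, using
# the values from the code above:
from functools import partial
from operator import getitem

from funcy import concat, repeat, take

schedule = tuple(take(200, concat(repeat(0.1, 60), repeat(0.02, 60),
                                  repeat(0.004, 40), repeat(0.0008))))
lr_for_epoch = partial(getitem, schedule)
assert lr_for_epoch(0) == 0.1 and lr_for_epoch(199) == 0.0008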
Example #2
    def feedback(self,
                 inputs,
                 outputs,
                 initials=None,
                 latches=None,
                 keep_outputs=False):
        import warnings
        warnings.warn("deprecated", DeprecationWarning)

        def create_wire(val):
            iname, oname, lname, init = val
            return {
                'input': iname,
                'output': oname,
                'latch': lname,
                'init': init,
                'keep_output': keep_outputs
            }

        if initials is None:
            initials = fn.repeat(False)

        if latches is None:
            assert (set(inputs) & self.latches) == set()
            latches = inputs

        vals = zip(inputs, outputs, latches, initials)
        return self.loopback(*map(create_wire, vals))
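# Note on the defaults above: fn.repeat(False) is an endless iterator and zip stops at its
# shortest argument, so every wire receives init=False no matter how many inputs there are.
# A small sketch of that behaviour (funcy imported as fn, as in the snippet):
import funcy as fn

inputs = ['a', 'b', 'c']
print(list(zip(inputs, fn.repeat(False))))  # [('a', False), ('b', False), ('c', False)]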
Example #3
def get_interesting_repos(g: Github, session: Any) -> List[Repository]:
    repos: List[Repository] = []

    grepos = g.search_repositories(query='stars:>250 forks:>50',
                                   sort='stars',
                                   order='desc')
    records = zip(grepos, fy.repeat('most_stars'))

    grepos = g.search_repositories(query='forks:>5 topic:kaggle-competition',
                                   sort='stars',
                                   order='desc')
    records = fy.concat(records, zip(grepos, fy.repeat('kaggle')))

    grepos = g.search_repositories(query='forks:>5 topic:tensorflow-model',
                                   sort='stars',
                                   order='desc')
    records = fy.concat(records, zip(grepos, fy.repeat('tensorflow-model')))

    grepos = g.search_repositories(
        query='cookiecutterdatascience in:readme forks:>5 stars:>0 fork:true',
        sort='stars',
        order='desc')
    records = fy.concat(records,
                        zip(grepos, fy.repeat('cookiecutterdatascience')))

    for grepo, search_method in tqdm(records):
        repo = (session.query(Repository).filter(
            Repository.id == grepo.full_name).one_or_none())
        if repo is None:
            repo = Repository(
                id=grepo.full_name,
                owner=grepo.owner.login,
                name=grepo.name,
                description=grepo.description,
                search_method=search_method,
            )
            repos.append(repo)

    return repos
def create_file_obj_dict(namedtup):
    # create fields with all the info we are saving from the diff text
    change_dict = dict(zip(namedtup.__dict__['_fields'], funcy.repeat(None)))
    change_dict['raw_diff'] = []
    change_dict['filename_old'] = ''
    change_dict['filename_new'] = ''
    change_dict['functions_changed'] = []
    change_dict['locations_changed'] = []
    change_dict['list_changes'] = []
    change_dict['num_changes'] = 0
    change_dict['is_rename'] = False
    change_dict['is_new'] = False
    change_dict['is_deletion'] = False

    return change_dict
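# create_file_obj_dict relies on zip(fields, funcy.repeat(None)) to start every field at
# None before filling in the ones it cares about. A minimal sketch with a hypothetical
# namedtuple (the real fields come from the diff-parsing code, which is not shown here):
from collections import namedtuple

import funcy

Change = namedtuple('Change', ['filename_old', 'filename_new', 'num_changes'])
defaults = dict(zip(Change._fields, funcy.repeat(None)))
print(defaults)  # {'filename_old': None, 'filename_new': None, 'num_changes': None}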
Example #5
    def step(self):
        self.elapse += 1
        self.actions = []

        for car, player in zip(self.cars, concat(self.players, repeat(None))):
            # Get the action.
            acceleration, braking, steering = player.get_action(
                self.create_observation(car)) if player else (0, 0, 0)

            # Normalize the action.
            acceleration = self._clip(acceleration, -1, 1)
            braking = self._clip(braking, 0, 1)  # noqa: E221, E241
            steering = self._clip(steering, -1, 1)  # noqa: E221, E241

            # Record the normalized action.
            self.actions.append((acceleration, braking, steering))

            # A car that crashed and broke down cannot act until its repairs are finished.
            if car.crash_energy > 0:
                car.crash_energy = max(car.crash_energy - 100000, 0)
                continue

            # Add small Gaussian noise to the action to introduce some variation. Changing where the next star appears would make reinforcement learning harder, so a separate Random instance is used for this.
            acceleration = self._clip(
                acceleration + self.control_random.gauss(0, 0.05), -1, 1)
            braking = self._clip(braking + self.control_random.gauss(0, 0.05),
                                 0, 1)  # noqa: E221, E241
            steering = self._clip(steering +
                                  self.control_random.gauss(0, 0.05), -1,
                                  1)  # noqa: E221

            # Execute the action.
            car.accelerate(acceleration * 20000)
            car.brake(braking * 200000)
            car.steer(steering * 20000)

        self.space.step(1 / FPS)

        for star in filter(lambda star: star.is_catched, self.stars):
            self._reset_star_position(star)
            star.is_catched = False

        return self.elapse >= GAME_PERIOD_SEC * FPS  # The game ends after GAME_PERIOD_SEC seconds.
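# In step() above, concat(self.players, repeat(None)) pads the player list with None so
# that every car gets a partner in zip; cars without a player fall back to the (0, 0, 0)
# action. A tiny sketch of the padding (concat and repeat from funcy, as above):
from funcy import concat, repeat

cars = ['car1', 'car2', 'car3', 'car4']
players = ['alice', 'bob']
print(list(zip(cars, concat(players, repeat(None)))))
# [('car1', 'alice'), ('car2', 'bob'), ('car3', None), ('car4', None)]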
Example #6
def busy_account_following(account_name, following):
    """
    Fetch users followers or followings and their metadata.
    Returned list is ordered by follow time (newest followers first). \n
    Usage: `GET /busy/<string:account_name>/<string:following>`\n
    `following` must be 'following' or 'followers'.\n
    """
    if following not in ['following', 'followers']:
        raise ParseError(detail='Please specify following or followers.')

    acc = mongo.db['Accounts'].find_one({'name': account_name}, {
        following: 1,
        '_id': 0
    })
    if not acc:
        raise NotFound(detail='Could not find STEEM account %s' % account_name)

    # if follower list is empty
    if not acc[following]:
        return []

    allowed_fields = {
        '_id': 0,
        'name': 1,
        'sp': 1,
        'rep': 1,
        'followers_count': 1,
        'following_count': 1,
        'post_count': 1,
    }
    accounts_w_meta = list(mongo.db['Accounts'].find(
        {'name': {
            '$in': acc[following]
        }}, allowed_fields))

    # return in LIFO order (last to follow is listed first)
    accounts_ordered = list(repeat('', len(acc[following])))
    for a in accounts_w_meta:
        with suppress(ValueError):
            accounts_ordered[acc[following].index(a.get('name', None))] = a
    return [x for x in accounts_ordered if x][::-1]
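# The reordering above pre-allocates a placeholder list with repeat('', n), drops each
# fetched account into the slot matching its position in the follow list, filters out the
# accounts that were not found, and reverses so the newest follow comes first. A compact
# sketch of the same idea (repeat from funcy here; itertools.repeat works identically):
from funcy import repeat

follow_order = ['ann', 'bob', 'cat']
fetched = [{'name': 'cat'}, {'name': 'ann'}]  # arbitrary DB order, 'bob' missing
ordered = list(repeat('', len(follow_order)))
for acc in fetched:
    ordered[follow_order.index(acc['name'])] = acc
print([x for x in ordered if x][::-1])  # [{'name': 'cat'}, {'name': 'ann'}]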
Example #7
    def __init__(self, players, seed=None):
        self.game_random = Random(seed)
        self.control_random = Random(seed)

        self.players = players

        self.elapse = 0
        self.actions = repeat((0, 0, 0), len(players))

        self.space = pymunk.Space()

        self.space.add_wildcard_collision_handler(1).post_solve = self._crash
        self.space.add_wildcard_collision_handler(2).begin = self._catch

        self.cars = []
        self.obstacles = []
        self.stars = []

        for a, b in (((-1000, 1000), (1000, 1000)),
                     ((1000, 1000), (1000, -1000)),
                     ((1000, -1000), (-1000, -1000)),
                     ((-1000, -1000), (-1000, 1000))):
            self._append_wall(a, b)

        for position, angle in map(
                lambda i: (pymunk.Vec2d(80, 0).rotated(pi * 2 / 8 * i),
                           pi * 2 / 8 * i),
                range(8)):
            self._append_car(position, angle)

        for _ in range(OBSTACLE_COUNT):
            self._append_obstacle()

        for _ in range(STAR_COUNT):
            self._append_star()
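# self.actions above starts as repeat((0, 0, 0), len(players)): in the two-argument form,
# repeat(item, n) yields exactly n copies, so every car begins with a neutral action
# (step() later replaces this with a real list). A tiny sketch, listed to show the values:
from funcy import repeat

players = ['alice', 'bob', 'carol']
print(list(repeat((0, 0, 0), len(players))))  # [(0, 0, 0), (0, 0, 0), (0, 0, 0)]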
Example #8
class IXY(Structure):
    _fields_ = list(zip(IXY_props, F.repeat(c_int)))
Example #9
class NODE(Structure):
    _fields_ = list(zip(NODE_props, F.repeat(c_int)))
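# Examples #8 and #9 use zip(props, F.repeat(c_int)) to declare ctypes Structures whose
# fields are all c_int. A self-contained sketch; the two field names here are assumptions,
# since the real IXY_props/NODE_props lists are not shown in the snippet:
from ctypes import Structure, c_int, sizeof

import funcy as F

IXY_props = ['ix', 'iy']  # assumed field names, for illustration only

class IXY(Structure):
    _fields_ = list(zip(IXY_props, F.repeat(c_int)))

p = IXY(3, 4)
print(p.ix, p.iy, sizeof(IXY))  # 3 4 8 (two 32-bit ints on typical platforms)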
  parser.add_argument(
    "--no-wl2", action="store_true",
    help="Do not compute WL2 encodings.")
  parser.add_argument(
    "-d", "--dataset", action="append",
    help="Prepare this dataset.")
  args = parser.parse_args()
  p = mp.cpu_count() if args.parallel else 1

  if args.dataset is None or len(args.dataset) == 0:
    ds = datasets.stored
  else:
    ds = args.dataset

  dsl = len(ds)

  print(args)

  print(f"Will prepare {dsl} stored datasets with parallelism {p}:")
  for d in ds:
    print(f"- {d}")

  print("Starting...")

  with mp.Pool(p) as p:
    p.starmap(
      prepare_ds,
      zip(ds, fy.repeat(not args.no_wl2), fy.repeat(not args.no_gram)))

  print("Prepared all stored datasets.")
def run():
    batch_size = 32
    num_classes = 10
    epochs = 200

    with tf.device("/cpu:0"):
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        # Convert class vectors to binary class matrices.
        y_train = to_categorical(y_train, num_classes)
        y_test = to_categorical(y_test, num_classes)

        x_train = x_train.astype('float32', copy=False)
        x_test = x_test.astype('float32', copy=False)
        x_train /= 255
        x_test /= 255

    optimizer = Adam(lr=0.001)

    model = SqueezeNet(classes=num_classes)
    squeezenet_model_file = './sqz_log/model.h5'
    if os.path.exists(squeezenet_model_file):
        model.layers.pop()
        model = Model(name="sqzn_no_softmax",
                      inputs=model.input,
                      outputs=model.layers[-1].output)
        model.load_weights(squeezenet_model_file, by_name=True)
    else:
        # train a new SqueezeNet
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

        # train_data = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True,
        #                                 width_shift_range=0.125, height_shift_range=0.125, horizontal_flip=True)
        # validation_data = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
        train_data = ImageDataGenerator()
        validation_data = ImageDataGenerator()
        for data in (train_data, validation_data):
            data.fit(x_train)

        callbacks = [
            LearningRateScheduler(
                partial(
                    getitem,
                    tuple(
                        take(
                            epochs,
                            concat(repeat(0.01, 1), repeat(0.1, 99),
                                   repeat(0.01, 50), repeat(0.001)))))),
            ModelCheckpoint(filepath=squeezenet_model_file),
            TensorBoard(log_dir="./sqz_log", batch_size=batch_size)
        ]
        results = model.fit_generator(
            train_data.flow(x_train, y_train, batch_size=batch_size),
            steps_per_epoch=x_train.shape[0] // batch_size,
            epochs=epochs,
            callbacks=callbacks,
            validation_data=validation_data.flow(x_test,
                                                 y_test,
                                                 batch_size=batch_size),
            validation_steps=x_test.shape[0] // batch_size)

        with open('./sqz_log/history.pickle', 'wb') as f:
            pickle.dump(results.history, f)
        save_model(model, squeezenet_model_file)

    # Build the siamese architecture
    # model_cut = Model(name="sqzn_no_softmax", inputs=model.input, outputs=model.layers[-1].output)
    # model_cut.load_weights(squeezenet_model_file, by_name=True)
    # with tf.device("/cpu:0"):
    #     model_cut.summary()

    input_shape = x_train.shape[1:]

    im_in1 = Input(shape=input_shape)
    im_in2 = Input(shape=input_shape)
    feat_x1 = model(im_in1)
    feat_x2 = model(im_in2)
    lambda_merge = Lambda(euclidean_distance,
                          output_shape=(1, ))([feat_x1, feat_x2])

    siamese = Model(name="siamese",
                    inputs=[im_in1, im_in2],
                    outputs=lambda_merge)
    with tf.device("/cpu:0"):
        siamese.summary()

    optimizer = RMSprop()  # SGD(momentum=0.9)
    siamese.compile(optimizer=optimizer,
                    loss=contrastive_loss,
                    metrics=[accuracy])

    def make_img_pair(identical, from_train):
        """Select the image pairs"""
        label = np.random.randint(0, num_classes)
        if identical:
            if from_train:
                idx = np.nonzero(y_train[:, label] == 1)[0]
            else:
                idx = np.nonzero(y_test[:, label] == 1)[0]

            # pick two distinct positions within that class at random
            id1 = np.random.randint(0, idx.shape[0])
            id2 = np.random.randint(0, idx.shape[0])
            while id1 == id2:
                id2 = np.random.randint(0, idx.shape[0])
            # map the positions back to dataset indexes
            id1, id2 = idx[id1], idx[id2]
        else:
            if from_train:
                idx1 = np.nonzero(y_train[:, label] == 1)[0]
                idx2 = np.nonzero(y_train[:,
                                          (label + 1) % num_classes] == 1)[0]
            else:
                idx1 = np.nonzero(y_test[:, label] == 1)[0]
                idx2 = np.nonzero(y_test[:,
                                         (label + 1) % num_classes] == 1)[0]

            # pick one position in each class at random and map back to
            # dataset indexes
            id1 = idx1[np.random.randint(0, idx1.shape[0])]
            id2 = idx2[np.random.randint(0, idx2.shape[0])]

        if from_train:
            return np.array([x_train[id1], x_train[id2]])
        else:
            return np.array([x_test[id1], x_test[id2]])

    def generator(from_train):
        while True:
            X = [[None, None]] * batch_size
            y = [[None]] * batch_size
            indexes = np.arange(batch_size)
            identical = True
            for i in indexes:
                X[i] = make_img_pair(identical, from_train)
                y[i] = [1 if identical else 0]
                identical = not identical
            np.random.shuffle(indexes)
            X = np.asarray(X)[indexes]
            y = np.asarray(y)[indexes]
            # print("generator: from_train:", from_train, " - X:", X.shape, "- y:", y.shape)
            yield [X[:, 0], X[:, 1]], y

    siamese_model_file = "./siam_log/siamese.h5"
    epochs = 100
    callbacks = [
        LearningRateScheduler(
            partial(
                getitem,
                tuple(
                    take(
                        epochs,
                        concat(repeat(0.01, 1), repeat(0.1, 99),
                               repeat(0.01, 50), repeat(0.001)))))),
        ModelCheckpoint(filepath=siamese_model_file),
        TensorBoard(log_dir="./siam_log", batch_size=batch_size)
    ]
    outputs = siamese.fit_generator(
        generator(from_train=True),
        initial_epoch=0,
        steps_per_epoch=x_train.shape[0] // batch_size,
        epochs=epochs,
        validation_data=generator(from_train=False),
        validation_steps=x_test.shape[0] // batch_size,
        callbacks=callbacks)

    with open('./siam_log/history.pickle', 'wb') as f:
        pickle.dump(outputs.history, f)
    save_model(siamese, siamese_model_file)
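# run() above assumes euclidean_distance, contrastive_loss and accuracy helpers that are
# not part of this snippet. A plausible set of definitions, following the standard Keras
# siamese example (the margin of 1 and the 0.5 decision threshold are assumptions):
from keras import backend as K

def euclidean_distance(vects):
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True),
                            K.epsilon()))

def contrastive_loss(y_true, y_pred, margin=1.0):
    # Hadsell et al. (2006): similar pairs are pulled together, dissimilar
    # pairs are pushed apart up to the margin.
    return K.mean(y_true * K.square(y_pred) +
                  (1.0 - y_true) * K.square(K.maximum(margin - y_pred, 0.0)))

def accuracy(y_true, y_pred):
    # Pairs whose predicted distance is below 0.5 are treated as "same".
    return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))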
def main():
    import os
    with tf.device("/cpu:0"):
        (x_train, y_train), (x_validation, y_validation) = load_data()

    batch_size = 32
    epochs = 200
    input_shape = Input(shape=x_train.shape[1:])
    model_file = './results/model.h5'
    if os.path.exists(model_file):
        model = load_model(model_file)
        # with tf.device("/cpu:0"):
        #     validation_data = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
    else:
        model = Model(*juxt(identity, computational_graph(y_train.shape[1]))(
            input_shape))
        model.compile(loss='categorical_crossentropy',
                      optimizer=SGD(momentum=0.9),
                      metrics=['accuracy'])

        with tf.device("/cpu:0"):
            train_data = ImageDataGenerator(featurewise_center=True,
                                            featurewise_std_normalization=True,
                                            width_shift_range=0.125,
                                            height_shift_range=0.125,
                                            horizontal_flip=True)
            validation_data = ImageDataGenerator(
                featurewise_center=True, featurewise_std_normalization=True)

        for data in (train_data, validation_data):
            data.fit(x_train)  # Considering practical use, I don't think fitting the featurewise statistics on x_validation is feasible...

        results = model.fit_generator(
            train_data.flow(x_train, y_train, batch_size=batch_size),
            steps_per_epoch=x_train.shape[0] // batch_size,
            epochs=epochs,
            callbacks=[
                LearningRateScheduler(
                    partial(
                        getitem,
                        tuple(
                            take(
                                epochs,
                                concat(repeat(0.01, 1), repeat(0.1, 99),
                                       repeat(0.01, 50), repeat(0.001))))))
            ],
            validation_data=validation_data.flow(x_validation,
                                                 y_validation,
                                                 batch_size=batch_size),
            validation_steps=x_validation.shape[0] // batch_size)

        with open('./results/history.pickle', 'wb') as f:
            pickle.dump(results.history, f)
        save_model(model, model_file)

    try:
        with tf.device("/cpu:0"):
            # model.summary()
            # print("=== AFTER POPPING THE LAST ===")
            model.layers.pop()
            # model.summary()
            # generate_confusion_matrix(model, x_validation, y_validation, batch_size)
            # plot_model(model, to_file='./results/model.png')
    except Exception as ex:
        print("plot_model failed with error:", repr(ex), "\nMoving on...")

    siamese(input_shape, model)
Example #13
def main():

	#
	# CIFAR-10
	#
	cifar = CIFAR_10()

	#
	# x_train.shape		= (50000, 32, 32, 3)
	# y_train.shape		= (50000, 10)
	# x_validation.shape= (10000, 32, 32, 3)
	# y_validation.shape= (10000, 10)
	#
	data			= cifar.load_data()
	x_train			= data['training_data']
	y_train			= data['training_label']
	x_validation	= data['validation_data']
	y_validation	= data['validation_label']
	print("x_train.shape=", x_train.shape)
	print("y_train.shape=", y_train.shape)
	print("x_validation.shape=", x_validation.shape)
	print("y_validation.shape=", y_validation.shape)


	#
	# SqueezeNet
	#
	squeeze = SqueezeNet()
	i = Input(shape=x_train.shape[1:])
	o = squeeze.make_graph(y_train.shape[1])(i)

	#
	# model
	#
	model = Model(inputs=i, outputs=o)

	#
	# compile model
	#
	model.compile(
			loss='categorical_crossentropy',
			optimizer=SGD(momentum=0.9),
			metrics=['accuracy']
			)

	#
	# generator in ImageDataGenerator by keras
	#
	train_data = ImageDataGenerator(
			featurewise_center=True,
			featurewise_std_normalization=True,
			width_shift_range=0.125,
			height_shift_range=0.125,
			horizontal_flip=True
			)
	validation_data = ImageDataGenerator(
			featurewise_center=True,
			featurewise_std_normalization=True
			)
	for data in (train_data, validation_data):
		data.fit(x_train)  # Considering practical use, I don't think fitting the featurewise statistics on x_validation is feasible...

	#
	# check pickle
	#
	# file_pickle = "./results/history.pickle"
	model_path		= "./results"
	model_file  	= model_path + "/model.h5"
	model_weights	= model_path + "/weights.h5"
	print(f"models: model={model_file}, weight={model_weights}" )
	# print(f"models: arch  =", options['file_arch'])
	# print(f"models: weight=", options['model_weights'])
	if not path.exists(model_path):
		os.mkdir(model_path)

	#
	# print model
	#
	from lib_utils import print_model_summary
	print_model_summary(model, "./results/network.txt", "model.png")


	#
	# check model, if not exist trained model, we have to make trained parameters for model.
	#
	if not path.exists(model_file):

		#
		# fit generator
		#
		batch_size = 1000	# 100
		epochs     = 1		# 200
		results = model.fit_generator(
			#
			# generate train data (ImageDataGenerator by keras)
			#
			train_data.flow(x_train, y_train, batch_size=batch_size),

			#
			# steps/epoch
			#
			steps_per_epoch=x_train.shape[0] // batch_size,

			#
			# epoch
			#
			epochs=epochs,

			#
			# callbacks
			#
			callbacks = [
				LearningRateScheduler(
					partial(
						getitem,
						tuple(take(epochs, concat(repeat(0.010, 1), repeat(0.100, 99), repeat(0.010, 50), repeat(0.001))))
						)
					)
				],
			#
			# generate validation data (ImageDataGenerator by keras)
			#
			validation_data=validation_data.flow(x_validation, y_validation, batch_size=batch_size),

			#
			# validation step
			#
			validation_steps=x_validation.shape[0] // batch_size,

			#
			# max_queue_size
			#
			max_queue_size=4
			)

		#
		# save keras model
		#
		from lib_utils import save_model_by_keras
		save_model_by_keras(model, model_file, model_weights)

		# del model

	else:
		#
		# load keras model
		#
		if path.exists(model_file):
			print("load model...")
			from lib_utils import load_model_by_keras
			model = load_model_by_keras(model_file, model_weights)
			print("load model...done")
		else:
			print("load model...: not found=", model_file, model_weights )

	#
	# check version
	#
	from lib_utils import get_version
	get_version(model_file)

		
	#
	# evaluate
	#
	"""
	print("model evaluate...")
	score = lmodel.evaluate(x_validation, y_validation, verbose=1)
	print("model evaluate: loss=", score[0])
	print("model evaluate: accuracy=", score[1])
	"""

	#
	# prediction
	#
	print("model prediction...")
	# lmodel.predict(y_validation.shape[1])
	# lmodel.predict(x_train.shape[1:])
	print("x_validation.shape=", x_validation.shape)
	print("x_validation.shape[0]=", x_validation.shape[0])
	print("x_validation.shape[1]=", x_validation.shape[1])
	print("x_validation.shape[2]=", x_validation.shape[2])
	print("x_validation.shape[3]=", x_validation.shape[3])
	i0 = x_validation[0:1]
	i1 = x_validation.reshape(10000,32,32,3)
	i2 = i1[0]
	print("i0.shape=", i0.shape)
	print("i1.shape=", i1.shape)
	print("i2.shape=", i2.shape)
	# lmodel.predict(i0, verbose=1)
	predo = model.predict(x_validation, verbose=1)[0]
	print(predo)

	"""
	"""
	preds = model.predict(x_validation, verbose=1)

	# for pre in preds:
	# 	y = pre.argmax()
	# 	print("label: ", y_validation[y])

	print('done')
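# Note on the commented-out label lookup above: pre.argmax() returns a class index, not a
# row index into y_validation, so indexing y_validation with it would not give the matching
# label. A small sketch of one way to decode and score the predictions instead (called as
# report_accuracy(preds, y_validation) with the arrays from the code above):
import numpy as np

def report_accuracy(preds, y_onehot):
    pred_labels = preds.argmax(axis=1)      # predicted class per sample
    true_labels = y_onehot.argmax(axis=1)   # one-hot labels -> class indexes
    print("accuracy:", np.mean(pred_labels == true_labels))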