Example 1
def update_video(start_date, end_date, n_clicks_timestamp):
    logger.info(f"n_clicks_timestamp={n_clicks_timestamp}")
    # Pick the most recently created minute-level history file.
    latest_file = max(Path("history_data/").glob("dxy_minutes*.json"),
                      key=lambda p: p.stat().st_ctime)
    with open(latest_file, "r", encoding="utf8") as f:
        history = json.load(f)
    # Only regenerate the video when both dates are set and the button was
    # clicked within the last second (ignores stale callback triggers).
    if (start_date and end_date and n_clicks_timestamp
            and n_clicks_timestamp != -1
            and (datetime.now() -
                 utils.timestamp2datetime(n_clicks_timestamp / 1000)).seconds < 1):
        logger.info("[start] updating video")
        fps = 30
        dpi = 300
        figdir = "assets/figures"
        Path(figdir).mkdir(exist_ok=True, parents=True)
        utils.rmfigures(figdir)
        logger.info("[start] generating figures")
        utils.generate_figures(history, provinces_geomap, provinces_list,
                               start_date, end_date, dpi, figdir)
        logger.info("[done] generating figures")
        videoname = f"assets/tncg-{start_date.replace('-', '')}-{end_date.replace('-', '')}-{datetime.now().strftime('%Y%m%d%H%M%S')}.mp4"
        logger.info("[start] generating video")
        utils.generate_video(f"{figdir}/%d.png", videoname, fps)
        logger.info("[done] generating video")
        logger.info("[done] updating video")
        src = f"/{videoname}"
        logger.debug(f"src={src}")
        return src
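This function reads like a Dash callback: a date-picker range and a button's n_clicks_timestamp trigger regeneration of the video, and the returned path becomes the player's src. Below is a minimal sketch of how such a callback could be wired into a Dash app; the component ids and layout here are illustrative assumptions, not names from the original project.

# Minimal wiring sketch (assumed component ids, not from the original project).
import dash
from dash import dcc, html
from dash.dependencies import Input, Output

app = dash.Dash(__name__)
app.layout = html.Div([
    dcc.DatePickerRange(id="date-picker"),
    html.Button("Render", id="render-button"),
    html.Video(id="video-player", controls=True),
])

@app.callback(
    Output("video-player", "src"),
    [Input("date-picker", "start_date"),
     Input("date-picker", "end_date"),
     Input("render-button", "n_clicks_timestamp")],
)
def update_video(start_date, end_date, n_clicks_timestamp):
    # Body as in the example above: regenerate the video and return its URL.
    ...

if __name__ == "__main__":
    app.run_server(debug=True)

Note that n_clicks_timestamp defaults to -1 for a button that has never been clicked, which is what the `n_clicks_timestamp != -1` guard in the example checks for.
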
Example 2
# NOTE: the original snippet begins inside this call; a Keras-style dataset
# load (e.g. keras.datasets.imdb.load_data) with these keyword arguments is
# assumed here.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=args.vocab_size,
                                                      maxlen=args.maxLen)
x_train = sequence.pad_sequences(x_train, maxlen=args.maxLen)
x_test = sequence.pad_sequences(x_test, maxlen=args.maxLen)

model = cnn(vocab_size=args.vocab_size,
            maxLen=args.maxLen,
            kernel_size=args.kernel_size,
            embedding_dim=args.embed,
            hidden_dim=args.hidden,
            output_dim=args.output,
            keep_prob=args.keep)

model.compile(optimizer=optimizers.Adam(lr=args.lr),
              loss='binary_crossentropy',
              metrics=['accuracy'])

model.summary()  # summary() prints the model itself and returns None
# Stop early when validation loss has not improved for 10 consecutive epochs.
history = model.fit(x_train,
                    y_train,
                    validation_split=args.val_split,
                    batch_size=args.batch,
                    epochs=args.epochs,
                    callbacks=[EarlyStopping(monitor='val_loss', patience=10)])

y_pred = model.predict(x_test)
generate_figures(history=history,
                 model_name=args.model_name,
                 output_dir="figures")
output_performance(model=model, y_test=y_test, y_pred=y_pred)
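Every example on this page calls a project-specific generate_figures helper. For the Keras snippet above, here is a minimal sketch of what such a helper could look like, assuming it plots the curves stored in the History object returned by model.fit; the project's real implementation may differ.

# Hypothetical generate_figures compatible with the call above: it saves the
# training/validation loss and accuracy curves from a Keras History object.
from pathlib import Path
import matplotlib.pyplot as plt

def generate_figures(history, model_name, output_dir="figures"):
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    for metric in ("loss", "accuracy"):
        if metric not in history.history:
            continue
        plt.figure()
        plt.plot(history.history[metric], label=f"train_{metric}")
        val_key = f"val_{metric}"
        if val_key in history.history:
            plt.plot(history.history[val_key], label=val_key)
        plt.xlabel("epoch")
        plt.ylabel(metric)
        plt.legend()
        plt.savefig(f"{output_dir}/{model_name}_{metric}.png", dpi=150)
        plt.close()
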
Example 3
    q_vals = session.run(target_dqn.q_values,
                         feed_dict={target_dqn.input: new_states})
    double_q = q_vals[range(batch_size), arg_q_max]
    # Bellman update: multiplying by (1 - terminal_flags) ensures that
    # target_q reduces to the immediate reward once the episode has ended.
    target_q = rewards + (gamma * double_q * (1 - terminal_flags))
    # Gradient descent step to update the parameters of the main network
    loss, _ = session.run(
        [main_dqn.loss, main_dqn.update],
        feed_dict={
            main_dqn.input: states,
            main_dqn.target_q: target_q,
            main_dqn.action: actions
        })
    return loss


args = utils.argsparser()
# Reset the default graph before seeding it; resetting afterwards would
# discard the graph-level seed set by tf.random.set_random_seed.
tf.reset_default_graph()
tf.random.set_random_seed(args.seed)
np.random.seed(args.seed)

# Dispatch on the requested task.
if args.task == "train":
    utils.train(args, DQN, learn, "dist_dqn")
elif args.task == "evaluate":
    utils.sample(args, DQN, "dist_dqn", save=False)
elif args.task == "log":
    utils.generate_figures("dist_dqn")
else:
    utils.sample(args, DQN, "dist_dqn")
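The target computed in this snippet is the Double DQN rule: one network selects the next action (arg_q_max, presumably the online network's argmax over next-state Q-values) and the target network evaluates it (double_q). A self-contained numpy sketch of the same arithmetic, with toy data standing in for the network outputs:

# Double DQN target on toy data: online net selects, target net evaluates.
import numpy as np

batch_size, n_actions, gamma = 4, 3, 0.99
rewards = np.array([1.0, 0.0, 0.5, 1.0])
terminal_flags = np.array([0.0, 0.0, 1.0, 0.0])

q_main_next = np.random.rand(batch_size, n_actions)    # online net, next states
q_target_next = np.random.rand(batch_size, n_actions)  # target net, next states

arg_q_max = q_main_next.argmax(axis=1)                       # action selection
double_q = q_target_next[np.arange(batch_size), arg_q_max]   # action evaluation
target_q = rewards + gamma * double_q * (1 - terminal_flags)
print(target_q)  # terminal transition collapses to its immediate reward
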
Example 4
    expert_loss, _ = session.run(
        [main_dqn.expert_loss, main_dqn.expert_update],
        feed_dict={
            main_dqn.input: expert_states,
            main_dqn.generated_input: generated_states,
            main_dqn.expert_action: expert_actions,
            main_dqn.expert_weights: weights
        })
    return loss, expert_loss


args = utils.argsparser()
# Reset the default graph before seeding it; resetting afterwards would
# discard the graph-level seed set by tf.random.set_random_seed.
tf.reset_default_graph()
tf.random.set_random_seed(args.seed)
np.random.seed(args.seed)

# Dispatch on the requested task.
if args.task == "train":
    utils.train(args,
                DQN,
                learn,
                "expert_dist_dqn",
                expert=True,
                bc_training=train_bc,
                pretrain_iters=args.pretrain_bc_iter)
elif args.task == "evaluate":
    utils.sample(args, DQN, "expert_dist_dqn", save=False)
elif args.task == "log":
    utils.generate_figures("expert_dist_dqn")
else:
    utils.sample(args, DQN, "expert_dist_dqn")
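All of the DQN examples start from utils.argsparser() and dispatch on args.task. Below is a hypothetical sketch of a parser covering only the attributes these snippets actually read (task, seed, pretrain_bc_iter); the project's real utils.argsparser presumably defines many more options.

# Hypothetical argsparser sketch; only the attributes used above are included.
import argparse

def argsparser():
    parser = argparse.ArgumentParser(description="DQN runner")
    parser.add_argument("--task", default="train",
                        help='one of "train", "evaluate", "log"; anything else '
                             'falls through to sampling with saving enabled')
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--pretrain_bc_iter", type=int, default=10000,
                        help="behaviour-cloning pretraining iterations")
    return parser.parse_args()
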
Example 5
    expert_q_vals = session.run(target_dqn.q_values,
                                feed_dict={target_dqn.input: obs})
    double_q = q_vals[range(batch_size), arg_q_max]
    expert_q = expert_q_vals[range(batch_size), expert_arg_q_max]
    # Bellman update: multiplying by (1 - terminal_flags) ensures that
    # target_q reduces to the immediate reward once the episode has ended.
    target_q = rewards + (gamma * double_q * (1 - terminal_flags))
    # Gradient descent step to update the parameters of the main network
    loss, _ = session.run([main_dqn.loss, main_dqn.update],
                          feed_dict={main_dqn.input: states,
                                     main_dqn.target_q: target_q,
                                     main_dqn.action: actions})
    # Additional update on the expert transitions, using the expert Q-values
    # as the regression targets.
    expert_loss, _ = session.run([main_dqn.expert_loss, main_dqn.expert_update],
                                 feed_dict={main_dqn.input: obs,
                                            main_dqn.expert_action: acs,
                                            main_dqn.target_q: expert_q})
    return loss, expert_loss

args = utils.argsparser()
# Reset the default graph before seeding it; resetting afterwards would
# discard the graph-level seed set by tf.random.set_random_seed.
tf.reset_default_graph()
tf.random.set_random_seed(args.seed)
np.random.seed(args.seed)

# Dispatch on the requested task.
if args.task == "train":
    utils.train(args, DQN, learn, "basic_fengdi_dist_expert_dqn", expert=True)
elif args.task == "evaluate":
    utils.sample(args, DQN, "basic_fengdi_dist_expert_dqn", save=False)
elif args.task == "log":
    utils.generate_figures("basic_fengdi_dist_expert_dqn")
else:
    utils.sample(args, DQN, "basic_fengdi_dist_expert_dqn")