Example #1
    def __init__(self, cfg):
        self.cfg = cfg
        train_dataset = getDataSet(cfg['data']['train'], 'train',
                                   cfg['data']['scale'])
        self.train_loader = DataLoader(
            train_dataset,
            cfg['data']['train']['batch_size'],
            shuffle=True,
            num_workers=cfg['data']['train']['n_workers'])
        val_dataset = getDataSet(cfg['data']['val'], 'val',
                                 cfg['data']['scale'])
        self.val_loader = DataLoader(
            val_dataset,
            1,
            shuffle=False,
            num_workers=cfg['data']['val']['n_workers'])
        self.records = {'Epoch': [], 'PSNR': [], 'SSIM': []}
        self.log_dir = os.path.join(
            cfg['output_dir'], cfg['name'],
            time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime(time.time())))
        self.logger = utils.Logger(os.path.join(self.log_dir, 'info.log'))
        self.max_epochs = cfg['schedule']['num_epochs']
        self.checkpoint_dir = os.path.join(self.log_dir, 'checkpoint')
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)
        self.epoch = 1
        save_config(cfg, os.path.join(self.log_dir, 'config.yml'))

        self.logger.log('Train dataset has {} images and {} batches.'.format(
            len(train_dataset), len(self.train_loader)))
        self.logger.log('Val dataset has {} images and {} batches.'.format(
            len(val_dataset), len(self.val_loader)))
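
A side note on the checkpoint-directory creation above: on Python 3.2+ the exists()/makedirs() pair can be collapsed into a single call, which also avoids the check-then-create race. A minimal standalone sketch (the path below is a placeholder, not from the original code):

import os

checkpoint_dir = "output/run/checkpoint"    # placeholder path for illustration
os.makedirs(checkpoint_dir, exist_ok=True)  # idempotent; no prior exists() check needed
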
Example #2
def main(args):

    # make the export folder structure
    # this is made here because the Logger uses the filename
    if args.do_save:
        # make a base save directory
        utils.make_dir(args.save_dir)

        # make a directory in the base save directory for the specific method
        save_subdir = os.path.join(args.save_dir,
                                   args.dataset + "_" + args.sampling_method)
        utils.make_dir(save_subdir)

        filename = os.path.join(
            save_subdir,
            "log-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + ".txt")
        sys.stdout = utils.Logger(filename)

    # confusion argument can have multiple values
    confusions = [float(t) for t in args.confusions.split(" ")]
    mixtures = [float(t) for t in args.active_sampling_percentage.split(" ")]
    max_dataset_size = None if args.max_dataset_size == 0 else args.max_dataset_size
    starting_seed = args.seed

    # get the dataset from file based on the data directory and dataset name
    X, y = utils.get_mldata(args.data_dir, args.dataset)

    # object to store the results in
    all_results = {}

    # percentage of labels to randomize
    for c in confusions:

        # Mixture weights on active sampling.
        for m in mixtures:

            # the number of curves created during multiple trials
            for seed in range(starting_seed, starting_seed + args.trials):

                # get the sampler based on the name
                # returns a python object
                # also named: query strategy
                sampler = get_AL_sampler(args.sampling_method)

                # get the model
                score_model = utils.get_model(args.score_method, seed)

                # use a separate selection model only if it differs from the scoring model
                if (args.select_method == "None"
                        or args.select_method == args.score_method):
                    select_model = None
                else:
                    select_model = utils.get_model(args.select_method, seed)

                # create the learning curve
                results, sampler_state = generate_one_curve(
                    X,
                    y,
                    sampler,
                    score_model,
                    seed,
                    args.warmstart_size,
                    args.batch_size,
                    select_model,
                    confusion=c,
                    active_p=m,
                    max_points=max_dataset_size,
                    standardize_data=args.standardize_data,
                    norm_data=args.normalize_data,
                    train_horizon=args.train_horizon)
                key = (args.dataset, args.sampling_method, args.score_method,
                       args.select_method, m, args.warmstart_size,
                       args.batch_size, c, args.standardize_data,
                       args.normalize_data, seed)
                sampler_output = sampler_state.to_dict()
                results["sampler_output"] = sampler_output
                all_results[key] = results

    # Not sure why this is done in a way like this.
    fields = [
        "dataset", "sampler", "score_method", "select_method",
        "active percentage", "warmstart size", "batch size", "confusion",
        "standardize", "normalize", "seed"
    ]
    all_results["tuple_keys"] = fields

    # write the results to a file
    if args.do_save:

        # format the filename
        filename = "results_score_{}_select_{}_norm_{}_stand_{}".format(
            args.score_method, args.select_method, args.normalize_data,
            args.standardize_data)

        existing_files = gfile.Glob(
            os.path.join(save_subdir, "{}*.pkl".format(filename)))
        filepath = os.path.join(
            save_subdir, "{}_{}.pkl".format(
                filename, str(1000 + len(existing_files))[1:]))

        # dump the dict to a pickle file
        pickle.dump(all_results, gfile.GFile(filepath, "w"))

        # flush stdout
        sys.stdout.flush_file()
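
Several of the snippets in this listing rebind sys.stdout to utils.Logger(filename) so that everything printed also lands in a log file. The actual utils.Logger implementations are not shown here; the class below is a minimal tee-style sketch consistent with the calls these snippets make (write/flush from print, plus the flush_file() seen above), not the original code:

import sys

class Logger(object):
    """Tee stdout to a log file (sketch; the real utils.Logger may differ)."""

    def __init__(self, filename):
        self.terminal = sys.stdout        # keep a handle on the real console
        self.file = open(filename, "a")   # append so reruns do not clobber old logs

    def write(self, message):
        self.terminal.write(message)      # still echo to the console
        self.file.write(message)          # ...and mirror it into the file

    def flush(self):
        self.terminal.flush()
        self.file.flush()

    def flush_file(self):
        self.file.flush()                 # matches the sys.stdout.flush_file() calls above
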
Example #3
"""
As implemented in https://github.com/abewley/sort but with some modifications
"""

from __future__ import print_function

import utils.utils as utils
import numpy as np
from correlation_tracker import CorrelationTracker
from data_association import associate_detections_to_trackers
from kalman_tracker import KalmanBoxTracker

from kalman_tracker import convert_x_to_bbox

logger = utils.Logger("MOT")


class Sort:
    def __init__(self, max_age=1, min_hits=3, use_dlib=False):
        """
        Sets key parameters for SORT
        """
        self.max_age = max_age
        self.min_hits = min_hits
        self.trackers = []
        self.frame_count = 0

        self.use_dlib = use_dlib

    def update(self,
               dets,
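
The update() signature above is cut off by the excerpt. In the upstream abewley/sort implementation, update() takes one frame's detections as rows of [x1, y1, x2, y2, score] and returns the active tracks with a track id appended; the usage sketch below follows that convention and is an assumption here, since this modified version may take additional arguments (for example the frame image when use_dlib is enabled):

import numpy as np

tracker = Sort(max_age=1, min_hits=3, use_dlib=False)

# detections for a single frame, one [x1, y1, x2, y2, score] row per detection
dets = np.array([[100., 80., 180., 220., 0.9],
                 [300., 60., 360., 200., 0.8]])
tracks = tracker.update(dets)  # upstream SORT returns rows of [x1, y1, x2, y2, track_id]
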
Example #4
def main(argv):
    del argv

    if not gfile.Exists(FLAGS.save_dir):
        try:
            gfile.MkDir(FLAGS.save_dir)
        except:
            print(('WARNING: error creating save directory, '
                   'directory most likely already created.'))

    save_dir = os.path.join(FLAGS.save_dir,
                            FLAGS.dataset + '_' + FLAGS.sampling_method)

    if FLAGS.do_save == "True":
        if not gfile.Exists(save_dir):
            try:
                gfile.MkDir(save_dir)
            except:
                print(('WARNING: error creating save directory, '
                       'directory most likely already created.'))

        # Set up logging
        filename = os.path.join(
            save_dir,
            "log-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + ".txt")
        sys.stdout = utils.Logger(filename)

    X, y = utils.get_mldata(FLAGS.data_dir, FLAGS.dataset)  #load dataset!
    starting_seed = FLAGS.seed

    all_results = {}

    for seed in range(starting_seed, starting_seed + FLAGS.trials):
        sampler = get_AL_sampler(FLAGS.sampling_method)  #load sampler!
        score_model = utils.get_model(FLAGS.score_method,
                                      seed)  #load score model!
        if (FLAGS.select_method == "None" or  #load select model!
                FLAGS.select_method == FLAGS.score_method):
            select_model = None
        else:
            select_model = utils.get_model(FLAGS.select_method, seed)

        results, sampler_state = generate_one_curve(
            X=X,
            y=y,
            sampler=sampler,
            score_model=score_model,
            seed=seed,
            warmstart_size=FLAGS.warmstart_size,
            batch_size=FLAGS.batch_size,
            select_model=select_model,
            max_points=FLAGS.max_dataset_size)

        key = (FLAGS.dataset, FLAGS.sampling_method, FLAGS.score_method,
               FLAGS.select_method, FLAGS.warmstart_size, FLAGS.batch_size,
               seed)

        #sampler_output = sampler_state.to_dict()
        #results['sampler_output'] = sampler_output
        results['sampler_output'] = None
        all_results[key] = results

    fields = [
        'dataset', 'sampling_methods', 'score_method', 'select_method',
        'warmstart size', 'batch size', 'seed'
    ]
    all_results['tuple_keys'] = fields

    if FLAGS.do_save == "True":
        filename = ("results_score_" + FLAGS.score_method + "_select_" +
                    FLAGS.select_method)
        existing_files = gfile.Glob(os.path.join(save_dir, filename + "*.pkl"))
        filename = os.path.join(
            save_dir,
            filename + "_" + str(1000 + len(existing_files))[1:] + ".pkl")
        pickle.dump(all_results, gfile.GFile(filename, "w"))
        sys.stdout.flush_file()
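
The str(1000 + len(existing_files))[1:] idiom used above (and in several other snippets in this listing) turns the number of already-written result files into a zero-padded three-digit run index, so the pickle files sort lexicographically. A small illustration, with an equivalent format call:

n_existing = 7
suffix = str(1000 + n_existing)[1:]  # '007': drop the leading '1', keep three zero-padded digits
same = "{:03d}".format(n_existing)   # '007': equivalent and arguably clearer
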
Example #5
def main(argv):
  del argv

  if not gfile.Exists(FLAGS.save_dir):
    try:
      gfile.MkDir(FLAGS.save_dir)
    except:
      print(('WARNING: error creating save directory, '
             'directory most likely already created.'))

  save_dir = os.path.join(
      FLAGS.save_dir,
      FLAGS.dataset + "_" + FLAGS.sampling_method)
  do_save = FLAGS.do_save == "True"

  if do_save:
    if not gfile.Exists(save_dir):
      try:
        gfile.MkDir(save_dir)
      except:
        print(('WARNING: error creating save directory, '
               'directory most likely already created.'))
    # Set up logging
    filename = os.path.join(
        save_dir, "log-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + ".txt")
    sys.stdout = utils.Logger(filename)

  confusions = [float(t) for t in FLAGS.confusions.split(" ")]
  mixtures = [float(t) for t in FLAGS.active_sampling_percentage.split(" ")]
  all_results = {}
  max_dataset_size = None if FLAGS.max_dataset_size == "0" else int(
      FLAGS.max_dataset_size)
  normalize_data = FLAGS.normalize_data == "True"
  standardize_data = FLAGS.standardize_data == "True"
  X, y = utils.get_mldata(FLAGS.data_dir, FLAGS.dataset)
  starting_seed = FLAGS.seed

  for c in confusions:
    for m in mixtures:
      for seed in range(starting_seed, starting_seed + FLAGS.trials):
        sampler = get_AL_sampler(FLAGS.sampling_method)
        score_model = utils.get_model(FLAGS.score_method, seed)
        if (FLAGS.select_method == "None" or
            FLAGS.select_method == FLAGS.score_method):
          select_model = None
        else:
          select_model = utils.get_model(FLAGS.select_method, seed)
        results, sampler_state = generate_one_curve(
            X, y, sampler, score_model, seed, FLAGS.warmstart_size,
            FLAGS.batch_size, select_model, c, m, max_dataset_size,
            standardize_data, normalize_data, FLAGS.train_horizon)
        key = (FLAGS.dataset, FLAGS.sampling_method, FLAGS.score_method,
               FLAGS.select_method, m, FLAGS.warmstart_size, FLAGS.batch_size,
               c, standardize_data, normalize_data, seed)
        sampler_output = sampler_state.to_dict()
        results["sampler_output"] = sampler_output
        all_results[key] = results
  fields = [
      "dataset", "sampler", "score_method", "select_method",
      "active percentage", "warmstart size", "batch size", "confusion",
      "standardize", "normalize", "seed"
  ]
  all_results["tuple_keys"] = fields

  if do_save:
    filename = ("results_score_" + FLAGS.score_method +
                "_select_" + FLAGS.select_method +
                "_norm_" + str(normalize_data) +
                "_stand_" + str(standardize_data))
    existing_files = gfile.Glob(os.path.join(save_dir, filename + "*.pkl"))
    filename = os.path.join(save_dir,
                            filename + "_" + str(1000+len(existing_files))[1:] + ".pkl")
    pickle.dump(all_results, gfile.GFile(filename, "w"))
    sys.stdout.flush_file()
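
Examples #4 and #5 pass every flag as a string and convert by hand (FLAGS.do_save == "True", int(FLAGS.max_dataset_size)). If the flag definitions are under your control, typed flags remove those conversions; a minimal sketch assuming the tf.app.flags interface used above (these flag redefinitions are hypothetical, not the original declarations):

import tensorflow as tf

tf.app.flags.DEFINE_boolean("do_save", True, "whether to write results to save_dir")
tf.app.flags.DEFINE_integer("max_dataset_size", 0, "0 means use the full dataset")
FLAGS = tf.app.flags.FLAGS


def main(argv):
    del argv
    do_save = FLAGS.do_save                            # already a bool, no string comparison
    max_dataset_size = FLAGS.max_dataset_size or None  # 0 becomes None
    print(do_save, max_dataset_size)


if __name__ == "__main__":
    tf.app.run()
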
Example #6
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split

tf.app.flags.DEFINE_string("MY_DATASETSLIB_HOME", "../my_datasets",
                           "数据集的名称,与MY_DATASETSLIB_HOME中的子文件夹相对应")
tf.app.flags.DEFINE_string("dataset", "casiadatabase",
                           "数据集的名称,与MY_DATASETSLIB_HOME中的子文件夹相对应")
tf.app.flags.DEFINE_integer("random_seed", "123", "情感类别数目")
tf.app.flags.DEFINE_integer("num_classes", "6", "情感类别数目")
tf.app.flags.DEFINE_integer("num_features", "384", "情感特征维数")

if not os.path.isdir(FLAGS.MY_DATASETSLIB_HOME):
    print(FLAGS.MY_DATASETSLIB_HOME, "does not exist")
filename = os.path.join(
    os.getcwd(), "log-" + strftime("%Y-%m-%d", gmtime()) + "-ch05_3_1.txt")
sys.stdout = utils.Logger(filename)  # sys.stdout normally points to the console; redirect it to the log file as well
"""
 打开语音数据集
 """
dataset_path = os.path.join(FLAGS.MY_DATASETSLIB_HOME, FLAGS.dataset)
print(" 数据集保存位置: " + dataset_path)
loc = os.path.join(dataset_path,
                   FLAGS.dataset + "_data.txt")  # txt file with the extracted labels and speech features
data = np.loadtxt(loc, skiprows=0)
"""
生成训练集和测试集
"""
y = data[:, 0]
x = data[:, 1:]
"""
对情感语音数据集进行预处理
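
The fragment above breaks off at the preprocessing step. A minimal sketch of what typically follows, using the preprocessing and train_test_split imports at the top of this example; the scaling choice and split ratio are assumptions, not the original code:

# assumed continuation (not the original code): standardize the features and split
x = preprocessing.scale(x)  # zero mean, unit variance per feature column
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=FLAGS.random_seed)
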
Example #7
            score += batch_score
            upper_bound += (a.max(1)[0]).sum()
            num_data += pred.size(0)
    score = score / len(val_loader.dataset)
    upper_bound = upper_bound / len(val_loader.dataset)
    return score, upper_bound

if __name__ == '__main__':
    args = parse_args()

    # args.MFB_O = 500
    # args.MFB_K = 3
    args.mima = False

    utils.create_dir(args.output)
    logger = utils.Logger(os.path.join(args.output, 'log.txt'))
    logger.write(args.__repr__())
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    device = torch.device("cuda:" + str(args.gpu) if args.gpu >= 0 else "cpu")
    args.device = device
    # Fix the random seed for reproducibility
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # note: benchmark=True lets cuDNN autotune kernels; it is usually set to
    # False when full determinism is required
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True
    dictionary = dataset_RAD.Dictionary.load_from_file(os.path.join(args.RAD_dir, 'dictionary.pkl'))
    train_set = dataset_RAD.VQAFeatureDataset('train', args, dictionary, question_len=args.question_len)
    batch_size = args.batch_size

    model = Net(args, len(train_set.label2ans))
    model.to(device)
Example #8
def train(FLAGS):
    # config
    misc = dict()
    misc['log_steps'] = FLAGS.log_steps
    utils.validate_dir(FLAGS.work_dir)
    misc['batch_size'] = FLAGS.bsize
    save_dir = utils.validate_dir(os.path.join(FLAGS.work_dir, 'ckpt'))
    copyfile(os.path.abspath(__file__), os.path.join(FLAGS.work_dir, __file__))

    # define logger
    if FLAGS.model_path is not None:
        misc['logger'] = utils.Logger(os.path.join(FLAGS.work_dir, 'log.txt'),
                                      'a')
    else:
        misc['logger'] = utils.Logger(os.path.join(FLAGS.work_dir, 'log.txt'))

    # model function
    model_fn = utils.dynamic_import(FLAGS.model)

    # build graph
    ops = dict()
    with tf.device('/gpu:{}'.format(FLAGS.gpu)):
        # define start epoch and global step
        start_epoch = tf.get_variable('start_epoch', [],
                                      tf.int32,
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)
        update_start_ep = tf.assign(start_epoch, start_epoch + 1)
        ops['global_step'] = tf.get_variable(
            'global_step', [],
            tf.int32,
            initializer=tf.constant_initializer(0),
            trainable=False)

        # prepare dataset
        misc['logger'].write('Preparing dataset...')
        dataset = ModelNet40H5Dataset(FLAGS.train_file, FLAGS.test_file,
                                      FLAGS.bsize, FLAGS.npoints)
        misc['trainset_size'], misc['testset_size'] = dataset.size
        misc['label_map'] = dataset._label_map
        trainset = dataset.trainset
        testset = dataset.testset

        misc['logger'].write('Building model graph...')

        # define input and label
        ops['is_training'] = tf.placeholder(tf.bool, [], name='is_training')
        pointcloud = tf.cond(ops['is_training'], lambda: trainset.pointcloud,
                             lambda: testset.pointcloud)
        ops['label'] = tf.cond(ops['is_training'], lambda: trainset.label,
                               lambda: testset.label)

        # define model
        bn_decay = tf_layers.get_bn_decay(ops['global_step'],
                                          FLAGS.base_bn,
                                          FLAGS.bn_decay_steps,
                                          FLAGS.bn_decay_rate,
                                          FLAGS.end_bn,
                                          staircase=FLAGS.bn_staircase)
        ops['pred'], end_points = model_fn.get_model(pointcloud,
                                                     ops['is_training'],
                                                     bn_decay=bn_decay)

        # define loss
        ops['total_loss'] = model_fn.get_loss(ops['pred'], ops['label'],
                                              end_points)
        tf.summary.scalar('total loss', ops['total_loss'])

        # define metrics for training
        final_pred = tf.cast(tf.argmax(ops['pred'], 1), tf.uint8)
        final_label = tf.squeeze(ops['label'], 1)
        ops['accuracy'] = tf.contrib.metrics.accuracy(final_pred,
                                                      final_label,
                                                      name='accuracy')
        tf.summary.scalar('training accuracy', ops['accuracy'])

        # define optimizer
        lr = tf_layers.get_lr_expdecay(ops['global_step'], FLAGS.base_lr,
                                       FLAGS.lr_decay_steps,
                                       FLAGS.lr_decay_rate, FLAGS.end_lr,
                                       FLAGS.lr_staircase)
        tf.summary.scalar('learning rate', lr)
        if FLAGS.optim == 'momentum':
            optim = tf.train.MomentumOptimizer(lr, momentum=FLAGS.momentum)
        elif FLAGS.optim == 'adam':
            optim = tf.train.AdamOptimizer(lr)
        else:
            raise ValueError('Invalid optimizer {}'.format(FLAGS.optim))
        ops['train'] = optim.minimize(ops['total_loss'],
                                      global_step=ops['global_step'])

        # define saver
        saver = tf.train.Saver(max_to_keep=20)

    # create a session
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_config.allow_soft_placement = True
    tf_config.log_device_placement = False
    sess = tf.Session(config=tf_config)

    # define summary writer, saved log in work_dir/log
    ops['summary'] = tf.summary.merge_all()
    misc['summary_writer'] = tf.summary.FileWriter(
        os.path.join(FLAGS.work_dir, 'log'), sess.graph)

    # initialize variables
    misc['logger'].write('Initialize variables...')
    init = tf.global_variables_initializer()
    sess.run(init)

    # load pretrained model
    if FLAGS.model_path is not None:
        misc['logger'].write('Loading pretrained model from {}'.format(
            FLAGS.model_path))
        saver.restore(sess, os.path.abspath(FLAGS.model_path))
    _start_epoch = sess.run(start_epoch)

    # start training
    misc['logger'].write('Start training from epoch {}'.format(_start_epoch))
    for epoch in range(_start_epoch, FLAGS.max_epoch + 1):
        # print the epoch header
        misc['logger'].write(
            '============= Epoch#{:3d} ============='.format(epoch))

        # train an epoch
        train_an_epoch(sess, ops, misc)

        # evaluate once in a while
        if epoch % FLAGS.eval_steps == 0:
            evaluate_an_epoch(sess, ops, misc)

        # save model once in a while
        if epoch % FLAGS.save_steps == 0 and epoch != 0:
            saver.save(sess,
                       os.path.join(save_dir, 'model-ep{}.ckpt'.format(epoch)))

        # update start epoch
        sess.run([update_start_ep])
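
train_an_epoch and evaluate_an_epoch are not part of this excerpt. The helper below is a minimal sketch of what the training pass could look like given the ops and misc dictionaries built above; the batching arithmetic and logging cadence are assumptions, not the original code:

def train_an_epoch(sess, ops, misc):
    """Run one pass over the training set (sketch; not the original helper)."""
    steps = misc['trainset_size'] // misc['batch_size']
    for i in range(steps):
        _, loss, acc, summ, step = sess.run(
            [ops['train'], ops['total_loss'], ops['accuracy'],
             ops['summary'], ops['global_step']],
            feed_dict={ops['is_training']: True})
        misc['summary_writer'].add_summary(summ, step)
        if i % misc['log_steps'] == 0:
            misc['logger'].write('step {:6d}  loss {:.4f}  acc {:.4f}'.format(
                step, loss, acc))
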
Example #9
def main():
    args = parser.parse_args()
    LOG_FILE = args.outdir
    DEBUG = (args.mode == "debug")
    CHECKPOINT_PATH = args.checkpoint_dir + '-' + "BAC"[0]
    MAX_ROLLS = 7
    ITER = 50000
    LOG_ROUND = 10
    env, MAX_PATH_LENGTH, EP_LENGTH_STOP = get_roll_params()

    desired_kl = args.desired_kl
    max_lr, min_lr = .1, 1e-6

    framer = Framer(frame_num=args.frames)
    log_gamma_schedule = U.LinearSchedule(init_t=100,
                                          end_t=3000,
                                          init_val=-2,
                                          end_val=-8,
                                          update_every_t=100)  #This is base 10
    log_beta_schedule = U.LinearSchedule(init_t=100,
                                         end_t=3000,
                                         init_val=0,
                                         end_val=-4,
                                         update_every_t=100)  #This is base 10
    rew_to_advs = PathAdv(gamma=0.98, look_ahead=40)
    logger = U.Logger(logfile=LOG_FILE)
    np.random.seed(args.seed)

    if env.action_space == "discrete":
        act_type = 'disc'
        ac_dim, ac_scale = env.action_size, None
        print('Discrete Action Space. Number of actions is {}.'.format(
            env.action_size))
    else:
        act_type = 'cont'
        ac_dim, ac_scale = env.action_size, env.action_bound[1]
        print('Continuous Action Space. Action Scale is {}.'.format(ac_scale))
    ob_dim = env.state_size * args.frames
    critic = pol.Critic(num_ob_feat=ob_dim)
    actor = pol.Actor(num_ob_feat=ob_dim,
                      ac_dim=ac_dim,
                      act_type=act_type,
                      ac_scale=ac_scale)
    saver = tf.train.Saver(max_to_keep=3)

    reward = tf.placeholder(dtype=tf.float32)
    tf.summary.scalar("Episode Reward", reward)
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter(
        os.path.join('summaries',
                     args.outdir.split('.')[0] + '.data'),
        tf.get_default_graph())

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        tot_rolls = 0
        for i in range(ITER):
            ep_obs, ep_advs, ep_logps, ep_target_vals, ep_acs = [], [], [], [], []
            ep_rews = []
            tot_rews, tot_ent, rolls = 0, 0, 0
            while len(ep_rews) < EP_LENGTH_STOP and rolls < MAX_ROLLS:
                path = rollout(env=env,
                               sess=sess,
                               policy=actor.act,
                               max_path_length=MAX_PATH_LENGTH,
                               framer=framer)
                obs_aug = framer.full(path['obs'])
                ep_obs += obs_aug[:-1]
                ep_logps += path['logps']
                ep_acs += path['acs']
                tot_ent += path['entropy']
                obs_vals = critic.value(obs=obs_aug, sess=sess).reshape(-1)
                target_val, advs = rew_to_advs(rews=path['rews'],
                                               terminal=path['terminated'],
                                               vals=obs_vals)
                ep_target_vals += list(target_val)
                ep_advs += list(advs)
                ep_rews += path['rews']
                tot_rews += sum(path['rews'])

                if rolls == 0 and i % 10 == 0 and DEBUG:
                    actor.printoo(obs=ep_obs, sess=sess)
                    critic.printoo(obs=ep_obs, sess=sess)
                    print('Path length %d' % len(path['rews']))
                    print('Terminated {}'.format(path['terminated']))
                rolls += 1

            avg_rew = float(tot_rews) / rolls
            avg_ent = tot_ent / float(len(ep_logps))
            ep_obs, ep_advs, ep_logps, ep_target_vals, ep_acs, ep_rews = U.make_np(
                ep_obs, ep_advs, ep_logps, ep_target_vals, ep_acs, ep_rews)
            ep_advs = ep_advs.reshape(-1)
            ep_target_vals = ep_target_vals.reshape(-1)
            ep_advs = (ep_advs - np.mean(ep_advs)) / (1e-8 + np.std(ep_advs))

            if i % 50 == 13 and DEBUG:
                perm = np.random.choice(len(ep_advs), size=20)
                print('Some targets', ep_target_vals[perm])
                print('Some preds', critic.value(ep_obs[perm], sess=sess))
                print('Some logps', ep_logps[perm])

            cir_loss, ev_before, ev_after = train_critic(
                critic=critic, sess=sess, obs=ep_obs, targets=ep_target_vals)
            act_loss = train_actor(actor=actor,
                                   sess=sess,
                                   obs=ep_obs,
                                   advs=ep_advs,
                                   acs=ep_acs,
                                   logps=ep_logps)
            if args.tboard:
                summ, _, _ = sess.run([merged, actor.ac, critic.v],
                                      feed_dict={
                                          actor.ob: ep_obs,
                                          critic.obs: ep_obs,
                                          reward: np.sum(ep_rews)
                                      })
                writer.add_summary(summ, i)
            #logz
            act_lr, cur_beta, cur_gamma = actor.get_opt_param(sess)
            kl_dist = actor.get_kl(sess=sess,
                                   obs=ep_obs,
                                   logp_feeds=ep_logps,
                                   acs=ep_acs)

            #updates the learning rate based on the observed kl_distance and its multiplicative distance to desired_kl
            if kl_dist < desired_kl / 4:
                new_lr = min(max_lr, act_lr * 1.5)
                actor.set_opt_param(sess=sess, new_lr=new_lr)
            elif kl_dist > desired_kl * 4:
                new_lr = max(min_lr, act_lr / 1.5)
                actor.set_opt_param(sess=sess, new_lr=new_lr)

            if log_gamma_schedule.update_time(i):
                new_gamma = np.power(10., log_gamma_schedule.val(i))
                actor.set_opt_param(sess=sess, new_gamma=new_gamma)
                print('\nUpdated gamma from %.4f to %.4f.' %
                      (cur_gamma, new_gamma))
            if log_beta_schedule.update_time(i):
                new_beta = np.power(10., log_beta_schedule.val(i))
                actor.set_opt_param(sess=sess, new_beta=new_beta)
                print('Updated beta from %.4f to %.4f.' % (cur_beta, new_beta))

            logger(i,
                   act_loss=act_loss,
                   circ_loss=np.sqrt(cir_loss),
                   avg_rew=avg_rew,
                   ev_before=ev_before,
                   ev_after=ev_after,
                   act_lr=act_lr,
                   print_tog=(i % 20) == 0,
                   kl_dist=kl_dist,
                   avg_ent=avg_ent)
            if i % 100 == 50:
                logger.flush()

            if i % args.save_every == 0:
                saver.save(sess, CHECKPOINT_PATH, global_step=tot_rolls)
            tot_rolls += rolls

    del logger
Example #10
    start_epoch = components['epoch']
    print("Loaded model from {}".format(args.load_path))
print("Decoder: {}".format(args.decoder))

args_for_save = encoder.args
encoder = nn.DataParallel(encoder).cuda()
decoder = nn.DataParallel(decoder).cuda()
criterion = criterion.cuda()

# ----------------------------------------------------------------------------
# training
# ----------------------------------------------------------------------------
encoder.train()
decoder.train()
os.makedirs(args.save_path, exist_ok=True)
logger = utils.Logger(os.path.join(args.save_path, 'log.txt'))

running_loss = 0.0
train_begin = datetime.datetime.utcnow()
print("Training start time: {}".format(
    datetime.datetime.strftime(train_begin, '%d-%b-%Y-%H:%M:%S')))

for epoch in range(1, model_args.num_epochs + 1):
    if epoch == 1:
        clr = utils.cyclic_lr(args.iter_per_epoch, 0.9 * 1e-3, 1.0 * 1e-3)
    elif epoch == 2:
        clr = utils.cyclic_lr(args.iter_per_epoch, 0.9 * 1e-3, 0.8 * 1e-3)
    elif epoch == 3:
        clr = utils.cyclic_lr(args.iter_per_epoch, 0.7 * 1e-3, 0.8 * 1e-3)
    elif epoch == 4:
        clr = utils.cyclic_lr(args.iter_per_epoch, 0.7 * 1e-3, 0.6 * 1e-3)
def main(argv):
    del argv
    filename = os.path.join(
        os.getcwd(), "log-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + ".txt")
    sys.stdout = utils.Logger(filename)  # sys.stdout normally points to the console; redirect it to the log file as well

    if FLAGS.need_MP3_to_WAV:
        mp3_dir = FLAGS.mps_dir
        wav_dir = FLAGS.wav_dir
        batchpro_MP3_to_WAV(mp3_dir, wav_dir)

    if FLAGS.need_feature_SMILExtract:
        """
      需要提取特征(文件夹之间的逻辑关系要根据实际情况改写)
      """
        wav_dir = FLAGS.wav_dir
        file_list = os.listdir(wav_dir)
        for f in file_list:
            sub_wav_dir = os.path.join(wav_dir, f)
            if os.path.isdir(sub_wav_dir):  # this check only works reliably with absolute paths
                feature_dir = os.path.join(wav_dir, f + "_feature")
                feature_SMILExtract(sub_wav_dir,
                                    feature_dir)  # sub_wav_dir must not contain subdirectories

    if FLAGS.need_save_features:
        """
      需要把特征都整合到一个文件里。(文件夹之间的逻辑关系要根据实际情况改写)
      """
        personal_dir = FLAGS.feature_dir
        file_list = os.listdir(personal_dir)
        feature_list = []
        y_list = []
        for t in file_list:
            if t.__contains__("angry"):
                y = 0
            elif t.__contains__("fear"):
                y = 1
            elif t.__contains__("happy"):
                y = 2
            elif t.__contains__("neutral"):
                y = 3
            elif t.__contains__("sad"):
                y = 4
            else:  # surprise
                y = 5
            sub_dir = os.path.join(personal_dir, t)
            if os.path.isdir(sub_dir):  # this check only works reliably with absolute paths
                for f in os.listdir(sub_dir):
                    if f[-4:] == ".txt":
                        this_feature_path = os.path.join(sub_dir, f)
                        this_feature = np.loadtxt(this_feature_path,
                                                  skiprows=0)
                        feature_list.append(this_feature)
                        y_list.append(y)
        if len(y_list) != 0:
            y_array = np.array(y_list)
            y_array = y_array.reshape(len(y_array), 1)

            feature_array = np.array(feature_list)
            feature_array = feature_array.reshape(len(feature_array), -1)

            data = np.column_stack((y_array, feature_array))
            print("{0} 中的语音情感特征整合完成,保存在data.txt中,数据集大小{1}\n".format(
                personal_dir, data.shape))

            loc = os.path.join(personal_dir,
                               personal_dir.split('/')[-1] + "_data.txt")
            np.savetxt(loc, data)  # overwrite the txt file if it exists, otherwise create it
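
The *_data.txt file written here has the layout that Example #6 reads back in: the emotion label in column 0 and the flattened feature vector in the remaining columns.

data = np.loadtxt(loc, skiprows=0)  # same call as in Example #6
y = data[:, 0]                      # emotion label
x = data[:, 1:]                     # extracted speech features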