コード例 #1
0
def gen_y4m(net, vid, device):
    """Super-resolve every LR frame of video `vid` with `net` and write a y4m.

    Args:
        net: model mapping a LR tensor to a SR tensor.
        vid: integer video id; used both to glob the input .npy frames and
            to name the output file.
        device: torch device inference runs on.

    Returns:
        vid, unchanged (handy when dispatching videos to worker processes).
    """
    lr_fns = sorted(
        glob.glob(os.path.join(args.infer_path, '*{:05d}*.npy'.format(vid))))
    # Videos with id >= mid are submitted subsampled: keep every 25th frame.
    lr_fns = lr_fns[::25] if (vid >= mid and len(lr_fns) > 4) else lr_fns
    message = 'generating vid {}, total frame {}'.format(vid, len(lr_fns))
    print(message)
    logging(logger, message)

    frames = []
    for lr_fn in lr_fns:
        lr_tensor = read_as_tensor(lr_fn).to(device)

        sr_tensor = net(lr_tensor).squeeze().float().permute(1, 2, 0)
        # De-normalize back to [0, 255]; stored precision appears to differ
        # slightly from the numpy path (translated from original note).
        sr_tensor = torch.clamp((sr_tensor + mean_torch) * 255, 0,
                                255).round()
        sr_np = sr_tensor.detach().cpu().numpy().astype(
            np.uint8)  # ycbcr for np.load, rgb for misc.imread

        frames.append(sr_np)

    today = time.strftime('%m%d', time.localtime(time.time()))
    specified_path = os.path.join(args.output_path,
                                  '{}_{}'.format(today, args.model.lower()))

    # exist_ok avoids the exists()/makedirs() race when several videos
    # are generated in parallel (also fixes the 'spcified' typo locally).
    os.makedirs(specified_path, exist_ok=True)
    if vid < mid:
        whole_save_path = os.path.join(specified_path,
                                       'Youku_{:05d}_h_Res.y4m'.format(vid))
    else:
        whole_save_path = os.path.join(
            specified_path, 'Youku_{:05d}_h_Sub25_Res.y4m'.format(vid))
    frames2y4m(frames, whole_save_path)

    return vid
コード例 #2
0
def run_assembler(assembler_name,
                  read_alignment,
                  res_gtf,
                  params=None):
  """Invoke a transcript assembler (e.g. stringtie) on a read alignment.

  Args:
    assembler_name: which assembler to use
    read_alignment: absolute path of read alignment file
    res_gtf: absolute path to store assembly res
    params: optional dict mapping a param flag (e.g. -h) to its value;
      None means run with the assembler's defaults.
  Returns:
    res_gtf, the path where the assembly (gtf format) was written
  """
  # Start from the assembler's default command line, then append any
  # caller-supplied parameter overrides.
  cmd = get_default_cmds(assembler_name, read_alignment, res_gtf)
  if params is not None:
    cmd += util.params_dic2str(params)

  util.run_cmd(cmd)
  util.logging('%s written'%(res_gtf))
  return res_gtf
コード例 #3
0
def extract_stat(eval_res_prefix):
    """Parse the <eval_res_prefix>.stats file written by evaluate().

  Args:
    eval_res_prefix: prefix of a set of files generated from evaluate()
  Returns:
    a dic w/ key as metric (e.g. Intron) and val as [sens, precision]
      possible keys: Base, Exon, Intron, Transcript etc
  """
    stat = {}  # metric name -> [sensitivity, precision]

    with open(eval_res_prefix + '.stats', 'r') as fi:
        for line in fi:
            # Skip comment lines and lines too short to carry a metric row.
            if line[0] == '#':
                continue
            tokens = line.split()
            if len(tokens) < 5:
                continue
            # Rows look like: "<Metric> level: <sens> | <prec> ..."
            if tokens[1] == 'level:':
                stat[tokens[0]] = [float(tokens[2]), float(tokens[4])]

    util.logging('extracted stats:')
    util.logging(stat)
    return stat
コード例 #4
0
def main(*args, **kwargs):
    """Set up the run directory, build the input dataset, plot it, iterate.

    Configuration comes from the module-level FLAGS object; matplotlib is
    required (guarded below).
    """
    if not HAS_MATPLOTLIB:
        raise ImportError(
            "Please install matplotlib to generate a plot from this example.")

    ############################################
    # Step 1: Define parameters for the models #
    ############################################
    util.mkdir_p(FLAGS.run_dir)
    util.logging(FLAGS)
    tf.logging.info('[Step 1]: Define parameters for the models')
    util.report_param(FLAGS)

    ################################
    # Step 2: Define input dataset #
    ################################
    tf.logging.info('[Step 2]: Define input dataset')
    # NOTE(review): x/y are only bound for 'random_signal'; any other
    # data_type would raise NameError below — confirm this is intended.
    if FLAGS.data_type == 'random_signal':
        x, y = util.random_signal(length=FLAGS.data_length)

    # Save the data into a numpy archive and plot the raw timeseries.
    np.savez_compressed(os.path.join(FLAGS.run_dir, 'input-data'), x=x, y=y)
    plt.plot(x, y)
    plt.savefig(os.path.join(FLAGS.run_dir, 'input-timeseries.jpg'))

    # Training loop is currently a placeholder (body is `pass`).
    for i in tqdm(range(FLAGS.iterations)):  # train the model iterations
        pass
    print("learning rate", FLAGS.learning_rate)
コード例 #5
0
def extract_stat_shannon(eval_res_dir):
    """Parse eval_res_dir/summary.log written by evaluate_shannon().

  Args:
    eval_res_dir: dir of a set of files generated from evaluate_shannon()
  Returns:
    a dic w/ key as metric (e.g. aligned, detected, ref) and val int
  """
    #TODO(bowen): better summary log
    stat = {}

    with open(eval_res_dir + '/summary.log', 'r') as fi:
        for line in fi:
            # Skip comments and blank lines.
            if line[0] == '#':
                continue
            tokens = line.split()
            if not tokens:
                continue

            # "total seq <aligned> ... <detected>" row
            if tokens[0] == 'total' and tokens[1] == 'seq':
                stat['aligned'] = int(tokens[2])
                stat['detected'] = int(tokens[4])
            # "num_reference_seq <ref>" row
            elif tokens[0] == 'num_reference_seq':
                stat['ref'] = int(tokens[1])

    util.logging('extracted stats:')
    util.logging(stat)
    return stat
コード例 #6
0
def check_bo_tune(assembler_name, metric_type, paths):
    """Tune assembler params via Bayesian optimization and log the best result.

    Args:
      assembler_name: which assembler to tune (e.g. 'stringtie')
      metric_type: metric id forwarded to the evaluator (e.g. 'tr-f1')
      paths: dict of required paths ('param_config_path', 'log', ...)
    """

    print '----- CHECK BO TUNE -----'

    param_default, param_range, param2str = config.parse_params_bo(
        paths['param_config_path'])

    # The objective is a closure that runs+evaluates the assembler for a
    # given parameter sample and returns the scalar metric to maximize.
    bo = BayesianOptimization(
        call_and_eval_assembler(assembler_name, metric_type, paths, param2str),
        param_range)

    gp_params = {'kernel': None, 'alpha': 1e-3}
    bo.maximize(init_points=5, n_iter=50, kappa=5, **gp_params)

    msg = 'BO-tuned metric (%s) for %s:\n' % (metric_type, assembler_name)
    msg += 'bo.res[max]:\n'
    msg += str(bo.res['max'])

    util.logging(msg, paths['log'])

    # Extra lines only go to stdout, not to the log file.
    msg += '\nbo tune done\n'
    msg += '%s written\n' % paths['log']
    print msg

    return
コード例 #7
0
def calc_metric(stat_dic, metric_type, lam=0.5):
    """Calibrated metric from transcript-level sensitivity/precision.

  Args:
    stat_dic: a dic w/ key as e.g. Intron etc and val as [sens, precision]
      obtained from extract_stat
    metric_type: indicates the way to calculate the metric.
      possible types:
        'tr-sum': sens + prec
        'tr-f1': weighted F1 of sens and prec (lam weights precision)
    lam: a weight to adjust F1 score
  Returns:
    a float of metric, or None for an unknown metric_type
  """

    if metric_type == 'tr-sum':
        sens, prec = stat_dic['Transcript']
        return float(sens + prec)
    elif metric_type == 'tr-f1':
        sens, prec = stat_dic['Transcript']
        sens = (1 - lam) * sens
        prec = lam * prec
        sum_val = sens + prec
        if sum_val == 0:
            # both rates vanished: define F1 as 0 instead of dividing by zero
            return 0
        # reuse sum_val (was recomputed as sens + prec in the denominator)
        return float(2 * sens * prec / float(sum_val))
    else:
        util.logging('unknown metric_type; None to be returned.')
        return None
コード例 #8
0
 def convert(v, tp):
     """Cast v to the type named by tp ('float'/'int'); None if unknown."""
     casters = {'float': float, 'int': int}
     if tp in casters:
         return casters[tp](v)
     util.logging('unknown type: ' + tp)
     return None
コード例 #9
0
ファイル: divide.py プロジェクト: froyog/bbs-spider
 def start(self):
     """Download attachments for ids (initId, endId) and log each id.

     Bug fix: the old code did `logger = open(...)` inside the try and
     referenced `logger` in `finally`; if open() itself raised, `logger`
     was unbound and the finally clause raised NameError. A `with` block
     closes the file on every path without that hazard.
     """
     with open('log', 'a') as logger:
         for attachId in range(self.initId + 1, self.endId):
             attachment = get_attach(self.rootUrl + str(attachId))
             if attachment:
                 save_file(attachment)
             # record every processed id, even when nothing was saved
             logging(logger, attachId)
コード例 #10
0
def get_default_cmds(assembler_name, read_alignment, res_gtf):
  """Build the baseline shell command for a supported assembler.

  Returns the command string, or None (after logging) for an unknown
  assembler name.
  """
  if assembler_name == 'stringtie':
    # stringtie writes the gtf directly; 25 worker threads
    return 'stringtie %s -o %s -p 25 '%(read_alignment, res_gtf)

  if assembler_name == 'cufflinks':
    # cufflinks takes an output *directory* rather than a file path
    res_dir, _ = util.parent_dir(res_gtf)
    return 'cufflinks -o %s %s '%(res_dir, read_alignment)

  util.logging('unknown assembler: %s'%assembler_name)
  return None
コード例 #11
0
ファイル: qlearn.py プロジェクト: zhaoshaojun/flappy_bird_ai
def q_learning(mode, filename=None):
    """DQN-style train/eval loop for the flappy-bird agent.

    Args:
        mode: 'test' uses a short observation phase (1k steps); any other
            value observes for 3.2k steps before training starts.
        filename: optional weights file forwarded to init_network.

    NOTE(review): `while (True)` never breaks, so the trailing prints are
    unreachable; presumably the process is stopped externally — confirm.
    """

    if mode == 'test':
        TOTAL_OBSERVATION = 1_000
    else:
        TOTAL_OBSERVATION = 3_200

    observe = TOTAL_OBSERVATION
    epsilon = INITIAL_EPSILON

    # init network
    network = init_network(observe, epsilon, mode, filename)

    # open up a game state to communicate with emulator
    game_state = game.GameState()

    # store the previous observations in replay memory
    queue = deque(maxlen=REPLAY_MEMORY)

    s_t0 = get_init_stack(game_state)

    t = 0
    time0 = time.time()
    total_loss = 0
    while (True):
        action_index, r_t = 0, 0
        a_t = np.zeros([ACTIONS])  # one-hot action vector
        action_index = chose_action(network, s_t0, a_t, t, epsilon)
        a_t[action_index] = 1

        # We reduced the epsilon gradually (only once training has begun)
        if epsilon > FINAL_EPSILON and t > observe:
            epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / TOTAL_EXPLORE

        s_t1, r_t, terminal = get_next_stack(game_state, a_t, s_t0)

        # replay entry: (state, action, reward, next state, done flag)
        queue.append((s_t0, action_index, r_t, s_t1, terminal))

        if t > observe:
            # only train if done observing
            loss, q_sa = train_network(queue, network)
        else:
            loss, q_sa = 0, 0

        total_loss += loss
        s_t0, t = s_t1, t + 1

        logging(mode, t, time0, network, observe, epsilon, action_index, r_t, q_sa, loss, total_loss, TOTAL_EXPLORE)

    print("Episode finished!")
    print("************************")
コード例 #12
0
    def _call_and_eval_assembler(**kwargs):
        """Run the assembler with BO-proposed params, evaluate, log, return metric.

        kwargs hold BO-sampled parameter values; the pseudo-param '-lam'
        (if present) is an F1 weight consumed here, not passed to the
        assembler. Returns the scalar metric the optimizer maximizes.
        """

        params = util.kwargs_to_params_dic(kwargs, param2str)
        if params is not None:
            # '-lam' weights the F1 metric; strip it before the assembler run.
            lam = params.get('-lam', 0.5)
            params.pop('-lam', None)
        else:
            lam = 0.5

        assembler.run_assembler(assembler_name, paths['read_alignment'],
                                paths['assembly_gtf'], params)

        evaluator.evaluate(paths['ref_gtf'], paths['assembly_gtf'],
                           paths['eval_res_prefix'])

        metric_stat = evaluator.extract_stat(paths['eval_res_prefix'])

        #TODO(shunfu): find a good metric for BO
        if metric_stat == {}:
            # evaluation produced nothing usable: score the run as 0
            metric = 0
        else:
            metric = evaluator.calc_metric(metric_stat, metric_type, lam)

        # res logs
        util.logging('assembler:' + assembler_name, paths['log'])
        util.logging(
            'params: default' if params is None else 'params:\n' + str(params),
            paths['log'])
        util.logging('metric stat is: %s' % str(metric_stat), paths['log'])
        util.logging(
            'metric (%s, lam=%.2f) is %f\n' % (metric_type, lam, metric),
            paths['log'])

        return metric
コード例 #13
0
def check_baseline(assembler_name, metric_type, paths):
    """Evaluate the assembler with default params and log the baseline metric.

    Args:
      assembler_name: which assembler to run (e.g. 'stringtie')
      metric_type: metric id forwarded to the evaluator (e.g. 'tr-f1')
      paths: dict of required paths ('param_config_path', 'log', ...)
    """

    print '----- CHECK ASSEMBLY BASELINE -----'

    # Parsed values are unused here; parsing also validates the config file.
    param_default, param_range, param2str = config.parse_params_bo(
        paths['param_config_path'])

    # Calling the closure with no kwargs runs the assembler with defaults.
    fn = call_and_eval_assembler(assembler_name, metric_type, paths)
    metric = fn()

    msg = 'Baseline metric (%s) for %s: %f\n\n' % (metric_type, assembler_name,
                                                   metric)
    util.logging(msg, paths['log'])

    print msg
    return
コード例 #14
0
ファイル: dataloader.py プロジェクト: zyqgmzyq/tianchi-uku
 def __init__(self, train_path):
     """Index HR/LR .npy training pairs under train_path.

     Reads the module-level `args` for debug/subsampling/preload options.
     NOTE(review): the log strings spell 'lenght' (sic); left untouched
     since runtime strings are out of scope for a documentation pass.
     """
     super(TrainDataset, self).__init__()
     if args.debug:
         args.dN = 2
     # Drop the last dN ground-truth frames, then keep every dR-th one.
     self.list_hr = sorted(
         glob.glob(os.path.join(train_path,
                                '*_h_GT_*.npy')))[:-args.dN:args.dR]
     # LR paths are derived from HR paths by filename substitution.
     self.list_lr = [hr.replace('h_GT', 'l') for hr in self.list_hr]
     print('1', self.list_lr)
     if args.debug:
         # tiny fixed subset for quick debug runs
         self.list_lr = self.list_lr[:32]
         self.list_hr = self.list_hr[:32]
     print('data lenght: ', len(self.list_lr) * args.repeat)
     logging(logger, 'data lenght: {}'.format(len(self.list_lr)))
     # Optionally preload decoded frames into memory to speed up training.
     if args.load_mem == 'all':
         self.lr_np = [read_img(lr) for lr in self.list_lr]
         self.hr_np = [read_img(hr) for hr in self.list_hr]
     elif args.load_mem == 'lr':
         self.lr_np = [read_img(lr) for lr in self.list_lr]
         self.hr_np = []
コード例 #15
0
def calc_metric_shannon(stat_dic, metric_type):
    """Calibrated metric from shannon alignment counts.

  Args:
    stat_dic: a dic w/ key as aligned, detected and ref (int counts)
    metric_type: indicates the way to calculate the metric.
      possible types:
        'tr-sum': sens + prec
        'tr-f1': F1 score of sens and prec
  Returns:
    a float of metric, or None for an unknown metric_type
  """

    sens = float(stat_dic['aligned']) / stat_dic['ref']
    prec = float(stat_dic['aligned']) / stat_dic['detected']

    if metric_type == 'tr-sum':
        return float(sens + prec)
    elif metric_type == 'tr-f1':
        denom = sens + prec
        if denom == 0:
            # aligned == 0 means sens == prec == 0; guard the F1 division
            # (consistent with calc_metric(), which already guards this)
            return 0
        return float(2 * sens * prec / float(denom))
    else:
        util.logging('unknown metric_type; None to be returned.')
        return None
コード例 #16
0
def test_params_dic2str():
    """Smoke-test util.params_dic2str and log its output.

  For example,
    params_dic={'-a': 1} should yield '-a 1'
    params_dic={'-b': 0.1} should yield '-b 0.1'
  (The previous docstring was copied from util.params_dic2str itself and
  described Args/Returns this no-arg test does not have; it also wrote the
  example dicts as set literals `{'-a', 1}`.)
  """
    params_dic = {'-a': 1, '-b': 0.1}
    params_str = util.params_dic2str(params_dic)
    util.logging('params_dic:')
    util.logging(params_dic)
    util.logging('params_str:')
    util.logging(params_str)

    return
コード例 #17
0
ファイル: unet_train.py プロジェクト: zyqgmzyq/tianchi-uku
              ]  # 10-20-30-50 -> [10, 20, 30, 50]
scheduler = lrs.MultiStepLR(
    optimizer,
    # Decay the LR by `gamma` each time a milestone epoch is reached.
    # (small milestone steps for hdnet, large ones for dbcn)
    milestones=milestones,
    gamma=0.5)

if args.ckpt:
    net.load(args.ckpt)
    # Fast-forward the scheduler so the LR matches the restored epoch.
    for i in range(args.repoch):
        scheduler.step()
    print('restored ckpt from {} at step: {}'.format(
        args.ckpt if args.ckpt else ckpt, args.repoch))

now = time.strftime('%Y.%m.%d %H:%M:%S\n', time.localtime(time.time()))
logging(logger, args.message + now)

# best validation PSNR seen so far
psnr_best = 0

print('begin training')
for epoch in range(args.repoch, args.epochs):
    scheduler.step()
    start_train = time.time()
    total_loss = 0
    cnt = 0
    for iteration, batch in enumerate(train_loader):
        lr = batch[0]
        label = batch[1]
        label_down4 = F.interpolate(label,
                                    scale_factor=0.25,
                                    mode="bicubic",
コード例 #18
0
        whole_save_path = os.path.join(spcified_path,
                                       'Youku_{:05d}_h_Res.y4m'.format(vid))
    else:
        whole_save_path = os.path.join(
            spcified_path, 'Youku_{:05d}_h_Sub25_Res.y4m'.format(vid))
    frames2y4m(frames, whole_save_path)

    return vid


if __name__ == '__main__':

    # Inference entry point: set up logging, device, mean tensor, video ids.
    # logger = 'log/{}_infer.txt'.format(args.model.lower())
    logger = 's4_rcan_ps32_bs16_lossYUV.txt'
    now = time.strftime('%Y.%m.%d %H:%M:%S\n', time.localtime(time.time()))
    logging(logger, args.message + now)
    logging(logger, 'using gpu: {}\n'.format(torch.cuda.is_available()))

    if args.output_path: mkdirs(args.output_path)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # Channel-wise mean used to de-normalize the network output.
    mean_numpy = np.array(args.mean).reshape((1, 1, -1))
    mean_torch = torch.from_numpy(mean_numpy).float().to(device)

    #### vid setting ####
    begin = args.begin_id
    mid = args.mid_id
    end = args.end
    assert mid - begin == 5, 'full video must be 5'
    assert end - begin == 50, 'end id must be 50 more than actual given id'
    vids = [i for i in range(begin, end)]
    # NOTE(review): duplicate of the line above — likely a copy/paste slip;
    # harmless (same value reassigned) but worth cleaning up.
    vids = [i for i in range(begin, end)]
コード例 #19
0
def test_logging():
    """Smoke-test util.logging with an explicit log file."""
    greeting = 'hello world!'
    util.logging(greeting, log='tmp.txt')
コード例 #20
0
def test_kwargs_to_params_dic():
    """Smoke-test kwargs -> params-dic conversion and log the result."""
    sample_kwargs = {'a': 1, 'b': 2.0}
    spec = {'a': ['-a', 'int'], 'b': ['--b', 'float']}
    util.logging(util.kwargs_to_params_dic(sample_kwargs, spec))
コード例 #21
0
def _test_run_assembler(assembler_name, read_alignment, res_gtf):
    """Run one assembler invocation end-to-end and log the written gtf path."""
    assembler.run_assembler(assembler_name, read_alignment, res_gtf)
    util.logging(res_gtf + ' written')
コード例 #22
0
def test_calc_metric_shannon():
    """Log shannon metrics for a fixed stat dict under both metric types."""
    sample_stat = {'aligned': 2429, 'detected': 10713, 'ref': 7703}
    for mt in ('tr-sum', 'tr-f1'):
        util.logging(str(mt))
        util.logging(str(evaluator.calc_metric_shannon(sample_stat, mt)))
コード例 #23
0
def parse_params_old(param_config_path):
    """parse params configs.

  The expected format:
  - param type default range
  - skip # as comments
  - e.g. -f float 0.1 (0.0,1.0) # bla bla bla

  Args:
    param_config_path: absolute path of param config file.

  Returns:
    res_range_lis: list of per-param domain dicts
      ({'name', 'type', 'domain'}), one per parameter, in file order
    res_param2str: dic w/ key as param str (e.g. f) and val as
      [aug param str (e.g. -f), tp]
    param_str_list: list of flag strings (e.g. -f) in file order
    res_default: dic w/ key as param str (e.g. f) and val as [default_val]
    res_range: dic w/ key as param str (e.g. f) and val as a range tuple
      ((min, max) for floats; every int in [min, max] for ints)

    Note: these are mainly meant to be used by the BO framework

  """
    def convert(v, tp):
        # Cast a raw token to the declared param type; None if tp unknown.
        if tp == 'float':
            return float(v)
        elif tp == 'int':
            return int(v)
        else:
            util.logging('unknown type: ' + tp)
            return None

    res_default = {}      # param -> [default value]
    res_range = {}        # param -> tuple of allowed values / (min, max)
    res_param2str = {}    # param -> [original flag string, type string]
    res_range_lis = []    # BO-style domain dicts, one per param
    param_str_list = []   # flag strings in file order

    with open(param_config_path, 'r') as fi:

        for line in fi:
            if line[0] == '#': continue
            tokens = [t for t in line.split() if t != '']
            if len(tokens) < 4: continue
            # only lines whose first token is a flag (starts with '-') count
            if tokens[0][0] == '-':
                param_str = tokens[0]
                #TODO(shunfu) init '-' is not asc-ii
                #if param_str=='-min-frags-per-transfrag':
                #  pdb.set_trace()
                #param = param_str.split('-')[-1]
                param = param_str[1:]  # drop only the leading '-'
                tp = tokens[1]

                res_param2str[param] = [param_str, tp]

                val_default = convert(tokens[2], tp)
                # range token looks like "(min,max)": strip parens, split
                val_range = tokens[3][1:-1].split(',')
                val_min = convert(val_range[0], tp)
                val_max = convert(val_range[1], tp)

                res_default[param] = [val_default]
                if tp == 'float':
                    res_range[param] = tuple([val_min, val_max])
                else:
                    # int params are enumerated as a discrete tuple
                    res_range[param] = tuple(
                        [i for i in range(val_min, val_max + 1)])

                # Creating a list of dicts; a dict per parameter
                res_range_lis.append({
                    'name':
                    str(param),
                    'type':
                    str('continuous' if tp == 'float' else 'discrete'),
                    'domain':
                    tuple([val_min, val_max])
                })
                param_str_list.append(param_str)

        util.logging('%s parsed as:' % param_config_path)
        util.logging('default:')
        util.logging(str(res_default))
        util.logging('range:')
        util.logging(str(res_range))
        util.logging('param2str:')
        util.logging(str(res_param2str))

    #return res_default, res_range, res_param2str
    #pdb.set_trace()
    return res_range_lis, res_param2str, param_str_list, res_default, res_range
コード例 #24
0
ファイル: submit.py プロジェクト: zqcccc/MLDemo
import warnings
warnings.filterwarnings("ignore")  # silence noisy third-party warnings

import torch
import torch.nn as nn

# config
config = util.read_config('./configs/predict.yaml')
torch.manual_seed(config.seed)  # reproducibility
# log
log_path = './data/results/'
# log_path = config.log  + '2018-10-20-12:17:22/'
if not os.path.exists(log_path):
    os.mkdir(log_path)
logging = util.logging(log_path+'res_log.txt') # records this run's log

# checkpoint
if config.checkpoint_restore:
    print('loading checkpoint from {}...'.format(config.checkpoint_restore))
    checkpoints = torch.load(config.checkpoint_restore)

# cuda
use_cuda = torch.cuda.is_available() and len(config.gpus) > 0
#use_cuda = True
if use_cuda:
    torch.cuda.set_device(config.gpus[0])
    torch.cuda.manual_seed(config.seed)
print('can use cuda: {}'.format(use_cuda))

# data
コード例 #25
0
def test_calc_metric():
    """Log calc_metric results for a fixed Transcript stat dict."""
    sample_stat = {'Transcript': [0.3, 0.4]}
    for mt in ('tr-sum', 'tr-f1'):
        util.logging(str(mt))
        util.logging(str(evaluator.calc_metric(sample_stat, mt)))