Example #1
def main():
    try:
        command = sys.argv[1]
    except IndexError:
        print(usage)
        return
    if command == 'build':
        build()
    elif command == 'new':
        new()
Example #2
def main():
    print("This is argv:", sys.argv)
    command = sys.argv[1]
    print(command)
    if command == "build":
        print("Build was specified")
        utils.build()
    elif command == "new":
        print("New page was specified")
        utils.new()
    else:
        print("Please specify 'build' or 'new'")
Example #3
def main():
    try:
        command = sys.argv[1]
    except IndexError:
        print(USAGE)
        return
    if command == 'build':
        build()
    elif command == 'new':
        new()
    else:
        print(USAGE)
        return
Example #4
def main():
  opt, logger, stats, vis = utils.build(is_train=False, tb_dir=None, logging=None)
  # Load model opt
  model_opt = np.load(os.path.join(opt.ckpt_path, 'opt.npy')).item()
  model_opt.is_train = False
  # Change geometry to the testing one
  model_opt.geometry = opt.geometry
  model = HeatModel(model_opt)
  print('Loading data from {}'.format(opt.dset_path))

  # For convenience
  opt.iterator = model_opt.iterator

  # Get data
  num = 100
  data = get_data(opt.geometry, num, opt.dset_path, opt.max_temp)

  epoch = opt.which_epochs[0]
  if epoch < 0:
    # Pick last epoch
    checkpoints = glob.glob(os.path.join(opt.ckpt_path, 'net_*.pth'))
    assert len(checkpoints) > 0
    epochs = [int(path[:-4].split('_')[-1]) for path in checkpoints]
    epoch = sorted(epochs)[-1]

  model.load(opt.ckpt_path, epoch)
  print('Checkpoint loaded from {}, epoch {}'.format(opt.ckpt_path, epoch))
  model.setup(is_train=False)
  times = runtime(opt, model, data)

  print('{} examples, {:.3f} sec'.format(len(times), sum(times)))
  print('Adjusted time: {:.3f} sec'.format(np.mean(times) * num))
Example #5
def main():
    opt, logger, stats, vis = utils.build(is_train=False, tb_dir='tb_val')
    # Load model opt
    model_opt = np.load(os.path.join(opt.ckpt_path, 'opt.npy')).item()
    model_opt.is_train = False
    # Change geometry to the testing one
    model_opt.geometry = opt.geometry
    model = HeatModel(model_opt)
    logger.print('Loading data from {}'.format(opt.dset_path))

    # For convenience
    opt.initialization = model_opt.initialization
    opt.iterator = model_opt.iterator
    data_loader = data.get_data_loader(opt)
    print('####### Data Loaded ########')

    for epoch in opt.which_epochs:
        if epoch < 0:
            # Pick last epoch
            checkpoints = glob.glob(os.path.join(opt.ckpt_path, 'net_*.pth'))
            assert len(checkpoints) > 0
            epochs = [int(path[:-4].split('_')[-1]) for path in checkpoints]
            epoch = sorted(epochs)[-1]

        model.load(opt.ckpt_path, epoch)
        logger.print('Checkpoint loaded from {}, epoch {}'.format(
            opt.ckpt_path, epoch))
        test(opt, model, data_loader, logger, vis)
Example #6
def cache_masks():
    opt, logger = utils.build(is_train=False)
    opt.combine_method = ''
    opt.split = 'train'
    cache_dir_name = 'jaad_collapse{}'.format(
        '_' + opt.combine_method if opt.combine_method else '')
    data.cache_all_objs(opt, cache_dir_name)
Example #7
def run(args):
    train_csv = args.src
    epochs = args.epochs
    cw = args.cw
    bs = args.bs
    h5file = args.model
    #imports
    import pandas as pd
    train = []
    train_label = []
    csv = pd.read_csv(train_csv, header=None)
    print(csv.columns)
    for index, row in csv.iterrows():
        train.append(utils.preprocess(row[0]))
        train_label.append(row[1])
    train = np.asarray(train)
    train_label = np.asarray(train_label)
    print(str(train.shape[0]) + ' images preprocessed!')
    model = utils.build()
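    # Give the positive class a user-supplied weight (cw) to counter class imbalance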
    class_weight = {0: 1., 1: cw}
    model.fit(x=train,
              y=train_label,
              epochs=epochs,
              batch_size=bs,
              class_weight=class_weight,
              shuffle=True)
    model.save(h5file)
Example #8
def init(mode):
    print("检测tunasync是否存在...")
    code = 1
    cmd = delegator.run("/usr/bin/tunasync -v")
    print(cmd.out)
    if cmd.return_code != 0:
        code = 0
    cmd = delegator.run("/usr/bin/tunasynctl -v")
    print(cmd.out)
    if cmd.return_code != 0:
        code = 0
    if code == 1:
        print("tunasync已存在,无需安装...")
    else:
        if mode:
            mode = int(mode)
        else:
            mode = 2
        print("开始安装...")
        try:
            if mode == 1:
                if bin() < 0:
                    print(
                        "安装tunasync时出错\n您可以自行将tunasync和tunasynctl放置到/usr/bin目录后再进行初始化操作..."
                    )
                    return 0
                else:
                    pass
            else:
                if build() < 0:
                    print(
                        "安装tunasync时出错\n您可以自行将tunasync和tunasynctl放置到/usr/bin目录后再进行初始化操作..."
                    )
                    return 0
                else:
                    pass
        except:
            print(
                "安装tunasync时出错\n您可以自行将tunasync和tunasynctl放置到/usr/bin目录后再进行初始化操作..."
            )
            return 0
    try:
        print("开始设置manager...")
        init_manager()
        print("开始设置服务...")
        systemd()
        print("启动manager...")
        systemd_control('start', 'manager')
        print("设置manager自启...")
        systemd_control('enable', 'manager')
        print("开始设置worker...")
        init_worker()
        # print("启动worker...")
        # systemd_control('start', 'worker')
        # print("设置worker自启...")
        # systemd_control('enable', 'worker')
        print("worker将不会启动直到新增第一个mirror...")
    except:
        print("设置时出错...")
Example #9
    def compile(self):
        for build_folder in os.listdir(self.builds_path):
            build_path = os.path.join(self.builds_path, build_folder)
            try:
                build(build_path)
                logger.info('Successfully compiled %s' % build_path)
            except RuntimeError as exc:
                logger.error('Unable to build %s' % build_path)
                logger.error(exc)
                move(build_path, self.failures_path)

            try:
                build(build_path, clean=True)
            except RuntimeError as exc:
                logger.error('Error while running cleanup for %s' % build_path)
                move(build_path, self.failures_path)
                logger.error(exc)
Example #10
def get_model(gray1):
    if ARGS.network == "half_hyper_unet":
        pred1 = net.dowmsample_unet(gray1, reuse=False)
    elif ARGS.network == "hyper_unet":
        pred1 = net.VCN(utils.build(tf.tile(gray1, [1, 1, 1, 3])),
                        reuse=False,
                        div_num=1)
    return pred1
Example #11
    def compile(self):
        for build_folder in os.listdir(self.builds_path):
            build_path = os.path.join(self.builds_path, build_folder)
            try:
                build(build_path)
                logger.info('Successfully compiled %s' % build_path)
            except RuntimeError as exc:
                logger.error('Unable to build %s' % build_path)
                logger.error(exc)
                move(build_path, self.failures_path)

            try:
                build(build_path, clean=True)
            except RuntimeError as exc:
                logger.error('Error while running cleanup for %s' % build_path)
                move(build_path, self.failures_path)
                logger.error(exc)
Example #12
    def compile(self):
        patch_folders = os.listdir(self.builds_path)
        if len(patch_folders) == 0:
            logger.info('No patch folders found at %s' % self.builds_path)
            return

        for build_folder in patch_folders:
            build_path = os.path.join(self.builds_path, build_folder)
            work_path = os.path.join(build_path, 'hv-rhel7.x/hv')
            try:
                logger.info('Trying to build in %s' % work_path)
                build(work_path)
                logger.info('Successfully compiled %s' % work_path)
            except RuntimeError as exc:
                logger.error('Unable to build %s' % work_path)
                logger.error(exc)
                logger.error('Moving %s to %s ' % (build_path, self.failures_path))
                move(build_path, self.failures_path)
Example #13
def layer_add_keywords_send_related(nl_api_elt, query):
    tokens = nl_api_elt['tokens']
    entity_list = u.extract_relevant_entities(nl_api_elt)
    send_index = None
    for send_synonym in SEND_SYNONYMS:
        for i, token in enumerate(tokens):
            if token['lemma'].lower() == send_synonym:
                send_index = i
                break
    if send_index is not None:
        query_before = str(query)  # track changes.
        try:
            reverse_graph = u.reverse_directed_graph(nl_api_elt)
            operands = reverse_graph['reverse_directed_graph'][send_index]
            if len(operands) >= 1:
                op_left = operands[0]
                if op_left < send_index:
                    # Sender part.
                    sender = tokens[op_left]['lemma'].lower()
                    if sender == 'I'.lower():
                        query += u.build('from', 'me')
                    else:
                        entities_sender = [
                            v for v in entity_list if sender in v
                        ]
                        if len(entities_sender) > 0:
                            query += u.build('from', entities_sender[0])
            if len(operands) >= 2:
                op_right = operands[1]
                if op_right > send_index:
                    # Receiver part. could be the particle TO.
                    receiver = tokens[op_right]['lemma'].lower()
                    if receiver == 'to':
                        receiver = tokens[op_right + 1]['lemma'].lower()
                    if receiver == 'me' or receiver in entity_list:
                        query += u.build('to', receiver)
        except Exception as e:
            print(str(e))
        sentence = u.extract_original_sentence(nl_api_elt)
        if query_before == query:  # no modifications
            send_raw_text = tokens[send_index]['text']['content']
            for entity_name in u.extract_relevant_entities(nl_api_elt):
                if '{} {}'.format(send_raw_text, entity_name) in sentence:
                    query += u.build('to', entity_name)
Example #14
def layer_add_temporal_keywords(nl_api_elt, query):
    sentence = u.extract_original_sentence(nl_api_elt)
    if 'yesterday' in sentence:
        return query + u.build('newer_than', '1d')
    for digit in DIGITS.keys():
        for time_unit in TIME_UNIT.keys():
            pattern_1 = '{} {} ago'.format(digit, time_unit)
            pattern_2 = '{} {}s ago'.format(digit, time_unit)
            if pattern_1 in sentence or pattern_2 in sentence:
                time_index = DIGITS[digit] * TIME_UNIT[time_unit]
                return query + u.build('newer_than', '{}d'.format(time_index))
    for time_unit in TIME_UNIT.keys():
        if 'last {}'.format(time_unit) in sentence:
            time_index = TIME_UNIT[time_unit]
            return query + u.build('newer_than', '{}d'.format(time_index))
    for keyword in ['night', 'afternoon', 'morning', 'evening']:
        if keyword in sentence:
            return query + u.build('newer_than', '1d')
    return query
Example #15
def layer_add_temporal_keywords(nl_api_elt, query):
    sentence = u.extract_original_sentence(nl_api_elt)
    if 'yesterday' in sentence:
        return query + u.build('newer_than', '1d')
    for digit in DIGITS.keys():
        for time_unit in TIME_UNIT.keys():
            pattern_1 = '{} {} ago'.format(digit, time_unit)
            pattern_2 = '{} {}s ago'.format(digit, time_unit)
            if pattern_1 in sentence or pattern_2 in sentence:
                time_index = DIGITS[digit] * TIME_UNIT[time_unit]
                return query + u.build('newer_than', '{}d'.format(time_index))
    for time_unit in TIME_UNIT.keys():
        if 'last {}'.format(time_unit) in sentence:
            time_index = TIME_UNIT[time_unit]
            return query + u.build('newer_than', '{}d'.format(time_index))
    for keyword in ['night', 'afternoon', 'morning', 'evening']:
        if keyword in sentence:
            return query + u.build('newer_than', '1d')
    return query
Example #16
def algo7(G):
    """
    build tree in different way.
    """
    T = algo6(G.copy())
    allNodes = set(list(G.nodes))
    for _ in range(1):
        T = addNodes(G, T, allNodes - set(list(T.nodes)))
        T = build(G, list(T.nodes))
        T = removeNodes(G, T)
        T = build(G, list(T.nodes))
    count = 3
    while not is_valid_network(G, T) and count:
        T = addNodes(G, T, allNodes - set(list(T.nodes)))
        T = removeNodes(G, T)
        count -= 1
    if not is_valid_network(G, T):
        return algo6(G.copy())
    return T
Example #17
def layer_add_keywords_send_related(nl_api_elt, query):
    tokens = nl_api_elt['tokens']
    entity_list = u.extract_relevant_entities(nl_api_elt)
    send_index = None
    for send_synonym in SEND_SYNONYMS:
        for i, token in enumerate(tokens):
            if token['lemma'].lower() == send_synonym:
                send_index = i
                break
    if send_index is not None:
        query_before = str(query)  # track changes.
        try:
            reverse_graph = u.reverse_directed_graph(nl_api_elt)
            operands = reverse_graph['reverse_directed_graph'][send_index]
            if len(operands) >= 1:
                op_left = operands[0]
                if op_left < send_index:
                    # Sender part.
                    sender = tokens[op_left]['lemma'].lower()
                    if sender == 'I'.lower():
                        query += u.build('from', 'me')
                    else:
                        entities_sender = [v for v in entity_list if sender in v]
                        if len(entities_sender) > 0:
                            query += u.build('from', entities_sender[0])
            if len(operands) >= 2:
                op_right = operands[1]
                if op_right > send_index:
                    # Receiver part. could be the particle TO.
                    receiver = tokens[op_right]['lemma'].lower()
                    if receiver == 'to':
                        receiver = tokens[op_right + 1]['lemma'].lower()
                    if receiver == 'me' or receiver in entity_list:
                        query += u.build('to', receiver)
        except Exception as e:
            print(str(e))
        sentence = u.extract_original_sentence(nl_api_elt)
        if query_before == query:  # no modifications
            send_raw_text = tokens[send_index]['text']['content']
            for entity_name in u.extract_relevant_entities(nl_api_elt):
                if '{} {}'.format(send_raw_text, entity_name) in sentence:
                    query += u.build('to', entity_name)
Example #18
def layer_link_entities_to_from(nl_api_elt, query):
    for entity_name in u.extract_relevant_entities(nl_api_elt):
        changed = False
        for token in nl_api_elt['tokens']:
            if token['lemma'].lower() == entity_name:
                hti = token['dependencyEdge']['headTokenIndex']
                particle = nl_api_elt['tokens'][hti]['lemma']
                if particle == 'from':
                    query += u.build('from', entity_name)
                elif particle == 'to':
                    query += u.build('to', entity_name)
                changed = True
                break
        if not changed:  # maybe it's compound.
            sentence = u.extract_original_sentence(nl_api_elt)
            if 'from {}'.format(entity_name) in sentence:
                query += u.build('from', entity_name)
            elif 'to {}'.format(entity_name) in sentence:
                query += u.build('to', entity_name)
    return query
Example #19
def main():
    opt, logger, stats, vis = utils.build(is_train=True, tb_dir='tb_train')
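    # Persist the run options so the evaluation scripts earlier in this listing can reload them from opt.npy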
    np.save(os.path.join(opt.ckpt_path, 'opt.npy'), opt)
    data_loader = get_data_loader(opt)
    logger.print('Loading data from {}'.format(opt.dset_path))
    print('####### Data loaded #########')
    # Validation
    val_opt = copy.deepcopy(opt)
    val_opt.is_train = False
    val_opt.data_limit = 20
    val_loader = get_data_loader(val_opt)

    model = HeatModel(opt)

    for epoch in range(opt.start_epoch, opt.n_epochs):
        model.setup(is_train=True)
        for step, data in enumerate(data_loader):
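            # Each batch provides boundary conditions (bc), the target field (final), and the input field (x)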
            bc, final, x = data['bc'], data['final'], data['x']
            f = None if 'f' not in data else data['f']
            x = utils.initialize(x, bc, opt.initialization)
            loss_dict = model.train(x, final, bc, f)
            if (step + 1) % opt.log_every == 0:
                print('Epoch {}, step {}'.format(epoch, step))
                vis.add_scalar(loss_dict, epoch * len(data_loader) + step)

        logger.print(
            ['[Summary] Epoch {}/{}:'.format(epoch, opt.n_epochs - 1)])

        # Evaluate
        if opt.evaluate_every > 0 and (epoch + 1) % opt.evaluate_every == 0:
            model.setup(is_train=False)
            # Find eigenvalues
            if opt.iterator != 'cg' and opt.iterator != 'unet':
                w, _ = utils.calculate_eigenvalues(model, image_size=15)
                w = sorted(np.abs(w))
                eigenvalues = {'first': w[-2], 'second': w[-3], 'third': w[-4]}
                vis.add_scalar({'eigenvalues': eigenvalues}, epoch)
                logger.print('Eigenvalues: {:.2f}, {:.3f}, {:.3f}, {:.3f}'\
                              .format(w[-1], w[-2], w[-3], w[-4]))

            # Evaluate entire val set
            results, images = evaluate(opt, model, val_loader, logger)
            vis.add_image({'errors': images['error_curves'][0]}, epoch + 1)
            vis.add_scalar(
                {
                    'steps': {
                        'Jacobi': results['Jacobi'],
                        'model': results['model']
                    },
                    'ratio': results['ratio']
                }, epoch + 1)

        if (epoch + 1) % opt.save_every == 0 or epoch == opt.n_epochs - 1:
            model.save(opt.ckpt_path, epoch + 1)
Example #20
def layer_link_entities_to_from(nl_api_elt, query):
    for entity_name in u.extract_relevant_entities(nl_api_elt):
        changed = False
        for token in nl_api_elt['tokens']:
            if token['lemma'].lower() == entity_name:
                hti = token['dependencyEdge']['headTokenIndex']
                particle = nl_api_elt['tokens'][hti]['lemma']
                if particle == 'from':
                    query += u.build('from', entity_name)
                elif particle == 'to':
                    query += u.build('to', entity_name)
                changed = True
                break
        if not changed:  # maybe it's compound.
            sentence = u.extract_original_sentence(nl_api_elt)
            if 'from {}'.format(entity_name) in sentence:
                query += u.build('from', entity_name)
            elif 'to {}'.format(entity_name) in sentence:
                query += u.build('to', entity_name)
    return query
Example #21
def layer_filter_entities_already_indexed_by_to_from(nl_api_elt, query):
    for entity_name in u.extract_relevant_entities(nl_api_elt):
        if u.build('to', entity_name) in query and u.build(None, entity_name) in query:
            query = query.replace(u.build(None, entity_name), '')
        if u.build('from', entity_name) in query and u.build(None, entity_name) in query:
            query = query.replace(u.build(None, entity_name), '')
    return query
Example #22
def dowmsample_unet(input,
                    channel=32,
                    output_channel=3,
                    reuse=False,
                    ext="",
                    div_num=1):
    """
    docstring
    """
    if reuse:
        tf.get_variable_scope().reuse_variables()

    down_input = tf.image.resize_images(
        input, [tf.shape(input)[1] // 2,
                tf.shape(input)[2] // 2])
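    # utils.build computes hyper features from a tiled 3-channel copy of the downsampled input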
    down_hyper = utils.build(tf.tile(down_input, [1, 1, 1, 3]))

    conv0 = slim.conv2d(input,
                        32, [3, 3],
                        rate=1,
                        activation_fn=lrelu,
                        scope=ext + 'g_conv0_1')
    conv0 = slim.conv2d(conv0,
                        32, [3, 3],
                        rate=1,
                        activation_fn=lrelu,
                        scope=ext + 'g_conv0_2')
    pool0 = slim.max_pool2d(conv0, [2, 2], padding='SAME')

    net_input = tf.concat([pool0, down_hyper], axis=3)
    net_output = VCN(net_input,
                     output_channel=64,
                     reuse=reuse,
                     div_num=div_num)

    up10 = bilinear_up_and_concat(net_output,
                                  conv0,
                                  channel,
                                  channel * 2,
                                  scope=ext + "g_up_5")
    conv10 = slim.conv2d(up10,
                         channel, [3, 3],
                         rate=1,
                         activation_fn=lrelu,
                         scope=ext + 'g_conv10_1')
    conv10 = slim.conv2d(conv10,
                         output_channel * div_num, [3, 3],
                         rate=1,
                         activation_fn=None,
                         scope=ext + 'g_conv10_2')
    return conv10
Example #23
def evaluate_dataset(csv_path, target_index, problem, model, parameter_dict, method='holdout', seed=20, max_iter=50):
    print('Now evaluating {}...'.format(csv_path))
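    # build() is assumed here to load the CSV and return the feature matrix x and the target column y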
    x, y = build(csv_path, target_index)

    wrapper = Loss(model, x, y, method=method, problem=problem)

    # print('Evaluating PI')
    # np.random.seed(seed)
    # sexp = SquaredExponential()
    # gp = GaussianProcess(sexp, optimize=True, usegrads=True)
    # acq_pi = Acquisition(mode='probability_improvement')
    # bo_pi = BO(gp, acq_pi, wrapper.evaluate_loss, parameter_dict, n_jobs=1)
    # bo_pi.run(max_iter=max_iter)

    print('Evaluating EI')
    np.random.seed(seed)
    sexp = SquaredExponential()
    gp = GaussianProcess(sexp, optimize=True, usegrads=True)
    acq_ei = Acquisition(mode='expected_improvement')
    bo_ei = BO(gp, acq_ei, wrapper.evaluate_loss, parameter_dict, n_jobs=1)
    bo_ei.run(max_iter=max_iter)

    # Also add gpucb, beta = 0.5, beta = 1.5
    print('Evaluating GP-gpucb beta = 0.5')
    np.random.seed(seed)
    sexp = SquaredExponential()
    gp = GaussianProcess(sexp, optimize=True, usegrads=True)
    acq_ucb = Acquisition(mode='gpucb', beta=0.5)
    bo_ucb = BO(gp, acq_ucb, wrapper.evaluate_loss, parameter_dict, n_jobs=1)
    bo_ucb.run(max_iter=max_iter)

    # print('Evaluating GP-gpucb beta = 1.5')
    # np.random.seed(seed)
    # sexp = SquaredExponential()
    # gp = GaussianProcess(sexp, optimize=True, usegrads=True)
    # acq_ucb2 = Acquisition(mode='gpucb', beta=1.5)
    # bo_ucb2 = BO(gp, acq_ucb2, wrapper.evaluate_loss, parameter_dict, n_jobs=1)
    # bo_ucb2.run(max_iter=max_iter)

    print('Evaluating random')
    np.random.seed(seed)
    r = evaluate_random(bo_ei, wrapper.evaluate_loss, n_eval=max_iter + 1)
    r = cum_max(r)

    # pi_h = np.array(gpgo_pi.history)
    ei_h = np.array(bo_ei.history)
    ucb1_h = np.array(bo_ucb.history)
    # ucb2_h = np.array(gpgo_ucb2.history)

    return ei_h, ucb1_h, r
Example #24
def layer_filter_entities_already_indexed_by_to_from(nl_api_elt, query):
    for entity_name in u.extract_relevant_entities(nl_api_elt):
        if u.build('to', entity_name) in query and u.build(
                None, entity_name) in query:
            query = query.replace(u.build(None, entity_name), '')
        if u.build('from', entity_name) in query and u.build(
                None, entity_name) in query:
            query = query.replace(u.build(None, entity_name), '')
    return query
Example #25
def main():
  opt, logger, vis = utils.build(is_train=False)

  dloader = data.get_data_loader(opt)
  print('Val dataset: {}'.format(len(dloader.dataset)))
  model = models.get_model(opt)

  for epoch in opt.which_epochs:
    # Load checkpoint
    if epoch == -1:
      # Find the latest checkpoint
      checkpoints = glob.glob(os.path.join(opt.ckpt_path, 'net*.pth'))
      assert len(checkpoints) > 0
      epochs = [int(filename.split('_')[-1][:-4]) for filename in checkpoints]
      epoch = max(epochs)
    logger.print('Loading checkpoints from {}, epoch {}'.format(opt.ckpt_path, epoch))
    model.load(opt.ckpt_path, epoch)

    results = evaluate(opt, dloader, model)
    for metric in results:
      logger.print('{}: {}'.format(metric, results[metric]))
Example #26
"""
Test that we create Predicate on properties of the root AST node.
"""

from __future__ import absolute_import, division, print_function

from langkit.dsl import ASTNode, LogicVar, UserField
from langkit.expressions import Predicate, Self, Var, ignore, langkit_property

from utils import build


class FooNode(ASTNode):
    @langkit_property()
    def sophisticated_predicate():
        return True


class Example(FooNode):
    var1 = UserField(LogicVar, public=False)

    @langkit_property(public=True)
    def prop():
        ignore(Var(Predicate(FooNode.sophisticated_predicate, Self.var1)))
        return Self.as_bare_entity


build(lkt_file='expected_concrete_syntax.lkt')
print('Compilation was successful')
Example #27
import pandas as pd
import yaml
from sklearn.model_selection import train_test_split
from utils import save_vocabulary
from utils import build, preprocessing, plot
from CNN import CNN
import tensorflow as tf
import pickle

# Load config
config = yaml.safe_load(open('./src/CNN/config.yaml'))

# Load data
df_data = pd.read_csv(config['TRAIN_DATA'])[:200]

# build vocabulary
df_data, vocabulary, word_to_num, num_to_word = build(df_data)

# save
save_vocabulary(config['VOCABULARY'], vocabulary, word_to_num, num_to_word)

# split data
df_train, df_val = train_test_split(df_data, test_size=0.2)
df_train = df_train.reset_index(drop=True)
df_val = df_val.reset_index(drop=True)

# pre data
train_it = preprocessing(df_train, word_to_num)
val_it = preprocessing(df_val, word_to_num)

if __name__ == '__main__':
    num_word = len(vocabulary)
Example #28
gray_flow_backward = tf.placeholder(
    tf.float32, shape=[None, None, None, 2 * (num_frame - 1)])
c0 = tf.placeholder(tf.float32, shape=[None, None, None, 3])
c1 = tf.placeholder(tf.float32, shape=[None, None, None, 3])

lossDict = {}
objDict = {}

#   X0, X1: Gray frames
#   Y0, Y1: Ground truth color frames
#   C0, C1: Colorized frames
with tf.variable_scope(tf.get_variable_scope()):
    X0, X1 = input_i[:, :, :, 0:1], input_i[:, :, :, 1:2]
    Y0, Y1 = input_target[:, :, :, 0:3], input_target[:, :, :, 3:6]
    with tf.variable_scope('individual'):
        C0 = net.VCN(utils.build(tf.tile(X0, [1, 1, 1, 3])), reuse=False)
        C1 = net.VCN(utils.build(tf.tile(X1, [1, 1, 1, 3])), reuse=True)

    objDict["mask"], _ = occlusion_mask(Y0, Y1, input_flow_backward[:, :, :,
                                                                    0:2])
    objDict["warped"] = flow_warp_op.flow_warp(
        C0, input_flow_backward[:, :, :, 0:2])

    lossDict["RankDiv_im1"] = loss.RankDiverse_loss(
        C0, tf.tile(input_target[:, :, :, 0:3], [1, 1, 1, div_num]), div_num)
    lossDict["RankDiv_im2"] = loss.RankDiverse_loss(
        C1, tf.tile(input_target[:, :, :, 3:6], [1, 1, 1, div_num]), div_num)
    lossDict["RankDiv"] = lossDict["RankDiv_im1"] + lossDict["RankDiv_im2"]

    lossDict['Bilateral_im1'] = sum([
        loss.KNN_loss(C0[:, :, :, 3 * i:3 * i + 3], input_idx[:, 0:5])
Example #29
import utils
import sys

#run--------------
if __name__ == "__main__":
    print("This is argv:", sys.argv)
    if len(sys.argv) > 1:
        command = sys.argv[1]
        if command == "build":
            print("Build was specified")
            utils.build()
            print(
                "Your content pages are now templated into your website design"
            )
        elif command == "new":
            print("New page was specified")
            utils.new()
            print("A new content page has been created at content/new.html")
        else:
            print("Usage:")
            print("  To rebuild site: python manage.py build")
            print("  To create new page: python manage.py new")
    else:
        print("Usage:")
        print("  To rebuild site: python manage.py build")
        print("  To create new page: python manage.py new")  #DRY... :/
Example #30
import copy
import numpy as np
import os

import data
import models
import utils
import sys
from test import evaluate

opt, logger, vis = utils.build(is_train=True, tb_dir='tb_train')
train_loader = data.get_data_loader(opt)
# Validation set
val_opt = copy.deepcopy(opt)
val_opt.is_train = False
val_opt.num_objects = [1]  # Only matters for MNIST
val_loader = data.get_data_loader(val_opt)
print('Val dataset: {}'.format(len(val_loader.dataset)))

model = models.get_model(opt)

save_every = 5

if opt.load_ckpt_dir != '':
    ckpt_dir = os.path.join(opt.ckpt_dir, opt.dset_name, opt.load_ckpt_dir)
    assert os.path.exists(ckpt_dir)
    logger.print('Loading checkpoint from {}'.format(ckpt_dir))
    model.load(ckpt_dir, opt.load_ckpt_epoch)

opt.n_epochs = max(opt.n_epochs, opt.n_iters // len(train_loader))
logger.print('Total epochs: {}'.format(opt.n_epochs))
Example #31
gray_flow_backward = tf.placeholder(
    tf.float32, shape=[None, None, None, 2 * (num_frame - 1)])
c0 = tf.placeholder(tf.float32, shape=[None, None, None, 3])
c1 = tf.placeholder(tf.float32, shape=[None, None, None, 3])

lossDict = {}
objDict = {}

#   X0, X1: Gray frames
#   Y0, Y1: Ground truth color frames
#   C0, C1: Colorized frames
with tf.variable_scope(tf.get_variable_scope()):
    X0, X1 = input_i[:, :, :, 0:1], input_i[:, :, :, 1:2]
    Y0, Y1 = input_target[:, :, :, 0:3], input_target[:, :, :, 3:6]
    with tf.variable_scope('individual'):
        C0 = net.VCN(utils.build(tf.tile(X0, [1, 1, 1, 3])),
                     reuse=False, div_num=div_num)
        C1 = net.VCN(utils.build(tf.tile(X1, [1, 1, 1, 3])),
                     reuse=True, div_num=div_num)

    objDict["mask"], _ = occlusion_mask(Y0, Y1,
                                        input_flow_backward[:, :, :, 0:2])
    objDict["warped"] = flow_warp_op.flow_warp(
        C0, input_flow_backward[:, :, :, 0:2])

    lossDict["RankDiv_im1"] = loss.RankDiverse_loss(
        C0, tf.tile(input_target[:, :, :, 0:3], [1, 1, 1, div_num]), div_num)
    lossDict["RankDiv_im2"] = loss.RankDiverse_loss(
        C1, tf.tile(input_target[:, :, :, 3:6], [1, 1, 1, div_num]), div_num)
    lossDict["RankDiv"] = lossDict["RankDiv_im1"] + lossDict["RankDiv_im2"]

    lossDict['Bilateral_im1'] = sum([
        loss.KNN_loss(C0[:, :, :, 3 * i:3 * i + 3], input_idx[:, 0:5])
        for i in range(4)
    ])
    lossDict['Bilateral_im2'] = sum([
        loss.KNN_loss(C1[:, :, :, 3 * i:3 * i + 3], input_idx[:, 5:10])
        for i in range(4)
    ])
    lossDict['Bilateral'] = lossDict['Bilateral_im2'] + lossDict['Bilateral_im1']

    lossDict["temporal"] = tf.reduce_mean(
        tf.multiply(tf.abs(objDict["warped"] - C1),
                    tf.tile(objDict["mask"], [1, 1, 1, 4]))) * 5
Example #32
def layer_entities(nl_api_elt, query):
    for entity_name in u.extract_relevant_entities(nl_api_elt):
        query += u.build(None, entity_name)
    return query
Example #33
# -*- coding: utf-8 -*-
# created hy HaiYan Yu on 2019/03/05
import argparse

from model.config import Config
from utils import process, build, train, save

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_path', required=True)
    parser.add_argument('--data_path', required=True)
    parser.add_argument('--model_path', required=True)
    parser.add_argument('--train_ratio', type=float, default=0.8)
    parser.add_argument('--dev_ratio', type=float, default=0.2)
    parser.add_argument('--epoch', type=float, default=15)
    parser.add_argument('--optimizer', default='adam')
    parser.add_argument('--dropout', type=float, default=0.5)

    args = parser.parse_args()

    process(args=args)
    config = Config(load=False, args=args)
    build(config)
    config.load()
    train(config)
    save(args=args)
Example #34
def main():
    pages = utils.generate_page_list()
    utils.build(pages)
Example #35
        rate=1,
        activation_fn=None,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv9_2')
    return conv9


config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
input_i = tf.placeholder(tf.float32, shape=[1, None, None, 2])
input_target = tf.placeholder(tf.float32, shape=[1, None, None, 6])

with tf.variable_scope(tf.get_variable_scope()):
    with tf.variable_scope('individual'):
        g0 = VCN(utils.build(tf.tile(input_i[:, :, :, 0:1], [1, 1, 1, 3])),
                 reuse=False)
        g1 = VCN(utils.build(tf.tile(input_i[:, :, :, 1:2], [1, 1, 1, 3])),
                 reuse=True)

saver = tf.train.Saver(max_to_keep=1000)
sess.run([tf.global_variables_initializer()])

var_restore = [v for v in tf.trainable_variables()]
saver_restore = tf.train.Saver(var_restore)
ckpt = tf.train.get_checkpoint_state(model)
print('loaded ' + ckpt.model_checkpoint_path)
saver_restore.restore(sess, ckpt.model_checkpoint_path)

if not len(test_img):
    img_names = utils.get_names(test_dir)
Example #36
def hyper_unet(input,
               channel=32,
               output_channel=3,
               reuse=False,
               ext="",
               div_num=1):
    if reuse:
        tf.get_variable_scope().reuse_variables()
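    # Tile the single-channel input to 3 channels; utils.build produces the feature tensor fed to the encoder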
    input = utils.build(tf.tile(input, [1, 1, 1, 3]))
    conv1 = slim.conv2d(
        input,
        channel, [1, 1],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv1_1')
    conv1 = slim.conv2d(
        conv1,
        channel, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv1_2')
    pool1 = slim.max_pool2d(conv1, [2, 2], padding='SAME')
    conv2 = slim.conv2d(
        pool1,
        channel * 2, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv2_1')
    conv2 = slim.conv2d(
        conv2,
        channel * 2, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv2_2')
    pool2 = slim.max_pool2d(conv2, [2, 2], padding='SAME')
    conv3 = slim.conv2d(
        pool2,
        channel * 4, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv3_1')
    conv3 = slim.conv2d(
        conv3,
        channel * 4, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv3_2')
    pool3 = slim.max_pool2d(conv3, [2, 2], padding='SAME')
    conv4 = slim.conv2d(
        pool3,
        channel * 8, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv4_1')
    conv4 = slim.conv2d(
        conv4,
        channel * 8, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv4_2')
    pool4 = slim.max_pool2d(conv4, [2, 2], padding='SAME')
    conv5 = slim.conv2d(
        pool4,
        channel * 16, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv5_1')
    conv5 = slim.conv2d(
        conv5,
        channel * 16, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv5_2')
    up6 = bilinear_up_and_concat(conv5,
                                 conv4,
                                 channel * 8,
                                 channel * 16,
                                 scope=ext + "g_up_1")
    conv6 = slim.conv2d(
        up6,
        channel * 8, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv6_1')
    conv6 = slim.conv2d(
        conv6,
        channel * 8, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv6_2')
    up7 = bilinear_up_and_concat(conv6,
                                 conv3,
                                 channel * 4,
                                 channel * 8,
                                 scope=ext + "g_up_2")
    conv7 = slim.conv2d(
        up7,
        channel * 4, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv7_1')
    conv7 = slim.conv2d(
        conv7,
        channel * 4, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv7_2')
    up8 = bilinear_up_and_concat(conv7,
                                 conv2,
                                 channel * 2,
                                 channel * 4,
                                 scope=ext + "g_up_3")
    conv8 = slim.conv2d(
        up8,
        channel * 2, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv8_1')
    conv8 = slim.conv2d(
        conv8,
        channel * 2, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv8_2')
    up9 = bilinear_up_and_concat(conv8,
                                 conv1,
                                 channel,
                                 channel * 2,
                                 scope=ext + "g_up_4")
    conv9 = slim.conv2d(
        up9,
        channel, [3, 3],
        rate=1,
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv9_1')
    conv9 = slim.conv2d(
        conv9,
        output_channel * div_num, [3, 3],
        rate=1,
        activation_fn=None,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        scope=ext + 'g_conv9_2')
    return conv9
Example #37
def layer_add_keywords(nl_api_elt, query):
    for token in nl_api_elt['tokens']:
        lemma = token['lemma']
        if lemma in KEYWORDS:
            query += u.build(None, lemma)
    return query
Example #38
def layer_add_attachment_tag(nl_api_elt, query):
    return query + u.build('has', 'attachment')