Example #1
File: sanity.py  Project: brettviren/garpi
def check_debian():
    arch = cmd('dpkg --print-architecture',output=True).strip()
    mach = cmd('uname -m',output=True).strip()
    if arch == 'i386' and mach == 'x86_64':
        log.warning('''Your kernel claims to be x86_64 but your user space claims to be i386.  This will greatly confuse the various build systems.  To work around this issue, either install an i686 kernel or run "linux32 /bin/bash" before installing or running the code.''')
        return False
    return True
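Note: cmd() and log come from elsewhere in garpi and are not shown; a minimal stand-in sketch under that assumption (not the project's actual helpers):

import logging
import subprocess

log = logging.getLogger(__name__)

def cmd(cmdstr, output=False):
    # Hypothetical stand-in: run a shell command and, when output=True,
    # return its captured stdout as a string.
    result = subprocess.run(cmdstr, shell=True, check=True,
                            capture_output=True, text=True)
    return result.stdout if output else result.returncode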
Example #2
File: fs.py  Project: brettviren/garpi
def goback():
    'Return to the most recent directory from which a goto() was issued'
    if not dirStack:
        log.warning('Directory stack empty')
        return
    theDir = dirStack.pop()
    os.chdir(theDir)
    log.debug('goback to %s'%theDir)
    return theDir
Example #3
def check_running_firmware(client: Client) -> State:
    """current running firmware"""
    # Verify only one version from firmwareRunning and firmwareCtrlrRunning
    versions = set()
    for record in client.get_class("firmwareRunning"):
        versions.add(record["peVer"])
    for record in client.get_class("fimrwareCtrlrRunning"):
        versions.add(record["version"])
    if len(versions) > 1:
        log.warning("Multiple firmware versions found",
                    versions=list(versions))
    elif client.args["debug"] and len(versions) > 0:
        log.debug("Firmware:", version=versions.pop())
    return State.OK
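The Client, State, and keyword-accepting log used here and in Examples #6, #8, #18 and #27 are defined elsewhere; a rough sketch of that assumed scaffolding, using a structlog-style logger:

from enum import Enum

import structlog

log = structlog.get_logger()  # supports log.warning("event", key=value)

class State(Enum):
    OK = "OK"
    FAIL = "FAIL"

class Client:
    """Sketch of the assumed APIC REST wrapper."""
    def __init__(self, session, args):
        self.session = session      # authenticated HTTP session
        self.args = args            # e.g. {"debug": True}

    def get_class(self, class_name, params=None, cache=False):
        # A real client would GET /api/class/<class_name>.json and
        # yield each object's attribute dict; omitted in this sketch.
        raise NotImplementedError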
Example #4
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--max_batch_size', type=int, default=64)
    parser.add_argument('--prefix', type=str, default='default',
                        help='the nickname of this training job')
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--dataset', type=str, default='MNIST',
                        choices=['MNIST', 'Fashion', 'SVHN',
                                 'CIFAR10', 'ImageNet', 'TinyImageNet'])
    parser.add_argument('--norm_type', type=str, default='batch',
                        choices=['batch', 'group'])
    # Log
    parser.add_argument('--max_training_step', type=int, default=100000)
    parser.add_argument('--log_step', type=int, default=10)
    parser.add_argument('--test_sample_step', type=int, default=10)
    parser.add_argument('--write_summary_step', type=int, default=10)
    parser.add_argument('--ckpt_save_step', type=int, default=1000)
    # Learning
    parser.add_argument('--learning_rate', type=float, default=1e-5)
    parser.add_argument('--no_adjust_learning_rate', action='store_true', default=False)
    config = parser.parse_args()


    if config.dataset == 'MNIST':
        import datasets.mnist as dataset
    elif config.dataset == 'Fashion':
        import datasets.fashion_mnist as dataset
    elif config.dataset == 'SVHN':
        import datasets.svhn as dataset
    elif config.dataset == 'CIFAR10':
        import datasets.cifar10 as dataset
    elif config.dataset == 'TinyImageNet':
        import datasets.tiny_imagenet as dataset
    elif config.dataset == 'ImageNet':
        import datasets.imagenet as dataset
    else:
        raise ValueError(config.dataset)

    dataset_train, dataset_test = dataset.create_default_splits()
    image, label = dataset_train.get_data(dataset_train.ids[0])
    config.data_info = np.concatenate([np.asarray(image.shape), np.asarray(label.shape)])

    trainer = Trainer(config,
                      dataset_train, dataset_test)

    log.warning("dataset: %s, learning_rate: %f", config.dataset, config.learning_rate)
    trainer.train()
Example #5
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--model',
                        type=str,
                        default='conv',
                        choices=['mlp', 'conv'])
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--dataset',
                        type=str,
                        default='MNIST',
                        choices=['MNIST', 'SVHN', 'CIFAR10'])
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    parser.add_argument('--update_rate', type=int, default=5)
    parser.add_argument('--lr_weight_decay',
                        action='store_true',
                        default=False)
    config = parser.parse_args()

    if config.dataset == 'MNIST':
        from datasets.mnist import create_default_splits
        config.data_info = np.array([28, 28, 10, 1])
        config.conv_info = np.array([32, 64, 128])
        config.deconv_info = np.array([[100, 2, 1], [25, 3, 2], [6, 4, 2],
                                       [1, 6, 2]])
    elif config.dataset == 'SVHN':
        from datasets.svhn import create_default_splits
        config.data_info = np.array([32, 32, 10, 3])
        config.conv_info = np.array([64, 128, 256])
        config.deconv_info = np.array([[384, 2, 1], [128, 4, 2], [64, 4, 2],
                                       [3, 6, 2]])
    elif config.dataset == 'CIFAR10':
        from datasets.cifar10 import create_default_splits
        config.data_info = np.array([32, 32, 10, 3])
        config.conv_info = np.array([64, 128, 256])
        config.deconv_info = np.array([[384, 2, 1], [128, 4, 2], [64, 4, 2],
                                       [3, 6, 2]])
    else:
        raise ValueError(config.dataset)

    dataset_train, dataset_test = create_default_splits()

    trainer = Trainer(config, dataset_train, dataset_test)

    log.warning("dataset: %s, learning_rate: %f", config.dataset,
                config.learning_rate)
    trainer.train()
Example #6
def check_maintenance_groups(client: Client) -> State:
    """switches are in maintenance groups"""
    # Verify all switches from topSystem are also in maintUpgJob objects
    job_dns = []
    for job in client.get_class("maintUpgJob"):
        if job.get("maintGrp", "") != "" and job["dn"].startswith("topology"):
            job_dns.append(get_node_dn(job["dn"]))
    for device in client.get_class("topSystem"):
        if device["role"] == "spine" or device["role"] == "leaf":
            if get_node_dn(device["dn"]) not in job_dns:
                log.warning("Device not in maintenance group",
                            name=device["name"])
                return State.FAIL
    log.debug("All devices in maintenance groups")
    return State.OK
Example #7
def cmd_inrel(self, cmdstr):
    'Run a command in the release package.  No-op if none defined.'
    relpkg = self.rel_pkg()
    if not relpkg:
        err = 'Project %s has no release package defined' % self.name
        log.warning(err)
        raise CommandFailure(err)
    import fs
    relpkgcmt = os.path.join(fs.projects(), self.name, relpkg, 'cmt')
    if not os.path.exists(relpkgcmt):
        err = 'Project %s has release package defined, but no dir: %s' % (self.name, relpkgcmt)
        log.warning(err)
        raise CommandFailure(err)
    import command
    command.cmd(cmdstr, env=self.env(), dir=relpkgcmt)
Example #8
def check_tcam_scale(client: Client) -> State:
    """per-leaf TCAM scale"""
    # Verify polUsageCum <= polUsageCapCum for eqptcapacityPolUsage5min
    over_limit = False
    for record in client.get_class("eqptcapacityPolUsage5min"):
        node_dn = get_node_dn(record["dn"])
        count = get_path(int, record, "polUsageCum")
        limit = get_path(int, record, "polUsageCapCum")
        if count > 0 and count >= limit:
            over_limit = True
            log.warning(f"Over TCAM scale on {node_dn}",
                        count=count,
                        limit=limit)
        if client.args["debug"]:
            log.debug(f"TCAM scale on {node_dn}", count=count, limit=limit)
    return State.FAIL if over_limit else State.OK
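get_node_dn() and get_path() are shared helpers that are not shown; judging only from how they are called, they plausibly behave like this sketch (an assumption, not the original code):

import re

def get_node_dn(dn):
    # Reduce a full ACI DN such as "topology/pod-1/node-101/sys/..."
    # to its "topology/pod-X/node-Y" prefix.
    match = re.match(r"topology/pod-\d+/node-\d+", dn)
    return match.group(0) if match else dn

def get_path(cast, record, key):
    # Read record[key] and cast it (e.g. int); fall back to cast() -> 0 on failure.
    try:
        return cast(record.get(key))
    except (TypeError, ValueError):
        return cast()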
Example #9
def main():

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

    config, model, dataset_train, dataset_val, dataset_test = argparser(is_train=False)
    log.warning("dataset path: %s", config.dataset_path)

    evaler_val = Evaler(config, model, dataset_val, 'val')
    evaler_val.eval_run()

    evaler_train = Evaler(config, model, dataset_train, 'train')
    evaler_train.eval_run()

    evaler_test = Evaler(config, model, dataset_test, 'test')
    evaler_test.eval_run()
Example #10
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=16 * 1)
    parser.add_argument('--model',
                        type=str,
                        default='ilp',
                        choices=['rn', 'baseline'])
    # parser.add_argument('--checkpoint_path', type=str,default='./train_dir/ilp-default-Sort-of-CLEVR_default_lr_0.0025-20190619-195552/model-32000')
    # parser.add_argument('--checkpoint_path', type=str,default='./train_dir/ilp-default-Sort-of-CLEVR_default_lr_0.0025-20190619-115754/model-42000')
    parser.add_argument(
        '--checkpoint_path',
        type=str,
        default=
        './train_dir/ilp-default-Sort-of-CLEVR_default_lr_0.002-20190807-173045/model-80000'
    )
    parser.add_argument('--train_dir', type=str)
    parser.add_argument('--dataset_path',
                        type=str,
                        default='Sort-of-CLEVR_default')
    parser.add_argument('--data_id', nargs='*', default=None)
    config = parser.parse_args()

    path = os.path.join('./datasets', config.dataset_path)

    if check_data_path(path):
        import sort_of_clevr as dataset
    else:
        raise ValueError(path)

    config.data_info = dataset.get_data_info()
    config.conv_info = dataset.get_conv_info()
    dataset_train, dataset_test = dataset.create_default_splits(path)

    evaler = Evaler(config, dataset_test)

    # qs1=[]
    # qs2=[]
    # ans=[]
    # for id in dataset_test._ids:
    #     dt = dataset_train.get_data(id)
    #     qs1.append( np.argmax(dt[1][:6]) )
    #     qs2.append( np.argmax(dt[1][6:]) )
    #     ans.append( np.argmax(dt[2]) )

    log.warning("dataset: %s", config.dataset_path)
    evaler.eval_run()
Example #11
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--dataset',
                        type=str,
                        default='MNIST',
                        choices=['MNIST', 'SVHN', 'CIFAR10'])
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    parser.add_argument('--alpha', type=float, default=1.0)
    parser.add_argument('--lr_weight_decay',
                        action='store_true',
                        default=False)
    parser.add_argument('--dump_result', action='store_true', default=False)
    parser.add_argument(
        '--distribution',
        type=str,
        default='Uniform',
        choices=['Uniform', 'Gaussian', 'Mixture', 'Gamma', 'Beta'])
    parser.add_argument('--dimension', type=int, default=100)
    config = parser.parse_args()

    if config.dataset == 'MNIST':
        import datasets.mnist as dataset
    elif config.dataset == 'SVHN':
        import datasets.svhn as dataset
    elif config.dataset == 'CIFAR10':
        import datasets.cifar10 as dataset
    else:
        raise ValueError(config.dataset)

    config.conv_info = dataset.get_conv_info()
    config.deconv_info = dataset.get_deconv_info()
    dataset_train, dataset_test = dataset.create_default_splits(
        distribution=config.distribution, dimension=config.dimension)

    m, l = dataset_train.get_data(dataset_train.ids[0])
    config.data_info = np.concatenate(
        [np.asarray(m.shape), np.asarray(l.shape)])

    trainer = Trainer(config, dataset_train, dataset_test)

    log.warning("dataset: %s, learning_rate: %f", config.dataset,
                config.learning_rate)
    trainer.train(dataset_train)
Example #12
def GloVe_vocab(vocab, scope='GloVe', reuse=tf.AUTO_REUSE):
    with tf.variable_scope(scope, reuse=reuse) as scope:
        glove_vocab = json.load(open(GLOVE_VOCAB_PATH, 'r'))
        used_vocab_idx = [glove_vocab['dict'][v] for v in vocab['vocab'][:-3]]

        with h5py.File(GLOVE_EMBEDDING_PATH, 'r') as f:
            glove_param = f['param'].value
        subset_param = np.take(glove_param, used_vocab_idx, axis=1)

        log.warning(scope.name)
        fixed = tf.constant(subset_param.transpose())
        learn = tf.get_variable(
            name='learn', shape=[3, 300],
            initializer=tf.random_uniform_initializer(
                minval=-0.01, maxval=0.01))
        embed_map = tf.concat([fixed, learn], axis=0)
        return embed_map
Example #13
def V2L(feat_V, enc_dim, out_dim, is_train=True, scope='V2L',
        reuse=tf.AUTO_REUSE):
    with tf.variable_scope(scope, reuse=reuse) as scope:
        log.warning(scope.name)
        h1 = fc_layer(
            feat_V, enc_dim, use_bias=True, use_bn=False,
            activation_fn=tf.nn.tanh, is_training=is_train,
            scope='fc_1', reuse=reuse)
        h2 = fc_layer(
            h1, enc_dim, use_bias=True, use_bn=False,
            activation_fn=tf.nn.tanh, is_training=is_train,
            scope='fc_2', reuse=reuse)
        map_L = fc_layer(
            h2, out_dim, use_bias=True, use_bn=False,
            activation_fn=None, is_training=is_train,
            scope='Linear', reuse=reuse)
        return map_L, [h1, h2, map_L]
Example #14
def attention_pooling(memory, score, scope='attention_pooling'):
    """
    Args:
        - memory: [bs, len, dim]
        - score: [bs, len]
    Returns:
        - pooled_memory: [bs, dim]
    """
    with tf.name_scope(scope):
        log.warning(scope)
        expanded_score = tf.expand_dims(score, axis=1)
        # expanded_score shape is [bs, 1, len]
        # memory shape is [bs, len, dim]
        pooled_memory = tf.matmul(expanded_score, memory)
        # pooled memory shape is [bs, 1, dim]
        pooled_memory = tf.squeeze(pooled_memory, axis=1)
    return pooled_memory
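A brief usage sketch for attention_pooling; the score is typically a softmax over the length axis (shapes here are illustrative):

import tensorflow as tf

memory = tf.random.normal([8, 12, 64])        # [bs, len, dim]
logits = tf.random.normal([8, 12])            # unnormalized scores, [bs, len]
score = tf.nn.softmax(logits, axis=-1)        # attention weights over len
pooled = attention_pooling(memory, score)     # -> [8, 64]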
Example #15
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--checkpoint_path', type=str, default=None)
    parser.add_argument('--train_dir', type=str)
    parser.add_argument('--dataset', type=str, default='MNIST', choices=['MNIST', 'SVHN', 'CIFAR10'])
    parser.add_argument('--reconstruct', action='store_true', default=False)
    parser.add_argument('--generate', action='store_true', default=False)
    parser.add_argument('--interpolate', action='store_true', default=False)
    parser.add_argument('--recontrain', action='store_true', default=False)  
    parser.add_argument('--data_id', nargs='*', default=None)
    parser.add_argument('--few_shot_class', type=int, default=None)
    parser.add_argument('--few_shot_cap', type=int, default=False) 
    parser.add_argument('--train_sample_cap', type=int, default=None)
    parser.add_argument('--test_sample_cap', type=int, default=None)    
    parser.add_argument('--weight_multiplier', type=int, default=1)
    parser.add_argument('--alpha', type=float, default=None)
    parser.add_argument('--ignore_weighting', action='store_true', default=False)
    config = parser.parse_args()

    if config.dataset == 'MNIST':
        import sys
        sys.path.insert(1, '/scratch')
        import datasets.mnist as dataset
    elif config.dataset == 'SVHN':
        import datasets.svhn as dataset
    elif config.dataset == 'CIFAR10':
        import datasets.cifar10 as dataset
    else:
        raise ValueError(config.dataset)

    config.conv_info = dataset.get_conv_info()
    config.deconv_info = dataset.get_deconv_info()
    dataset_train, dataset_test = dataset.create_default_splits(config)


    m, l = dataset_train.get_data(dataset_train.ids[0])
    config.data_info = np.concatenate([np.asarray(m.shape), np.asarray(l.shape)])

    evaler = Evaler(config, dataset_test, dataset_train)

    log.warning("dataset: %s", config.dataset)
    with tf.device('/GPU:0'):
        evaler.eval_run()
Example #16
def encode_L(seq, seq_len, dim=384, scope='encode_L',
             reuse=tf.AUTO_REUSE, cell_type='LSTM'):
    with tf.variable_scope(scope, reuse=reuse) as scope:
        log.warning(scope.name)
        if cell_type == 'LSTM':
            cell = rnn.BasicLSTMCell(num_units=dim, state_is_tuple=True)
        elif cell_type == 'GRU':
            cell = rnn.GRUCell(num_units=dim)
        else: raise ValueError('Unknown cell_type')
        _, final_state = tf.nn.dynamic_rnn(
            cell=cell, dtype=tf.float32, sequence_length=seq_len,
            inputs=seq)
        if cell_type == 'LSTM':
            out = final_state.h
        elif cell_type == 'GRU':
            out = final_state
        return out
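A usage sketch for encode_L under the snippet's apparent assumptions (TF 1.x graph mode, tensorflow.contrib.rnn imported as rnn):

import tensorflow as tf

seq = tf.placeholder(tf.float32, [None, 20, 300])   # [batch, time, embed_dim]
seq_len = tf.placeholder(tf.int32, [None])           # true length of each sequence
feat = encode_L(seq, seq_len, dim=384, cell_type='GRU')   # -> [batch, 384]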
Example #17
def L2V(feat_L, enc_dim, out_dim, is_train=True, scope='L2V',
        reuse=tf.AUTO_REUSE):
    with tf.variable_scope(scope, reuse=reuse) as scope:
        log.warning(scope.name)
        h = fc_layer(
            feat_L, enc_dim, use_bias=True, use_bn=False,
            activation_fn=tf.nn.relu, is_training=is_train,
            scope='fc_1', reuse=reuse)
        h = fc_layer(
            h, enc_dim, use_bias=True, use_bn=False,
            activation_fn=tf.nn.relu, is_training=is_train,
            scope='fc_2', reuse=reuse)
        map_V = fc_layer(
            h, out_dim, use_bias=True, use_bn=False,
            activation_fn=None, is_training=is_train,
            scope='Linear', reuse=reuse)
        return map_V
Example #18
def check_switch_scale(client: Client) -> State:
    """per-switch scale"""
    # Verify counts from ctxClassCnt are < limits from fvcapRule
    from collections import defaultdict

    metrics = defaultdict(lambda: defaultdict(dict))
    # map ctxClassCnt counts to fvcapRule limits
    count_to_limit = {"l2BD": "fvBD", "fvEpP": "fvCEp", "l3Dom": "fvCtx"}
    # Build dict with device/mo/metric
    counts = client.get_class("ctxClassCnt",
                              params={"rsp-subtree-class": "l2BD,fvEpP,l3Dom"})
    for record in counts:
        node_dn = get_node_dn(record["dn"])
        key = count_to_limit.get(record["name"])
        if key:
            metrics[node_dn][key]["count"] = get_path(int, record, "count")

    # Add limits to the metrics dict
    limits = client.get_class("fvcapRule", cache=True)
    for record in limits:
        if record["dn"].startswith("topology"):
            node_dn = get_node_dn(record["dn"])
            subj = record["subj"]
            if node_dn in metrics and subj in count_to_limit.values():
                limit = get_path(int, record, "constraint")
                metrics[node_dn][subj]["limit"] = limit

    # Validate metrics
    over_limit = False
    for node_dn, by_mo in metrics.items():
        for mo, metric in by_mo.items():
            count = metric.get("count", 0)
            limit = metric.get("limit", 0)
            if count > 0 and count >= limit:
                over_limit = True
                log.warning(f"Over scale limit on {node_dn}",
                            mo=mo,
                            count=count,
                            limit=limit)
            if client.args["debug"]:
                log.debug(f"Scale metric on {node_dn}:",
                          mo=mo,
                          count=count,
                          limit=limit)
    return State.FAIL if over_limit else State.OK
Example #19
def decode_L(inputs, dim, embed_map, start_token,
             unroll_type='teacher_forcing', seq=None, seq_len=None,
             end_token=None, max_seq_len=None, output_layer=None,
             is_train=True, scope='decode_L', reuse=tf.AUTO_REUSE):

    with tf.variable_scope(scope, reuse=reuse) as scope:
        init_c = fc_layer(inputs, dim, use_bias=True, use_bn=False,
                          activation_fn=tf.nn.tanh, is_training=is_train,
                          scope='Linear_c', reuse=reuse)
        init_h = fc_layer(inputs, dim, use_bias=True, use_bn=False,
                          activation_fn=tf.nn.tanh, is_training=is_train,
                          scope='Linear_h', reuse=reuse)
        init_state = rnn.LSTMStateTuple(init_c, init_h)
        log.warning(scope.name)

        start_tokens = tf.zeros(
            [tf.shape(inputs)[0]], dtype=tf.int32) + start_token
        if unroll_type == 'teacher_forcing':
            if seq is None: raise ValueError('seq is None')
            if seq_len is None: raise ValueError('seq_len is None')
            seq_with_start = tf.concat([tf.expand_dims(start_tokens, axis=1),
                                        seq[:, :-1]], axis=1)
            helper = seq2seq.TrainingHelper(
                tf.nn.embedding_lookup(embed_map, seq_with_start), seq_len)
        elif unroll_type == 'greedy':
            if end_token is None: raise ValueError('end_token is None')
            helper = seq2seq.GreedyEmbeddingHelper(
                lambda e: tf.nn.embedding_lookup(embed_map, e),
                start_tokens, end_token)
        else:
            raise ValueError('Unknown unroll_type')

        cell = rnn.BasicLSTMCell(num_units=dim, state_is_tuple=True)
        decoder = seq2seq.BasicDecoder(cell, helper, init_state,
                                       output_layer=output_layer)
        outputs, _, pred_length = seq2seq.dynamic_decode(
            decoder, maximum_iterations=max_seq_len,
            scope='dynamic_decoder')

        output = outputs.rnn_output
        pred = outputs.sample_id

        return output, pred, pred_length
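A greedy-decoding usage sketch for decode_L; the sizes and input tensors below are hypothetical placeholders, and the output layer follows the tf.contrib.seq2seq convention of passing a Dense layer:

import tensorflow as tf

vocab_size, embed_dim = 1000, 300                        # hypothetical sizes
context_feat = tf.placeholder(tf.float32, [None, 256])   # conditioning features (assumed)
embed_map = tf.get_variable('embed_map', [vocab_size, embed_dim])

output, pred, pred_length = decode_L(
    inputs=context_feat, dim=512, embed_map=embed_map,
    start_token=0, end_token=1, unroll_type='greedy',
    max_seq_len=20,
    output_layer=tf.layers.Dense(vocab_size, use_bias=False))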
Example #20
    def result(self):
        from run import app
        newsid = self.args["newsid"]
        app.mongo["searchengine"]["news-backtracking"].update(
            {"newsid": bson.int64.Int64(newsid)}, {"$set": {
                "status": 1
            }})
        try:
            parser = Parser(self.args['uri'], self.html).result
            parser.pop("newdetail")
        except AttributeError as err:
            log.debug("{}: {}".format(self.args["url"], err))
            return
        except Exception as err:
            log.warning("{}: {}".format(self.args["url"], err))
            return

        parser['newsid'] = newsid
        parser['from_title'] = self.args['title']
        parser['sentence'] = self.data['sentence']
        try:
            # app.mongo["searchengine"]["redetail"].insert(parser)
            parser["uri"] = self.args["uri"]
            parser["url"] = self.args["url"]
            parser["url"] = self.args["url"].replace("https://", "").replace(
                "http://", "")
            # app.mongo["searchengine"]["docs"].insert(self.data)
            app.mongo["searchengine"]["detail"].insert(parser)
            insert_middle_table(parser["uri"], parser['publish_info'],
                                app.mongo["searchengine"]["middlehaonew"],
                                parser["url"], parser["uid"])
            log.info(
                "newsid :{}, sign: {}, title: {}, from: {}, url: {}".format(
                    *self.args.values()))
            app.elastic.index(index='searchengine',
                              doc_type='docs',
                              body=self.args)

        except Exception as err:
            log.info("{}: {}".format(self.data['url'], err))
        self.data.clear()
        self.args.clear()
        parser.clear()
Example #21
def main():

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

    config, model, dataset_train, dataset_val, dataset_test = argparser(
        is_train=False)
    log.warning("dataset path: %s", config.dataset_path)

    #viewer_val = Visualizer(config, model, dataset_val, 'val')
    #viewer_val.vis_run()
    #config.batch_size = viewer_val.batch_size

    #viewer_train = Visualizer(config, model, dataset_train, 'train')
    #viewer_train.vis_run()
    #config.batch_size = viewer_train.batch_size

    viewer_test = Visualizer(config, model, dataset_test, 'test')
    viewer_test.vis_run()
Example #22
        def C(img, q, scope):
            with tf.compat.v1.variable_scope(scope) as scope:
                log.warning(scope.name)

                conv_1 = Conv2D(32,
                                kernel_size=5,
                                strides=3,
                                activation=tf.nn.relu,
                                padding=self.padding,
                                name="conv_1")(img)
                bn_1 = BatchNormalization(name="bn_1")(conv_1)
                conv_2 = Conv2D(64,
                                kernel_size=5,
                                strides=3,
                                activation=tf.nn.relu,
                                padding=self.padding,
                                name="conv_2")(bn_1)
                bn_2 = BatchNormalization(name="bn_2")(conv_2)
                conv_3 = Conv2D(128,
                                kernel_size=5,
                                strides=2,
                                activation=tf.nn.relu,
                                padding=self.padding,
                                name="conv_3")(bn_2)
                bn_3 = BatchNormalization(name="bn_3")(conv_3)
                conv_4 = Conv2D(256,
                                kernel_size=5,
                                strides=2,
                                activation=tf.nn.relu,
                                padding=self.padding,
                                name="conv_4")(bn_3)
                bn_4 = BatchNormalization(name="bn_4")(conv_4)
                flat = Flatten(name="flatten")(bn_4)
                flat = tf.keras.backend.repeat_elements(flat, 23, axis=0)
                conv_q = tf.concat([flat, q], axis=1)
                fc_1 = Dense(2000, activation=tf.nn.relu, name="fc_1")(conv_q)
                fc_2 = Dense(1000, activation=tf.nn.relu, name="fc_2")(fc_1)
                fc_3 = Dense(500, activation=tf.nn.relu, name="fc_3")(fc_2)
                fc_4 = Dense(100, activation=tf.nn.relu, name="fc_4")(fc_3)
                _logits = Dense(self.ans_dim, activation=None,
                                name="logits")(fc_4)
                return _logits
Example #23
def glove_embedding_map(used_vocab,
                        scope='glove_embedding_map',
                        reuse=tf.AUTO_REUSE):
    with tf.variable_scope(scope, reuse=reuse) as scope:
        log.warning(scope.name)
        with h5py.File(GLOVE_EMBEDDING_PATH, 'r') as f:
            fixed = tf.constant(f['param'].value.transpose())
        learn = tf.get_variable(name='learn',
                                shape=[3, 300],
                                initializer=tf.random_uniform_initializer(
                                    minval=-0.01, maxval=0.01))
        embed_map = tf.concat([fixed, learn], axis=0)

        glove_vocab = json.load(open(GLOVE_VOCAB_PATH, 'r'))
        selected_index = tf.constant(
            [glove_vocab['dict'][v] for v in used_vocab['vocab']],
            dtype=tf.int32)
        selected_embed_map = tf.nn.embedding_lookup(embed_map, selected_index)

        return selected_embed_map
Example #24
def fc_layer(input, dim, use_bias=False, use_bn=False, use_ln=False, activation_fn=None,
             is_training=True, scope='fc_layer', reuse=tf.AUTO_REUSE):
    with tf.variable_scope(scope, reuse=reuse) as scope:
        log.warning(scope.name)
        if use_bias:
            out = layers.fully_connected(
                input, dim, activation_fn=None, reuse=reuse,
                trainable=is_training, scope='fc')
        else:
            out = layers.fully_connected(
                input, dim, activation_fn=None, biases_initializer=None,
                reuse=reuse, trainable=is_training, scope='fc')
        if use_bn:
            out = layers.batch_norm(out, center=True, scale=True, decay=0.9,
                                    is_training=is_training,
                                    updates_collections=None)
        if use_ln:
            out = layers.layer_norm(out)
        if activation_fn is not None:
            out = activation_fn(out)
        return out
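A usage sketch for fc_layer (assumes TF 1.x with tensorflow.contrib.layers imported as layers, as the snippet does):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
h = fc_layer(x, 256, use_bias=True, use_bn=True,
             activation_fn=tf.nn.relu, is_training=True, scope='fc_example')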
Example #25
def conv2d(input, dim, kernel_size, pad='same', use_bias=False, use_bn=False,
           activation_fn=None, is_training=True, scope='conv2d',
           reuse=tf.AUTO_REUSE):
    with tf.variable_scope(scope, reuse=reuse) as scope:
        log.warning(scope.name)
        if use_bias:
            out = layers.conv2d(input, dim, kernel_size, padding=pad,
                                activation_fn=None, reuse=reuse,
                                trainable=is_training, scope='conv2d')
        else:
            out = layers.conv2d(input, dim, kernel_size, padding=pad,
                                activation_fn=None,
                                biases_initializer=None, reuse=reuse,
                                trainable=is_training, scope='conv2d')
        if use_bn:
            out = layers.batch_norm(out, center=True, scale=True, decay=0.9,
                                    is_training=is_training,
                                    updates_collections=None)
        if activation_fn is not None:
            out = activation_fn(out)
        return out
Example #26
def runnable():
    try:
        if self._state in (State.SHUTDOWN, State.SHUTTING_DOWN):
            log.info(
                f"connector {self._connector_instance_id} will not reconnect to router as it is being shut down"
            )
        elif self._ws_client_farm.is_safe_to_add_ws(register_uri):
            log.info(
                f"connector {self._connector_instance_id} is adding another connectorSocket..."
            )
            self._connect_to_router(sock.register_uri, conn_info)
            self._ws_client_farm.add_ws(str(sock.register_uri))
        else:
            log.warning(
                f"unexpected error happened, will not add websocket for this connector with "
                f"id {self._connector_instance_id}, current websocket client farm={self._ws_client_farm}"
            )
    except Exception as e:
        log.error(
            f"can not replace socket to {register_uri}, err: {e}",
            exc_info=True)
Example #27
def check_backup(client: Client) -> State:
    """last backup status"""
    # Verify executeTime is within last 24hrs for configJob
    recent_backup = False
    latest_backup = None
    for backup in client.get_class("configJob"):
        iso_backup_str = backup["executeTime"][:19]
        this_backup_time = datetime.strptime(iso_backup_str,
                                             "%Y-%m-%dT%H:%M:%S")
        if latest_backup is None or this_backup_time > latest_backup:
            latest_backup = this_backup_time
        last_24hrs = datetime.now() - timedelta(hours=24)
        if this_backup_time >= last_24hrs and backup["operSt"] == "success":
            recent_backup = True
    latest = "None" if latest_backup is None else latest_backup.isoformat()
    if not recent_backup:
        log.warning("Backup not performed within 24 hours", last_backup=latest)
        return State.FAIL
    elif client.args["debug"]:
        log.debug("Last backup performed within 24 hours", last_backup=latest)
    return State.OK
Example #28
    def result(self):
        from run import app
        try:
            parser = Parser(self.args["uri"], self.html).result

        except AttributeError as err:
            log.warning("{}: {}".format(self.args["url"], err))
            return
        except Exception as err:
            log.warning("{}: {}".format(self.args["url"], err))
            return

        parser["tmd5"] = make_md5(parser["title"])
        parser["lmd5"] = get_long.get_longest_sentence(parser["newdetail"])
        parser.pop("newdetail")
        parser["uri"] = self.args["uri"]
        parser["group_id"] = parser_group_id(self.args["uri"], self.args["url"], self.html)
        parser["url"] = self.args["url"]
        try:
            app.mongo["searchengine"]["channel_docs"].insert(parser)
            log.info("parser channel: {}, {}, {}".format(parser["group_id"], parser["url"],
                                                      parser["group_id"]))
            parser["@timestamp"] = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000+0800")
            del parser["_id"]
            del parser["detail"]
            # app.redis.sadd("ChannelDocs:Queue", json.dumps({"group_id": parser["group_id"]}))
            # Put the received data into the redis Channel queue; the downstream Fenghua service fetches the id to look up the full article content
            parser["monitor_uri"] = parser["uri"]
            parser["service"] = "channel_logs"
            app.elastic.index(index='account_logs', doc_type='docs', body=parser)
        except Exception as err:
            log.warning("{}: {}".format(self.args['url'], err))

        parser.clear()
        self.args.clear()
Example #29
def main():
    # Configure the initial parameters
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=16)
    # Customizable prefix (job nickname)
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--dataset',
                        type=str,
                        default='CIFAR10',
                        choices=['MNIST', 'SVHN', 'CIFAR10'])
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    parser.add_argument('--update_rate', type=int, default=5)
    parser.add_argument('--lr_weight_decay',
                        action='store_true',
                        default=False)
    parser.add_argument('--dump_result', action='store_true', default=True)
    config = parser.parse_args()
    # Select the dataset
    if config.dataset == 'MNIST':
        import datasets.mnist as dataset
    elif config.dataset == 'SVHN':
        import datasets.svhn as dataset
    elif config.dataset == 'CIFAR10':
        import datasets.cifar10 as dataset
    else:
        raise ValueError(config.dataset)
    # Each dataset provides its own data/conv/deconv configuration info
    config.data_info = dataset.get_data_info()
    config.conv_info = dataset.get_conv_info()
    config.deconv_info = dataset.get_deconv_info()
    dataset_train, dataset_test = dataset.create_default_splits()
    # Initialize the Trainer with the config and the datasets
    trainer = Trainer(config, dataset_train, dataset_test)

    log.warning("dataset: %s, learning_rate: %f", config.dataset,
                config.learning_rate)
    # Train
    trainer.train()
Example #30
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--model',
                        type=str,
                        default='conv',
                        choices=['mlp', 'conv'])
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--dataset',
                        type=str,
                        default='CIFAR10',
                        choices=['MNIST', 'SVHN', 'CIFAR10'])
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    parser.add_argument('--update_rate', type=int, default=5)
    parser.add_argument('--lr_weight_decay',
                        action='store_true',
                        default=False)
    config = parser.parse_args()

    if config.dataset == 'MNIST':
        import datasets.mnist as dataset
    elif config.dataset == 'SVHN':
        import datasets.svhn as dataset
    elif config.dataset == 'CIFAR10':
        import datasets.cifar10 as dataset
    else:
        raise ValueError(config.dataset)

    config.data_info = dataset.get_data_info()
    config.conv_info = dataset.get_conv_info()
    config.deconv_info = dataset.get_deconv_info()
    dataset_train, dataset_test = dataset.create_default_splits()

    trainer = Trainer(config, dataset_train, dataset_test)

    log.warning("dataset: %s, learning_rate: %f", config.dataset,
                config.learning_rate)
    trainer.train()
Example #31
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--model',
                        type=str,
                        default='relational_network',
                        choices=[
                            'relational_network', 'baseline',
                            "attentional_relational_network"
                        ])
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--dataset_path',
                        type=str,
                        default='Sort-of-CLEVR_default')
    parser.add_argument('--learning_rate', type=float, default=2.5e-4)
    parser.add_argument('--lr_weight_decay',
                        action='store_true',
                        default=False)
    config = parser.parse_args()

    path = os.path.join('./datasets', config.dataset_path)

    if check_data_path(path):
        import sort_of_clevr as dataset
    else:
        raise ValueError(path)

    config.data_info = dataset.get_data_info()
    config.conv_info = dataset.get_conv_info()
    dataset_train, dataset_val, dataset_test = dataset.create_default_splits(
        path)

    trainer = Trainer(config, dataset_train, dataset_val, dataset_test)

    log.warning("dataset: %s, learning_rate: %f", config.dataset_path,
                config.learning_rate)
    trainer.train()
Example #32
def WordWeightAnswer(input, answer_dict, word_weight_dir=None,
                     use_bias=False, is_training=True,
                     scope='WordWeightAnswer',
                     weight_name='class_weights',
                     bias_name='class_biases',
                     default_bias=-100.0,
                     reuse=tf.AUTO_REUSE):
    with tf.variable_scope(scope, reuse=reuse) as scope:
        log.warning(scope.name)
        input_dim = input.get_shape().as_list()[-1]
        dim = len(answer_dict['vocab'])
        weights = np.zeros([input_dim, dim], dtype=np.float32)
        biases = np.zeros([dim], dtype=np.float32) + default_bias
        if word_weight_dir is not None:
            word_answer_dict_path = os.path.join(word_weight_dir, 'answer_dict.pkl')
            word_answer_dict = cPickle.load(open(word_answer_dict_path, 'rb'))
            word_weight_path = os.path.join(word_weight_dir, 'weights.hdf5')
            with h5py.File(word_weight_path, 'r') as f:
                answer_weight = np.array(f.get(weight_name))
                answer_bias = np.array(f.get(bias_name))

            for i, a in enumerate(answer_dict['vocab']):
                if a in word_answer_dict['dict']:
                    weights[:, i] = answer_weight[:, word_answer_dict['dict'][a]]
                    biases[i] = answer_bias[word_answer_dict['dict'][a]]
                else: pass  # initialize to zero
        if use_bias:
            out = layers.fully_connected(
                input, dim, activation_fn=None,
                weights_initializer=tf.constant_initializer(weights),
                biases_initializer=tf.constant_initializer(biases),
                reuse=reuse, trainable=is_training, scope='fc')
        else:
            out = layers.fully_connected(
                input, dim, activation_fn=None,
                weights_initializer=tf.constant_initializer(weights),
                biases_initializer=None,
                reuse=reuse, trainable=is_training, scope='fc')
        return out
Example #33
def encode_L_bidirection(seq, seq_len, dim=384, scope='encode_L_bi',
                         reuse=tf.AUTO_REUSE, cell_type='LSTM'):
    with tf.variable_scope(scope, reuse=reuse) as scope:
        dim1 = int(math.ceil(dim / 2.0))
        dim2 = int(math.floor(dim / 2.0))
        log.warning(scope.name)
        if cell_type == 'LSTM':
            cell1 = rnn.BasicLSTMCell(num_units=dim1, state_is_tuple=True)
            cell2 = rnn.BasicLSTMCell(num_units=dim2, state_is_tuple=True)
        elif cell_type == 'GRU':
            cell1 = rnn.GRUCell(num_units=dim1)
            cell2 = rnn.GRUCell(num_units=dim2)
        else: raise ValueError('Unknown cell_type')
        bi_outputs, encoder_state = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=cell1, cell_bw=cell2, inputs=seq, sequence_length=seq_len,
            dtype=tf.float32)
        if cell_type == 'LSTM':
            raise RuntimeError('Check how LSTM works with bidirectional rnn')
        elif cell_type == 'GRU':
            output = tf.concat(bi_outputs, -1)
            output_state = tf.concat(encoder_state, -1)
        return output, output_state