Example #1
    def build_model(self, load_pretrained=True):
        # core yolo model
        input_layer = layers.Input(self.img_size)
        yolov4_output = yolov4_neck(input_layer, self.num_classes)
        self.yolo_model = models.Model(input_layer, yolov4_output)

        # Build training model
        y_true = [
            layers.Input(name='input_2',
                         shape=(52, 52, 3,
                                (self.num_classes + 5))),  # label small boxes
            layers.Input(name='input_3',
                         shape=(26, 26, 3,
                                (self.num_classes + 5))),  # label medium boxes
            layers.Input(name='input_4',
                         shape=(13, 13, 3,
                                (self.num_classes + 5))),  # label large boxes
            layers.Input(name='input_5',
                         shape=(self.max_boxes, 4)),  # true bboxes
        ]
        loss_list = tf.keras.layers.Lambda(
            yolo_loss,
            name='yolo_loss',
            arguments={
                'num_classes': self.num_classes,
                'iou_loss_thresh': self.iou_loss_thresh,
                'anchors': self.anchors
            })([*self.yolo_model.output, *y_true])
        self.training_model = models.Model([self.yolo_model.input, *y_true],
                                           loss_list)

        # Build inference model
        yolov4_output = yolov4_head(yolov4_output, self.num_classes,
                                    self.anchors, self.xyscale)
        # output: [boxes, scores, classes, valid_detections]
        self.inference_model = models.Model(
            input_layer,
            nms(yolov4_output,
                self.img_size,
                self.num_classes,
                iou_threshold=self.config['iou_threshold'],
                score_threshold=self.config['score_threshold']))

        if load_pretrained and self.weight_path:
            if self.weight_path.endswith('.weights'):
                load_weights(self.yolo_model, self.weight_path)
                print(f'load from {self.weight_path}')
            elif self.weight_path.endswith('.h5'):
                self.training_model.load_weights(self.weight_path)
                print(f'load from {self.weight_path}')

        self.training_model.compile(optimizer=optimizers.Adam(lr=1e-3),
                                    loss={
                                        'yolo_loss':
                                        lambda y_true, y_pred: y_pred
                                    })
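A minimal, self-contained sketch of the pattern used above, in which the loss is computed inside the graph by a Lambda layer and compile() only receives a pass-through loss (a toy regression for illustration, not part of the YOLOv4 code):

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models, optimizers

# Toy model: the real labels enter as a second input, and the Lambda layer
# emits the per-sample loss as the model's only output.
inputs = layers.Input(shape=(10,))
targets = layers.Input(shape=(1,))
pred = layers.Dense(1)(inputs)
loss = layers.Lambda(
    lambda args: tf.reduce_mean(tf.square(args[0] - args[1]), axis=-1),
    name='toy_loss')([pred, targets])

training_model = models.Model([inputs, targets], loss)
training_model.compile(optimizer=optimizers.Adam(1e-3),
                       loss={'toy_loss': lambda y_true, y_pred: y_pred})

# fit() then only needs dummy targets, since the real labels are fed as inputs.
x, y = np.random.rand(32, 10), np.random.rand(32, 1)
training_model.fit([x, y], np.zeros(len(x)), epochs=1, verbose=0)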
Example #2
def load_G(config):
    """
    Loads a pre-trained BigGAN generator network (exponential moving average variant).
    """
    config['resolution'] = utils.imsize_dict[config['dataset']]
    config['n_classes'] = utils.nclass_dict[config['dataset']]
    config['G_activation'] = utils.activation_dict[config['G_nl']]

    config = utils.update_config_roots(config)
    device = 'cuda'

    # Seed RNG
    utils.seed_rng(config['seed'])

    # Prepare root folders if necessary
    utils.prepare_root(config)

    # Setup cudnn.benchmark for free speed
    torch.backends.cudnn.benchmark = True

    # Import the model--this line allows us to dynamically select different files.
    model = __import__(config['model'])
    experiment_name = (config['experiment_name'] if config['experiment_name']
                       else utils.name_from_config(config))

    G = model.Generator(**{**config, 'skip_init': True, 'no_optim': True}).to(device)

    # FP16? (Note: half/mixed-precision is untested with the direction discovery code)
    if config['G_fp16']:
        print('Casting G to float16...')
        G = G.half()

    print(G)
    print('Number of params in G: {}'.format(
        *[sum([p.data.nelement() for p in net.parameters()]) for net in [G]]))
    # Prepare state dict, which holds things like epoch # and itr #
    state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'save_best_num': 0,
                  'best_IS': 0, 'best_FID': 999999, 'config': config}

    # Load the pre-trained G_ema model as "G"
    if config['resume']:
        print('Loading weights...')
        utils.load_weights(None, None, state_dict,
                           None, None,
                           config['load_weights'] if config['load_weights'] else None,
                           G, load_optim=False, strict=False, direct_path=config['G_path'])

    G.to(device)
    # Override G's optimizer to only optimize the direction matrix A:
    for param in G.parameters():
        param.requires_grad = False
    G.optim = None
    G.eval()
    return G, state_dict, device, experiment_name
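A hypothetical usage sketch for the loader above, assuming the standard BigGAN-PyTorch generator interface (G.dim_z for the latent size, class embedding via G.shared, forward pass G(z, G.shared(y))):

import torch

G, state_dict, device, experiment_name = load_G(config)
with torch.no_grad():
    z = torch.randn(8, G.dim_z, device=device)                      # latent batch
    y = torch.randint(0, config['n_classes'], (8,), device=device)  # random class labels
    samples = G(z, G.shared(y))                                     # images in [-1, 1]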
Example #3
File: sample.py    Project: yqGANs/LOGAN
def run(config):
    # Prepare state dict, which holds things like epoch # and itr #
    state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'config': config}

    # update config (see train.py for explanation)
    config['resolution'] = 256
    config['n_classes'] = 40
    config['G_activation'] = utils.activation_dict[
        config['G_nl']]  #leaky relu for LOGAN
    config['D_activation'] = utils.activation_dict[config['D_nl']]
    config = utils.update_config_roots(config)
    config['skip_init'] = True
    config['no_optim'] = True
    device = 'cuda'

    # Seed RNG
    utils.seed_rng(config['seed'])
    # Setup cudnn.benchmark for free speed
    torch.backends.cudnn.benchmark = True

    experiment_name = (config['experiment_name']
                       if config['experiment_name'] else 'PXDgen')
    print('Experiment name is %s' % experiment_name)

    G = Generator(**config).cuda()

    # Load weights
    print('Loading weights...')
    # Here is where we deal with the ema--load ema weights or load normal weights
    utils.load_weights(G if not (config['use_ema']) else None,
                       None,
                       state_dict,
                       config['weights_root'],
                       experiment_name,
                       config['load_weights'],
                       G if config['ema'] and config['use_ema'] else None,
                       strict=False,
                       load_optim=False)

    if config['use_ema']:
        collect_bn_stats(G, 500, config, device)
    if config['G_eval_mode']:
        print('Putting G in eval mode..')
        G.eval()
    else:
        print('G is in %s mode...' % ('training' if G.training else 'eval'))

    out_dir = config['samples_root']
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    print('Generating images..')
    generate_images(out_dir, G, config['sample_num'], config, device)
    shutil.make_archive('images', 'zip', out_dir)
Example #4
    def forward(*new_params: Tensor) -> Tensor:
        load_weights(model, names, new_params)
        out = model(inputs)

        loss = criterion(out, labels)
        weight_dict = criterion.weight_dict
        final_loss = cast(
            Tensor,
            sum(loss[k] * weight_dict[k] for k in loss.keys()
                if k in weight_dict))
        return final_loss
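A sketch of how a closure like forward(*new_params) is typically consumed: differentiating the loss with respect to the parameter tensors themselves, e.g. to build a parameter Hessian. It assumes names/params were collected beforehand, roughly as names = [n for n, _ in model.named_parameters()] and params = tuple(p.detach().requires_grad_() for _, p in model.named_parameters()):

import torch

grads = torch.autograd.grad(forward(*params), params)      # first-order gradients w.r.t. the parameters
hess = torch.autograd.functional.hessian(forward, params)  # nested tuple of Hessian blocks (can be very large)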
Example #5
    def __init__(self,
                 params,
                 load_pretrain=None,
                 dist_model=False,
                 demo=False):
        super(PartialCompletionContentCGAN, self).__init__()
        self.params = params
        self.with_modal = params.get('with_modal', False)

        # model
        self.model = backbone.__dict__[params['backbone_arch']](
            **params['backbone_param'])
        if load_pretrain is not None:
            assert load_pretrain.endswith(
                '.pth'), "load_pretrain should end with .pth"
            utils.load_weights(load_pretrain, self.model)

        self.model.cuda()

        if dist_model:
            self.model = utils.DistModule(self.model)
            self.world_size = dist.get_world_size()
        else:
            self.model = backbone.FixModule(self.model)
            self.world_size = 1

        self.demo = demo
        if demo:
            return

        # optim
        self.optim = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                             self.model.parameters()),
                                      lr=params['lr'])

        # netD
        self.netD = backbone.__dict__[params['discriminator']](
            **params['discriminator_params'])
        self.netD.cuda()
        if dist_model:
            self.netD = utils.DistModule(self.netD)
        else:
            self.netD = backbone.FixModule(self.netD)
        self.optimD = torch.optim.Adam(self.netD.parameters(),
                                       lr=params['lr'] * params['d2g_lr'],
                                       betas=(0.0, 0.9))

        # loss
        self.criterion = InpaintingLoss(
            backbone.VGG16FeatureExtractor()).cuda()
        self.gan_criterion = AdversarialLoss(type=params['gan_type']).cuda()

        cudnn.benchmark = True
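For reference, a minimal sketch of what a load_weights(path, model) helper for .pth checkpoints commonly looks like (an assumption about the utils module, not its actual implementation):

import torch

def load_weights(path, model):
    checkpoint = torch.load(path, map_location='cpu')
    # Some checkpoints nest the tensors under a 'state_dict' key.
    state_dict = checkpoint.get('state_dict', checkpoint)
    # Strip a possible 'module.' prefix left over from DataParallel training.
    state_dict = {k.replace('module.', '', 1): v for k, v in state_dict.items()}
    model.load_state_dict(state_dict, strict=False)
    return model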
Example #6
    def forward(*new_params: Tensor) -> Tensor:
        load_weights(model, names, new_params)
        mha_output, attn_weights = model(query,
                                         key,
                                         value,
                                         attn_mask=attn_mask,
                                         bias_k=bias_k,
                                         bias_v=bias_v)

        # Don't test any specific loss, just backprop ones for both outputs
        loss = mha_output.sum() + attn_weights.sum()

        return loss
Example #7
def main(save_filename=None,
         load_filename="simple_rnn_custom_model_weights.h5",
         do_train=False,
         num_epochs=2,
         cell_type='gru'):
    """ Entry point """
    if do_train:
        print("Training and saving model...")
        (model, vocab) = train_model(file_name=save_filename,
                                     num_epochs=num_epochs)
        ids_from_chars = preprocessing.StringLookup(vocabulary=list(vocab))
        vocab_size = len(ids_from_chars.get_vocabulary())
    else:
        if load_filename is None:
            print(
                "ERROR: no load file name provided and do_train is False; no model can be used"
            )
            return 1
        # TODO Somehow this vocab should be accessible without needing to read and process this data
        data = open('./archive/drake_lyrics.txt').read()
        print('Length of text: {} characters'.format(len(data)))
        vocab = sorted(set(data))
        ids_from_chars = preprocessing.StringLookup(vocabulary=list(vocab))
        vocab_size = len(ids_from_chars.get_vocabulary())
        print("Loading model from disk...")
        #cell = custom_models.MyRNNCell(vocab_size)
        cell = custom_models.MyGRUCell(vocab_size)
        model = custom_models.MyCellModelWrapper(cell)
        utils.load_weights(load_filename, model,
                           tf.TensorShape([1, seq_length, vocab_size]))
    print("Generating Bars...please wait")
    seed_texts = [
        "[Verse]", "you", "love", "boy", "I love", "I love you", "Kiki, ",
        "Swanging"
    ]
    for seed in seed_texts:
        num_chars = 400
        output_text = utils.generate_text_one_h(seed,
                                                model,
                                                seq_length,
                                                ids_from_chars,
                                                chars_to_gen=num_chars)
        print(">>>>>>>>>>>>>>>>>>>>")
        print("Input seed: %s" % (seed))
        print("%d character generated sequence:\n%s\n" %
              (num_chars, output_text))
        print("<<<<<<<<<<<<<<<<<<<<")
        print("End of output for seed: %s" % (seed))
    #Hope you enjoyed :)
    return 0
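A short round-trip sketch of the StringLookup vocabulary handling used above, assuming preprocessing refers to tf.keras.layers.experimental.preprocessing:

import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing

vocab = sorted(set("example text"))
ids_from_chars = preprocessing.StringLookup(vocabulary=list(vocab))
chars_from_ids = preprocessing.StringLookup(
    vocabulary=ids_from_chars.get_vocabulary(), invert=True)

ids = ids_from_chars(tf.strings.unicode_split("text", 'UTF-8'))      # characters -> integer ids
text = tf.strings.reduce_join(chars_from_ids(ids)).numpy().decode()  # ids -> characters -> string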
Example #8
def main(argv=None):
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    if FLAGS.tiny:
        model = yolo_v3_tiny.yolo_v3_tiny
    elif FLAGS.spp:
        model = yolo_v3.yolo_v3_spp
    else:
        model = yolo_v3.yolo_v3

    classes = load_coco_names(FLAGS.class_names)

    # placeholder for detector inputs
    # any size > 320 will work here
    inputs = tf.placeholder(tf.float32, [None, 416, 416, 3])

    with tf.variable_scope('detector'):
        detections = model(inputs, len(classes), data_format=FLAGS.data_format)
        load_ops = load_weights(tf.global_variables(scope='detector'),
                                FLAGS.weights_file)

    saver = tf.train.Saver(tf.global_variables(scope='detector'))

    with tf.Session() as sess:
        sess.run(load_ops)

        save_path = saver.save(sess, save_path=FLAGS.ckpt_file)
        print('Model saved in path: {}'.format(save_path))
Example #9
def main(argv=None):
    if FLAGS.tiny:
        model = yolo_v3_tiny.yolo_v3_tiny
    else:
        model = yolo_v3.yolo_v3

    # Load coco classes
    classes = load_coco_names(FLAGS.class_names)

    # Placeholder for detector inputs any size > 320 will work here
    inputs = tf.placeholder(tf.float32, [None, 416, 416, 3])

    with tf.variable_scope('detector'):
        # Initialize model with required input size.
        detections = model(inputs, len(classes), data_format=FLAGS.data_format)

        # Load weights file into the model
        load_ops = load_weights(tf.global_variables(scope='detector'),
                                FLAGS.weights_file)

    # Initialize model saver module
    saver = tf.train.Saver(tf.global_variables(scope='detector'))

    with tf.Session() as sess:
        # Run load_weight function
        sess.run(load_ops)

        # Save the loaded model into a proper TF file.
        save_path = saver.save(sess, save_path=FLAGS.ckpt_file)
        print('Model saved in path: {}'.format(save_path))
Example #10
def main(argv=None):
    if FLAGS.tiny:
        model = yolo_v3_tiny.yolo_v3_tiny
        print ('doing tiny')
    else:
        model = yolo_v3.yolo_v3

    classes = load_coco_names(FLAGS.class_names)
    print ('num classes',len(classes))

    # placeholder for detector inputs
    inputs = tf.placeholder(tf.float32, [None, FLAGS.size, FLAGS.size, 3], "inputs")

    with tf.variable_scope('detector'):
        detections = model(inputs, len(classes), data_format=FLAGS.data_format)
        load_ops = load_weights(tf.global_variables(scope='detector'), FLAGS.weights_file)

    #detect_1.shape = (?, 507, 85)
    #detect_2.shape = (?, 2028, 85)
    #detect_3.shape = (?, 8112, 85)
    #detections.shape = (?, 10647, 85)
    #detections = Tensor("detector/yolo-v3/detections:0", shape=(?, 10647, 85), dtype=float32)
    print("detections.shape =", detections.shape)
    print(detections)
    print(detections.name)
    # Sets the output nodes in the current session
    boxes = detections_boxes(detections)

    with tf.Session() as sess:
        sess.run(load_ops)
        freeze_graph(sess, FLAGS.output_graph, FLAGS.tiny)
Example #11
def plot_eigenvalues_old(weights_path, n_eigenvalues=None, ax=None, **kwargs):
    warnings.warn('deprecated', DeprecationWarning)

    loaded_weights = load_weights(weights_path)

    G = nx.from_scipy_sparse_matrix(weights_to_graph(loaded_weights))
    G_nn = G.subgraph(max(nx.connected_components(G), key=len))
    assert nx.is_connected(G_nn)

    norm_laplacian_matrix = nx.normalized_laplacian_matrix(G_nn)
    eigen_values = np.sort(np.linalg.eigvals(norm_laplacian_matrix.A))

    if n_eigenvalues is None:
        start, end = 0, len(G_nn)
    elif isinstance(n_eigenvalues, int):
        start, end = 0, n_eigenvalues
    elif isinstance(n_eigenvalues, tuple):
        start, end = n_eigenvalues
    else:
        raise TypeError(
            'n_eigenvalues should be either None, an int, or a tuple.')

    eigen_values = eigen_values[start:end]

    if ax is None:
        _, ax = plt.subplots(1)

    ax.xaxis.set_major_locator(MaxNLocator(integer=True))

    if 'linestyle' not in kwargs:
        kwargs['linestyle'] = 'none'
        kwargs['marker'] = '*'
        kwargs['markersize'] = 5

    return ax.plot(range(start + 1, end + 1), eigen_values, **kwargs)
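The weights_to_graph helper is not shown here; a rough sketch of what it plausibly does is to stack the layer-to-layer weight magnitudes of an MLP into one symmetric, block-structured sparse adjacency matrix over all neurons (an assumption, given dense numpy weight matrices):

import numpy as np
from scipy import sparse

def weights_to_graph(weights):
    widths = [weights[0].shape[0]] + [w.shape[1] for w in weights]
    n = sum(widths)                       # one graph node per neuron
    adj = sparse.lil_matrix((n, n))
    offset = 0
    for w in weights:
        rows, cols = w.shape
        block = np.abs(w)                 # edge weight = |connection weight|
        adj[offset:offset + rows, offset + rows:offset + rows + cols] = block
        adj[offset + rows:offset + rows + cols, offset:offset + rows] = block.T
        offset += rows
    return adj.tocsr()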
Example #12
def analysis(opts):
    opts.load()
    opts.n_examples = opts.batch_size
    opts.test = True  # n_examples is set by possible angle directions
    opts.test_type = 'full'  # full, 2-leg
    opts.r0 = 10
    opts.r1 = 5
    opts.agent_steps = 100
    opts.max_angle_change = 45

    W_ah, W_sh, W_hh, W_out, bias = utils.load_weights(opts.folder, opts.weights_name)
    inputs, labels, pred, states, angle_trig, opts = train.run_test(opts)

    titles = ['W_ah', 'W_sh', 'W_hh', 'W_out']
    mat = [W_ah, W_sh, W_hh, W_out]
    tup = zip(titles, mat)
    fname = 'figures/' + opts.get_path() + '_unsorted'
    # utils.pretty_image(tup, 2, 2, fname, cbar=True)

    # in labels and predictions, data axis 2 has angles (radians) and distance
    ix = 14
    # print('labels', labels[ix])
    # print('pred', pred[ix])
    if opts.output_format == 'polar':
        visualize_polar_trajectory(pred, angle_trig, ix, opts)
        # visualize_trajectory(labels, angle_trig, ix, opts)
    else:
        visualize_cartesian_trajectory(pred, angle_trig, ix, opts)
    plt.show()
Example #13
def main(argv=None):
    if FLAGS.tiny:
        model = yolo_v3_tiny.yolo_v3_tiny
    elif FLAGS.spp:
        model = yolo_v3.yolo_v3_spp
    else:
        model = yolo_v3.yolo_v3

    classes = load_coco_names(FLAGS.class_names)

    # placeholder for detector inputs
    inputs = tf.placeholder(
        tf.float32, [None, FLAGS.size, FLAGS.size, 3], "inputs")

    with tf.variable_scope('detector'):
        detections = model(inputs, len(classes), data_format=FLAGS.data_format)
        load_ops = load_weights(tf.global_variables(
            scope='detector'), FLAGS.weights_file)

    # Sets the output nodes in the current session
    boxes = detections_boxes(detections)

    with tf.Session() as sess:
        sess.run(load_ops)
        savepb(sess, FLAGS.output_graph)
Example #14
def main(argv=None):
    if FLAGS.tiny:
        model = yolo_v3_tiny.yolo_v3_tiny
    elif FLAGS.spp:
        model = yolo_v3.yolo_v3_spp
    else:
        model = yolo_v3.yolo_v3

    classes = load_coco_names(FLAGS.class_names)

    # placeholder for detector inputs
    inputs = tf.placeholder(tf.float32, [None, FLAGS.size, FLAGS.size, 3],
                            "inputs")

    with tf.variable_scope('detector'):
        detections = model(
            inputs, len(classes), data_format=FLAGS.data_format
        )  # full YOLOv3 model (output shape (?, 10647, num_classes + 5))
        load_ops = load_weights(tf.global_variables(scope='detector'),
                                FLAGS.weights_file)

    # Sets the output nodes in the current session
    boxes = detections_boxes(
        detections)  # 1) split the raw output into boxes and class scores; 2) name the result 'output_boxes' and add it to the graph

    with tf.Session() as sess:
        sess.run(load_ops)
        reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
        var_to_shape_map = reader.get_variable_to_shape_map()
        for key in var_to_shape_map:
            print("tensor_name: ", key)
        freeze_graph(sess, FLAGS.output_graph)
Example #15
def draw_clustered_mlp(weights_path,
                       clustering_result,
                       n_clusters=4,
                       is_first_square=True,
                       ax=None):
    """Draw MLP with its spectral clustering."""

    weights = load_weights(weights_path)
    layer_widths = extact_layer_widths(weights)

    labels, metrics = clustering_result

    G = nx.from_scipy_sparse_matrix(weights_to_graph(weights))

    pos = set_nodes_positions(G.nodes, layer_widths, labels, is_first_square)

    color_mapper = get_color_mapper(n_clusters)

    color_map = [color_mapper[label] for label in labels]

    if ax is None:
        _, ax = plt.subplots(1)

    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        nx.draw(G, pos=pos, node_color=color_map, width=0, node_size=10, ax=ax)

    draw_metrics(metrics, ax)

    return ax, labels, metrics
Example #16
def main(argv=None):
    if FLAGS.tiny:
        model = yolov3_tiny_3l.yolo_v3_tiny
    elif FLAGS.dense:
        model = dense_yolov3_v1.dense_yolo_v3
    else:
        model = yolo_v3.yolo_v3

    classes = load_names(FLAGS.class_names)

    # placeholder for detector inputs
    inputs = tf.placeholder(tf.float32, [None, FLAGS.size, FLAGS.size, 3],
                            "inputs")

    with tf.variable_scope('detector'):
        detections = model(inputs, len(classes), data_format=FLAGS.data_format)
        load_ops = load_weights(tf.global_variables(scope='detector'),
                                FLAGS.weights_file)

    # Sets the output nodes in the current session
    boxes = detections_boxes(detections)

    with tf.Session() as sess:
        sess.run(load_ops)
        freeze_graph(sess, FLAGS.output_graph)
        writer = tf.summary.FileWriter("logs/", sess.graph)
Example #17
def main(argv=None):
    if FLAGS.tiny:
        model = yolo_v3_tiny.yolo_v3_tiny
    else:
        model = yolo_v3.yolo_v3

    classes = load_coco_names(FLAGS.class_names)

    # placeholder for detector inputs
    # any size > 320 will work here
    inputs = tf.placeholder(tf.float32, [None, 416, 416, 3])

    with tf.variable_scope('detector'):
        detections = model(inputs, len(classes),
                           data_format=FLAGS.data_format)
        load_ops = load_weights(tf.global_variables(
            scope='detector'), FLAGS.weights_file)

    saver = tf.train.Saver(tf.global_variables(scope='detector'))

    with tf.Session() as sess:
        sess.run(load_ops)

        save_path = saver.save(sess, save_path=FLAGS.ckpt_file)
        print('Model saved in path: {}'.format(save_path))
Example #18
def main(argv=None):
    if FLAGS.tiny:
        model = yolo_v3_tiny.yolo_v3_tiny
    elif FLAGS.spp:
        model = yolo_v3.yolo_v3_spp
    else:
        model = yolo_v3.yolo_v3

    classes = load_coco_names(FLAGS.class_names)

    # Define the network's serving interface. The server side decodes base64
    # automatically, so the incoming data is already decoded, but the bytes
    # still need to be converted into an image tensor.
    jpeg_vec_bytes = tf.placeholder(tf.string, shape=None, name=None)
    jpeg_sca_bytes = tf.reshape(jpeg_vec_bytes, [])
    jpeg_ndarr = tf.image.decode_jpeg(jpeg_sca_bytes, fancy_upscaling=False)  # decode the scalar byte string into an image array; fancy_upscaling must be False or the result will not match the client side
    jpeg_ndarr = tf.image.resize_images(jpeg_ndarr, [FLAGS.size, FLAGS.size], method=0)  # resize the image to the expected input size
    inputs = tf.reshape(jpeg_ndarr, [1, FLAGS.size, FLAGS.size, 3], "inputs")
    # Original placeholder for detector inputs, kept for reference:
    # inputs = tf.placeholder(tf.float32, [None, FLAGS.size, FLAGS.size, 3], "inputs")
    # Load the YOLOv3 model
    with tf.variable_scope('detector'):
        detections = model(inputs, len(classes), data_format=FLAGS.data_format)  # full YOLOv3 model (output shape (?, 10647, num_classes + 5))
        load_ops = load_weights(tf.global_variables(scope='detector'), FLAGS.weights_file)
    # Sets the output nodes in the current session
    boxes = detections_boxes(detections)  # 1) split the raw output into boxes and class scores; 2) name the result 'output_boxes' and add it to the graph
    # Read the checkpoint
    with tf.Session() as sess:
        sess.run(load_ops)
        reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
        var_to_shape_map = reader.get_variable_to_shape_map()
        for key in var_to_shape_map:
            print("tensor_name: ", key)
        #############################################
        # output_node_names = ["output_boxes","inputs",]
        # output_node_names = ",".join(output_node_names)
        #
        # output_graph_def = tf.graph_util.convert_variables_to_constants(
        #     sess,tf.get_default_graph().as_graph_def(),output_node_names.split(","))
        #
        # with tf.gfile.GFile(FLAGS.output_graph, "wb") as f:
        #     f.write(output_graph_def.SerializeToString())
        # print("{} ops written to {}.".format(len(output_graph_def.node), FLAGS.output_graph))
        #############################################
        # Save the model in SavedModel (pb) format
        export_path = 'models/pb/20191226'
        builder = tf.saved_model.builder.SavedModelBuilder(export_path)

        images = tf.saved_model.utils.build_tensor_info(jpeg_vec_bytes)
        boxes = tf.saved_model.utils.build_tensor_info(boxes)
        prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={'images': images},
                outputs={'scores': boxes},
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={'predict_images': prediction_signature},
            main_op=tf.tables_initializer(),
            strip_default_attrs=True)
        builder.save()
Example #19
def main(args):
    # Load the data
    (x_train, y_train), (x_test, y_test) = load_cifar(args.cifar_root)

    # Randomly sample training examples
    train_num = x_train.shape[0]

    def next_batch(batch_size):
        idx = np.random.choice(train_num, batch_size)
        return x_train[idx], y_train[idx]

    # Build the network
    vgg = VGG(image_size=32, name='vgg11')
    opt = RmsProp(vgg.weights, lr=args.lr, decay=1e-3)

    # Load weights
    if args.checkpoint:
        weights = load_weights(args.checkpoint)
        vgg.load_weights(weights)
        print("load weights done")

    # Evaluation only
    if args.eval_only:
        indices = np.random.choice(len(x_test), args.eval_num, replace=False)
        print('{} start evaluate'.format(
            time.asctime(time.localtime(time.time()))))
        acc = get_accuracy(vgg, x_test[indices], ys=y_test[indices])
        print('{} acc on test dataset is :{:.3f}'.format(
            time.asctime(time.localtime(time.time())), acc))
        return

    # Training
    num_steps = args.steps
    for step in range(num_steps):
        x, y_true = next_batch(args.batch_size)
        # Forward pass
        y_predict = vgg.forward(x.astype(float))
        # print('y_pred: min{},max{},mean:{}'.format(np.min(y_predict, axis=-1),
        #                                            np.max(y_predict, axis=-1),
        #                                            np.mean(y_predict, axis=-1)))
        # print('y_pred: {}'.format(y_predict))
        acc = np.mean(
            np.argmax(y_predict, axis=1) == np.argmax(y_true, axis=1))
        # Compute the loss
        loss, gradient = cross_entropy_loss(y_predict, y_true)

        # Backward pass
        vgg.backward(gradient)
        # Apply the gradient update
        opt.iterate(vgg)

        # Print progress
        print('{} step:{},loss:{:.4f},acc:{:.4f}'.format(
            time.asctime(time.localtime(time.time())), step, loss, acc))

        # Save weights every 100 steps
        if step % 100 == 0:
            save_weights(
                os.path.join(args.save_dir, 'weights-{:03d}.pkl'.format(step)),
                vgg.weights)
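The cross_entropy_loss helper returns both the scalar loss and the gradient fed into vgg.backward(). A plausible numpy sketch, assuming y_predict holds raw logits and y_true is one-hot (an assumption about the helper, not its actual implementation):

import numpy as np

def cross_entropy_loss(y_predict, y_true):
    # Numerically stable softmax over the class axis.
    shifted = y_predict - np.max(y_predict, axis=1, keepdims=True)
    probs = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
    batch_size = y_predict.shape[0]
    loss = -np.sum(y_true * np.log(probs + 1e-12)) / batch_size
    gradient = (probs - y_true) / batch_size  # dL/dlogits
    return loss, gradient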
Example #20
def main(argv=None):
    if FLAGS.tiny:
        model = yolo_v3_tiny.yolo_v3_tiny
    else:
        model = yolo_v3.yolo_v3

    config = configparser.ConfigParser(strict=False)
    config.read(FLAGS.model_config)

    classes = load_coco_names(FLAGS.class_names)
    # placeholder for detector inputs
    inputs = tf.placeholder(tf.float32, [
        None,
        config.getint("net", "height"),
        config.getint("net", "width"), 3
    ], "inputs")

    with tf.variable_scope('detector'):
        detections = model(inputs, len(classes), data_format=FLAGS.data_format)
        load_ops = load_weights(tf.global_variables(scope='detector'),
                                FLAGS.weights_file)
    # Sets the output nodes in the current session
    boxes = detections_boxes(detections)

    with tf.Session() as sess:
        sess.run(load_ops)
        freeze_graph(sess, FLAGS.output_graph)
Example #21
def test(build_model, dataset, hparams, logdir):
    # Check if the given directory already contains model
    if os.path.exists(os.path.join(logdir, "stats.json")):
        # then mark this as the directory to load weights from
        model_dir = logdir
    else:
        # Raise Error
        raise RuntimeError(f"No valid model stats file found in {logdir}")
    model_path = os.path.join(model_dir, "weights.h5")

    # Build model
    model = build_model(hparams, **dataset.preprocessing.kwargs)

    # Compile model
    model.compile(optimizer=optimizers.make_optimizer(hparams.optimizer,
                                                      hparams.opt_param),
                  loss="categorical_crossentropy",
                  metrics=["categorical_accuracy"])

    # Print Summary of models
    lq.models.summary(model)

    # Load model weights from the specified file
    print("Before loading...")
    for l in model.layers:
        for _w in l.trainable_weights:
            print("{:40s}".format(l.name, _w.name),
                  tf.keras.backend.get_value(_w).flatten()[:3])
    # model.load_weights(model_path)
    utils.load_weights(model, model_path)
    print("After loading...")
    for l in model.layers:
        for _w in l.trainable_weights:
            print("{:25s}".format(l.name, _w.name),
                  tf.keras.backend.get_value(_w).flatten()[:3])

    # Test this model
    test_log = model.evaluate(dataset.test_data(hparams.batch_size),
                              steps=dataset.test_examples //
                              hparams.batch_size)

    data = [["Metric", "Value"]]
    for (idx, metric) in enumerate(model.metrics_names):
        data.append([metric, test_log[idx]])

    from terminaltables import AsciiTable
    print(AsciiTable(data, title="Test Statistics").table)
Example #22
def build_cluster_graph(weights_path, labels, normalize_in_out=True):

    weights = load_weights(weights_path)
    layer_widths = extact_layer_widths(weights)

    G = nx.DiGraph()

    (label_by_layer, current_label_by_layer,
     next_label_by_layer) = it.tee(splitter(labels, layer_widths), 3)

    next_label_by_layer = it.islice(next_label_by_layer, 1, None)

    for layer_index, layer_labels in enumerate(label_by_layer):
        unique_labels = sorted(label for label in np.unique(layer_labels)
                               if label != -1)
        for label in unique_labels:
            node_name = nodify(layer_index, label)
            G.add_node(node_name)

    edges = {}

    for layer_index, (current_labels, next_labels, layer_weights) in enumerate(
            zip(current_label_by_layer, next_label_by_layer, weights)):

        label_edges = it.product(
            (label for label in np.unique(current_labels) if label != -1),
            (label for label in np.unique(next_labels) if label != -1))

        for current_label, next_label in label_edges:

            current_mask = (current_label == current_labels)
            next_mask = (next_label == next_labels)

            between_weights = layer_weights[current_mask, :][:, next_mask]

            if normalize_in_out:
                n_weight_in, n_weight_out = between_weights.shape
                n_weights = n_weight_in * n_weight_out
                normalization_factor = n_weights
            else:
                normalization_factor = 1

            edge_weight = np.abs(between_weights).sum() / normalization_factor

            current_node = nodify(layer_index, current_label)
            next_node = nodify(layer_index + 1, next_label)

            edges[current_node, next_node] = edge_weight

    for nodes, weight in edges.items():
        G.add_edge(*nodes, weight=weight)

    pos = nx.spring_layout(G)  # compute graph layout

    plt.axis("off")
    nx.draw_networkx_nodes(G, pos, node_size=10)
    nx.draw_networkx_edges(G, pos, alpha=0.3)
    plt.show()
    return G
Example #23
def load_weight_openface(model):
    weights = utils.weights
    weights_dict = utils.load_weights()
    
    for name in weights:
        if model.get_layer(name) is not None:
            model.get_layer(name).set_weights(weights_dict[name])
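Note that Keras' get_layer raises a ValueError for unknown layer names rather than returning None, so a try/except variant of the same loop is more robust (a sketch using the same utils module as above):

def load_weight_openface_safe(model):
    weights = utils.weights
    weights_dict = utils.load_weights()
    for name in weights:
        try:
            model.get_layer(name).set_weights(weights_dict[name])
        except ValueError:
            pass  # layer not present in this model variant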
Example #24
def loadWeight(model):
    weights = utils.weights
    weights_dict = utils.load_weights()
    # Set layer weights of the model
    for name in weights:
        if model.get_layer(name) is not None:
            model.get_layer(name).set_weights(weights_dict[name])
Example #25
def draw_cluster_by_layer(weights_path,
                          clustering_result,
                          n_clusters=4,
                          with_text=False,
                          size_factor=4,
                          width_factor=30,
                          ax=None):

    G = build_cluster_graph(weights_path, clustering_result)

    labels, _ = clustering_result

    weights = load_weights(weights_path)
    layer_widths = extact_layer_widths(weights)

    color_mapper = get_color_mapper(n_clusters)

    node_size = {}

    (label_by_layer, current_label_by_layer,
     next_label_by_layer) = it.tee(splitter(labels, layer_widths), 3)

    next_label_by_layer = it.islice(next_label_by_layer, 1, None)

    for layer_index, layer_labels in enumerate(label_by_layer):
        unique_labels = sorted(label for label in np.unique(layer_labels)
                               if label != -1)
        for label in unique_labels:
            node_name = nodify(layer_index, label)
            node_size[node_name] = (layer_labels == label).sum()

    pos = nx.drawing.nx_agraph.graphviz_layout(G, prog='dot')
    width = [G[u][v]['weight'] * width_factor for u, v in G.edges()]
    node_color = [color_mapper[int(v.split('-')[1])] for v in G.nodes()]
    node_size = [node_size[v] * size_factor for v in G.nodes()]

    if ax is None:
        _, ax = plt.subplots(1)

    with warnings.catch_warnings():
        warnings.simplefilter('ignore')

        nx.draw(
            G,
            pos,
            with_labels=True,
            node_color=node_color,
            node_size=node_size,
            # font_color='white',
            width=width,
            ax=ax)

    if with_text:
        pprint(edges)

    return ax
Example #26
def test(path):
    (x_train, y_train), (x_test, y_test) = load_cifar(path)
    print(x_train[0][0])
    print(y_train[0])
    vgg = VGG(name='vgg11')
    import utils
    utils.save_weights('./w.pkl', vgg.weights)
    w = utils.load_weights('./w.pkl')
    print(type(w))
    print(w.keys())
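A minimal pickle-based sketch of what utils.save_weights / utils.load_weights might look like for this numpy VGG (an assumption, not the real utils module):

import pickle

def save_weights(path, weights):
    with open(path, 'wb') as f:
        pickle.dump(weights, f)

def load_weights(path):
    with open(path, 'rb') as f:
        return pickle.load(f)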
Example #27
 def __init__(self, weight_path: str, resize_dimensions: tuple,
              mean: List[float], std: List[float]):
     self.weight_path = weight_path
     self.resize_dimensions = resize_dimensions
     self.mean = mean
     self.gpu = torch.cuda.is_available()
     self.std = std
     self.model = load_weights(model=DeepMAR_ResNet50(),
                               model_weight_path=self.weight_path).eval()
     if self.gpu:
         self.model = self.model.cuda()
Example #28
 def init_network(self, weight_dir=MODEL_WEIGHT_SAVE_DIR):
     p_weights, r_weights, o_weights = load_weights(weight_dir)
     print('PNet weight file is: {}'.format(p_weights))
     self.p_net = p_net()
     self.p_net.load_weights(p_weights)
     if self.mode > 1:
         self.r_net = r_net()
         self.r_net.load_weights(r_weights)
     if self.mode > 2:
         self.o_net = o_net()
         self.o_net.load_weights(o_weights)
Example #29
def initWeights():
    # Load the pre-trained model weights.
    global weights
    global weights_dict
    weights = utils.weights
    weights_dict = utils.load_weights()
    # Set layer weights of the model
    for name in weights:
        if facemodel.get_layer(name) is not None:
            facemodel.get_layer(name).set_weights(weights_dict[name])
Example #30
def make_FCN(FCN_name,
             data,
             ndim,
             model_name='',
             input_shape=(None, 3, None, None),
             pad='same',
             logger=Logger('std')):
    if (isinstance(FCN_name, str)):
        logger.log('load function ' + FCN_name)
        try:
            FCN = globals()[FCN_name]
        except:
            raise NotImplementedError("No such function " + FCN_name)
    else:
        logger.log('load function ' + FCN_name.__name__)
        FCN = FCN_name
    datal = res = L.InputLayer(input_shape, data / 256. - 0.5, name='data')
    res = FCN(datal, ndim=ndim, pad=pad)
    if (model_name != ''):
        logger.log('load model ' + model_name)
        load_weights(res, model_name)
    logger.log(get_network_str(res, incomings=True, outgoings=True))
    return res
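Here load_weights(res, model_name) restores a Lasagne network; a plausible sketch, assuming the weights were saved with np.savez in layer order (an assumption about the helper, not its actual implementation):

import numpy as np
import lasagne

def load_weights(network, path):
    with np.load(path) as f:
        param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    lasagne.layers.set_all_param_values(network, param_values)
    return network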
Example #31
def run_FUCOS(**kwargs):
    training_data = kwargs.get('training_data')
    validation_data = kwargs.get('validation_data')
    batchsize = kwargs.get('batchsize')
    TRAIN = kwargs.get('TRAIN', True)
    run = kwargs.get('run')

    config_sess = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    config_sess.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=config_sess)

    #build the model
    model = []
    with tf.device('/gpu:2'):
        x = tf.placeholder(tf.float32, (None, 135, 240, 3), 'input')
        y_ = tf.placeholder(tf.float32, (None, 135, 240, 1), 'gt')
        keep_prob = tf.placeholder(tf.float32, name='dropout_prob')

        with tf.variable_scope('conv1'):
            conv1 = layers.ConvolutionalLayer(x, [135, 240, 3], [3, 3, 3, 64])
            model.append(conv1)
        with tf.variable_scope('conv2'):
            conv2 = layers.ConvolutionalLayer(conv1.output(), conv1.get_output_shape(), [3, 3, 64, 64], pool=True)
            model.append(conv2)

        with tf.variable_scope('conv3'):
            conv3 = layers.ConvolutionalLayer(conv2.output(), conv2.get_output_shape(), [3, 3, 64, 128])
            model.append(conv3)
        with tf.variable_scope('conv4'):
            conv4 = layers.ConvolutionalLayer(conv3.output(), conv3.get_output_shape(), [3, 3, 128, 128], pool=True)
            model.append(conv4)

        with tf.variable_scope('conv5'):
            conv5 = layers.ConvolutionalLayer(conv4.output(), conv4.get_output_shape(), [3, 3, 128, 256])
            model.append(conv5)
        with tf.variable_scope('conv6'):
            conv6 = layers.ConvolutionalLayer(conv5.output(), conv5.get_output_shape(), [3, 3, 256, 256])
            model.append(conv6)
        with tf.variable_scope('conv7'):
            conv7 = layers.ConvolutionalLayer(conv6.output(), conv6.get_output_shape(), [3, 3, 256, 256], pool=True)
            model.append(conv7)

        with tf.variable_scope('conv8'):
            conv8 = layers.ConvolutionalLayer(conv7.output(), conv7.get_output_shape(), [3, 3, 256, 512])
            model.append(conv8)
        with tf.variable_scope('conv9'):
            conv9 = layers.ConvolutionalLayer(conv8.output(), conv8.get_output_shape(), [3, 3, 512, 512])
            model.append(conv9)
        with tf.variable_scope('conv10'):
            conv10 = layers.ConvolutionalLayer(conv9.output(), conv9.get_output_shape(), [3, 3, 512, 512], pool=True)
            model.append(conv10)

        with tf.variable_scope('conv11'):
            conv11 = layers.ConvolutionalLayer(conv10.output(), conv10.get_output_shape(), [3, 3, 512, 512])
            model.append(conv11)
        with tf.variable_scope('conv12'):
            conv12 = layers.ConvolutionalLayer(conv11.output(), conv11.get_output_shape(), [3, 3, 512, 512])
            model.append(conv12)
        with tf.variable_scope('conv13'):
            conv13 = layers.ConvolutionalLayer(conv12.output(), conv12.get_output_shape(), [3, 3, 512, 512], pool=True)
            model.append(conv13)

        with tf.variable_scope('conv14'):
            conv14 = layers.ConvolutionalLayer(conv13.output(), conv13.get_output_shape(), [7, 7, 512, 4096], drop_out=True,
                                               drop_out_prob=keep_prob)
            model.append(conv14)
        with tf.variable_scope('conv15'):
            conv15 = layers.ConvolutionalLayer(conv14.output(), conv14.get_output_shape(), [1, 1, 4096, 4096], drop_out=True,
                                               drop_out_prob=keep_prob)
            model.append(conv15)
        with tf.variable_scope('convtrans1'):
            deconv1 = layers.ConvolutionalTransposeLayer(conv15.output(), [4, 4, 60, 4096], None)
            model.append(deconv1)
        with tf.variable_scope('conv16'):
            conv16 = layers.ConvolutionalLayer(conv10.output(), conv10.get_output_shape(), [1, 1, 512, 60])
            model.append(conv16)
        conv16_output = conv16.output()
        sum1 = conv16_output + tf.image.resize_images(deconv1.output(), (tf.shape(conv16_output)[1],
                                                                         tf.shape(conv16_output)[2]))

        with tf.variable_scope('convtrans2'):
            deconv2 = layers.ConvolutionalTransposeLayer(sum1, [4, 4, 60, 60], None)
            model.append(deconv2)
        with tf.variable_scope('conv17'):
            conv17 = layers.ConvolutionalLayer(conv7.output(), conv7.get_output_shape(), [1, 1, 256, 60])
            model.append(conv17)
        conv17_output = conv17.output()
        sum2 = conv17_output + tf.image.resize_images(deconv2.output(), (tf.shape(conv17_output)[1],
                                                                         tf.shape(conv17_output)[2]))

        with tf.variable_scope('convtrans3'):
            deconv3 = layers.ConvolutionalTransposeLayer(sum2, [16, 16, 60, 60], None, deconv_stride=(1, 8, 8, 1))
            model.append(deconv3)

        with tf.variable_scope('conv18'):
            conv18 = layers.ConvolutionalLayer(deconv3.output(), deconv3.get_output_shape(), [1, 1, 60, 12])
            model.append(conv18)
        with tf.variable_scope('conv19'):
            conv19 = layers.ConvolutionalLayer(conv18.output(), conv18.get_output_shape_tensor(), [1, 1, 12, 1],
                                               activation=function['linear'])
            model.append(conv19)

        y_pre_activation = tf.image.resize_images(conv19.output(), (135, 240)) #resize to match the ground truth's shape
        y_pred = function['sigmoid'](y_pre_activation) #activate the output by sigmoid

        cost = metrics.MultinoulliCrossEntropy(y_pre_activation, y_) #use binary cross entropy
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        L2 = sum([tf.reduce_mean(tf.square(theta)) #L2 regularization
              for theta in (weight for weight in var_list if 'weights' in weight.name)])
        cost += 1e-4 * L2

        opt = tf.train.AdamOptimizer(1e-3, 0.9, 0.99, 1e-8).minimize(cost, var_list=var_list) #ADAM optimization
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(y_pred >= 0.5, tf.uint8), tf.cast(y_, tf.uint8)), tf.float32))
        saver = tf.train.Saver()

        if TRAIN:
            sess.run(tf.global_variables_initializer())
            print('Loading VGG16 weights...')
            load_weights('pretrained/vgg16_weights.npz', model, sess) #load pretrained VGG16 weights

            best_valid_accuracy = 0.
            best_valid_loss = np.inf
            best_epoch = 0
            epoch = 0
            vote_to_terminate = 0
            done_looping = False
            print('TRAINING...')
            start_training_time = time.time()
            while epoch < 200 and not done_looping:
                epoch += 1
                num_iter_training = int(training_data[0].shape[0] / batchsize)
                losses_train = 0.
                accuracies_train = 0.
                start_batch_time = time.time()
                print('Epoch %d...' % epoch)
                batch = next_batch(training_data, batchsize) #training
                for b in batch:
                    fd = {x: b[0], y_: b[1], keep_prob: 0.3}
                    _, a, l = sess.run([opt, accuracy, cost], feed_dict=fd)
                    assert not np.isnan(l), 'Train failed with loss being NaN'
                    losses_train += l
                    accuracies_train += a

                print('\ttraining loss: %s' % (losses_train / num_iter_training))
                print('\ttraining accuracy: %s' % (accuracies_train / num_iter_training))
                print('\tepoch %d took %.2f hours' % (epoch, (time.time() - start_batch_time) / 3600.))

                num_iter_valid = int(validation_data[0].shape[0] / batchsize)
                losses_valid = 0.
                accuracies_valid = 0.
                start_valid_time = time.time()
                batch = next_batch(validation_data, batchsize) #validation
                for b in batch:
                    fd = {x: b[0], y_: b[1], keep_prob: 1}
                    l, a = sess.run([cost, accuracy], feed_dict=fd)
                    losses_valid += l
                    accuracies_valid += a
                avr_acc_valid = accuracies_valid / num_iter_valid
                losses_valid /= num_iter_valid

                print('\tvalidation took %.2f hours' % ((time.time() - start_valid_time) / 3600.))
                print('\tvalidation loss: %s' % losses_valid)
                print('\tvalidation accuracy: %s' % avr_acc_valid)

                if losses_valid < best_valid_loss:
                    best_valid_loss = losses_valid
                    best_valid_accuracy = avr_acc_valid
                    best_epoch = epoch
                    vote_to_terminate = 0
                    print('\tbest validation loss achieved: %.4f' % best_valid_loss)
                    save_path = saver.save(sess, run)
                    print("\tmodel saved in file: %s" % save_path)
                else:
                    vote_to_terminate += 1

                if vote_to_terminate > 30:
                    done_looping = True
            print('Training ends after %.2f hours' % ((time.time() - start_training_time) / 3600.))
            print('\tbest validation accuracy: %.2f' % best_valid_accuracy)
            print('Training the model using all data available...')
            total_training_data = (np.concatenate((training_data[0], validation_data[0])),
                                   np.concatenate((training_data[1], validation_data[1])))
            for i in range(best_epoch):
                num_iter_training = int(total_training_data[0].shape[0] / batchsize)
                losses_train = 0.
                start_batch_time = time.time()
                print('Epoch %d...' % (i+1))
                batch = next_batch(total_training_data, batchsize) #training
                for b in batch:
                    fd = {x: b[0], y_: b[1], keep_prob: 0.1}
                    _, _, l = sess.run([opt, accuracy, cost], feed_dict=fd)
                    assert not np.isnan(l), 'Train failed with loss being NaN'
                    losses_train += l

                print('\ttraining loss: %s' % (losses_train / num_iter_training))
                print('\tepoch %d took %.2f hours' % (i+1, (time.time() - start_batch_time) / 3600.))

        else: #testing
            path = kwargs.get('testing_path')
            isfolder = kwargs.get('isfolder')

            image_list = [path + '/' + f for f in os.listdir(path) if f.endswith('.jpg')] if isfolder else [path]
            saver.restore(sess, tf.train.latest_checkpoint(run))
            print('Checkpoint restored...')
            print('Testing %d images...' % len(image_list))
            images = []
            predictions = []
            time.sleep(0.1)
            for i in tqdm.tqdm(range(len(image_list)), unit='images'):
                ori_img = misc.imread(image_list[i])
                if len(ori_img.shape) < 3:
                    continue
                img = padding(ori_img, 135, 240)
                img = np.reshape(img, (1, 135, 240, 3)) / 255.
                fd = {x: img, keep_prob: 1}
                pred = sess.run(y_pred, feed_dict=fd)
                images.append(ori_img)
                predictions.append(pred)
            time.sleep(0.1)
            print('Testing finished!')

            for i in range(len(images)):
                plt.figure(1)
                image = images[i]
                sal = np.reshape(predictions[i], (135, 240))
                sal = depadding(sal, image.shape[0], image.shape[1])
                sal = sal * (sal > np.percentile(sal, 95))
                sal = gaussian_filter(sal, sigma=0.09*sal.shape[0])
                sal = (sal - np.min(sal)) / (np.max(sal) - np.min(sal))
                plt.subplot(211)
                plt.imshow(image)
                plt.subplot(212)
                plt.imshow(sal, cmap='gray')
                plt.show()