Example #1
                        layer.set_weights(parent_layer_weights)
                    except Exception:
                        # shapes differ: fall back to a partial weight transfer
                        partial_weight_transfer(layer, parent_layer_weights,
                                                disp)
                else:
                    if disp:
                        print('Did not transfer weights to {}'.format(
                            layer.name))
    return child
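

# A minimal sketch (not the repo's actual partial_weight_transfer) of a partial
# transfer: copy the overlapping slice of each parent weight tensor into the
# child layer and leave the remaining child weights untouched.
def partial_weight_transfer_sketch(child_layer, parent_weights, disp=False):
    new_weights = []
    for cw, pw in zip(child_layer.get_weights(), parent_weights):
        w = cw.copy()
        # overlapping region along every axis of the two tensors
        overlap = tuple(slice(0, min(c, p)) for c, p in zip(cw.shape, pw.shape))
        w[overlap] = pw[overlap]
        new_weights.append(w)
    child_layer.set_weights(new_weights)
    if disp:
        print('Partially transferred weights to {}'.format(child_layer.name))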


if __name__ == '__main__':
    from utils import get_flops
    from dataset.coco import cn as cfg
    cfg.merge_from_file('../configs/evopose_s4_656_XS.yaml')
    model = EvoPose(cfg)
    print('{:.2f}M parameters | {:.2f}G multiply-adds'.format(
        model.count_params() / 1e6, get_flops(model) / 1e9 / 2))

    # search space size:
    # default_genotype = np.array(genotype_from_blocks_args(DEFAULT_BLOCKS_ARGS))
    # s = 2**default_genotype.shape[0] * 4 ** default_genotype.shape[0]  # kernel and repeats
    # for c in default_genotype[:, 2]:  # channels
    #     s *= c
    # s *= 2**3  # stride
    # print('Search space size: 10^{:.0f}'.format(np.log10(s)))

    # np.random.seed(0)
    # cfg.MODEL.LOAD_WEIGHTS = False
    # parent_genotype = genotype_from_blocks_args(DEFAULT_BLOCKS_ARGS)
Example #2
    parser.add_argument('-c',
                        '--cfg',
                        default='evopose2d_M_f32.yaml')
    parser.add_argument(
        '-p',
        '--coco-path',
        default='./data/',
        help='Path to folder containing COCO images and annotation directories.'
    )
    parser.add_argument('-i', '--img-id', type=int, default=785)
    parser.add_argument('--alpha', type=float, default=0.8)
    args = parser.parse_args()
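
    # Example invocation (script name and data path are placeholders):
    #   python visualize.py -c evopose2d_M_f32.yaml -p /path/to/coco -i 785 --alpha 0.8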

    # load the config .yaml file
    cfg.merge_from_file('configs/' + args.cfg)

    # load the trained model
    model = tf.keras.models.load_model('models/{}.h5'.format(
        args.cfg.split('.yaml')[0]))
    cfg.DATASET.OUTPUT_SHAPE = model.output_shape[1:]

    # load the dataset annotations
    coco = COCO(
        osp.join(args.coco_path, 'annotations',
                 'person_keypoints_val2017.json'))
    img_data = coco.loadImgs([args.img_id])[0]

    annotation = coco.loadAnns(coco.getAnnIds([args.img_id]))[0]
    bbox = annotation['bbox']
    # ground-truth keypoints as (x, y, visibility) triplets; not used below
    kp = np.array(annotation['keypoints']).reshape(-1, 3)
Example #3
                                   cfg.MODEL.HEAD_KERNEL,
                                   strides=2,
                                   padding='same',
                                   use_bias=False,
                                   kernel_regularizer=regularizer,
                                   name='head_conv{}'.format(i + 1))(x)
        x = layers.BatchNormalization(name='head_bn{}'.format(i + 1))(x)
        x = layers.Activation(cfg.MODEL.HEAD_ACTIVATION,
                              name='head_act{}'.format(i + 1))(x)

    x = layers.Conv2D(cfg.DATASET.OUTPUT_SHAPE[-1],
                      1,
                      padding='same',
                      use_bias=True,
                      kernel_regularizer=regularizer,
                      name='final_conv')(x)

    return Model(backbone.input, x, name='sb_{}'.format(cfg.MODEL.BACKBONE))
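
# For context, the head code above assumes a setup along these lines earlier in
# the same function (names here are assumptions, not necessarily the repo's code):
#   regularizer = tf.keras.regularizers.l2(cfg.TRAIN.WD)
#   backbone = tf.keras.applications.ResNet50(include_top=False,
#                                             input_shape=cfg.DATASET.INPUT_SHAPE)
#   x = backbone.output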


if __name__ == '__main__':
    from utils import get_flops
    from dataset.coco import cn as cfg
    cfg.merge_from_file('../configs/sb_resnet50_256x192.yaml')
    cfg.DATASET.INPUT_SHAPE = [384, 288, 3]
    cfg.MODEL.BACKBONE = 'resnet152'
    model = SimpleBaseline(cfg)
    model.summary()
    print('{:.2f}M parameters | {:.2f}G multiply-adds'.format(
        model.count_params() / 1e6,
        get_flops(model) / 1e9 / 2))
Example #4
    # stem
    for i in range(2):
        x = Conv2D(64,
                   3,
                   strides=2,
                   padding='same',
                   use_bias=False,
                   name='conv{}'.format(i + 1))(x)
        x = BatchNormalization(name='bn{}'.format(i + 1))(x)
        x = ReLU(name='relu{}'.format(i + 1))(x)

    xs = [x]
    stage_cfgs = [cfg.MODEL['STAGE{}'.format(i + 1)] for i in range(4)]
    for i, stage_cfg in enumerate(stage_cfgs):
        xs = stage(xs, stage_cfg, name='s{}'.format(i + 1))

    output = Conv2D(cfg.DATASET.OUTPUT_SHAPE[-1], 1, name='final_conv')(xs[0])
    model = Model(inputs=input, outputs=output, name='hrnet')
    add_regularization(model, l2(cfg.TRAIN.WD))
    return model
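

# Hypothetical sketch of an add_regularization-style helper (assumed, not the
# repo's utils implementation): attach a kernel regularizer to every layer that
# supports one, then rebuild the model from its config so Keras applies it.
def add_regularization_sketch(model, regularizer):
    import tensorflow as tf
    for layer in model.layers:
        if hasattr(layer, 'kernel_regularizer'):
            layer.kernel_regularizer = regularizer
    # attribute changes only land in the layer configs, so round-trip the model
    # through JSON and restore the trained weights afterwards
    weights = model.get_weights()
    rebuilt = tf.keras.models.model_from_json(model.to_json())
    rebuilt.set_weights(weights)
    # this variant returns a new model, i.e. callers would write
    # model = add_regularization_sketch(model, l2(cfg.TRAIN.WD))
    return rebuilt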


if __name__ == '__main__':
    from utils import get_flops
    from dataset.coco import cn as cfg
    cfg.merge_from_file('../configs/hrnet_w32_256x192.yaml')
    # cfg.DATASET.INPUT_SHAPE = [256, 192, 3]
    model = HRNet(cfg)
    # model.summary()
    print('{:.2f}M parameters | {:.2f}G multiply-adds'.format(
        model.count_params() / 1e6, get_flops(model) / 1e9 / 2))
Example #5
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--cfg', default='E3.yaml')
    parser.add_argument('--accelerator-prefix', default='node-')
    parser.add_argument('-a',
                        '--accelerator-ids',
                        type=int,
                        nargs='+',
                        default=[0])
    parser.add_argument('-ar',
                        '--accelerator-range',
                        type=int,
                        nargs='+',
                        default=None)
    args = parser.parse_args()

    cfg.merge_from_file(osp.join('configs/', args.cfg))

    if args.accelerator_range:
        accelerators = [
            '{}{}'.format(args.accelerator_prefix, a)
            for a in range(*args.accelerator_range)
        ]
    else:
        accelerators = [
            '{}{}'.format(args.accelerator_prefix, a)
            for a in args.accelerator_ids
        ]
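
    # For example (values assumed): '--accelerator-range 0 4' expands to
    # ['node-0', 'node-1', 'node-2', 'node-3'], while '-a 0 2 5' yields
    # ['node-0', 'node-2', 'node-5'].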

    assert cfg.SEARCH.CHILDREN % cfg.SEARCH.PARENTS == 0, \
        'Number of children must be divisible by number of parents.'
    assert cfg.SEARCH.CHILDREN % len(accelerators) == 0, \