Example #1
File: mtl.py  Project: qianrusun1015/E3BM-1
    def __init__(self, args, mode='meta', num_cls=64):
        super().__init__()
        self.args = args
        self.mode = mode
        self.update_lr = args.base_lr
        self.update_step = args.update_step
        z_dim = 640
        self.base_learner = BaseLearner(args, z_dim)

        if self.mode == 'meta':
            from networks.resnet_mtl import ResNetMtl
            self.encoder = ResNetMtl()
            if args.hyperprior_arch == 'LSTM':
                self.hyperprior_combination_model = HyperpriorCombinationLSTM(
                    args, self.update_step, z_dim)
                self.hyperprior_basestep_model = HyperpriorBasestepLSTM(
                    args, self.update_step, self.update_lr, z_dim)
            else:
                self.hyperprior_combination_model = HyperpriorCombination(
                    args, self.update_step, z_dim)
                self.hyperprior_basestep_model = HyperpriorBasestep(
                    args, self.update_step, self.update_lr, z_dim)
        else:
            from networks.resnet import ResNet
            self.encoder = ResNet()
            self.pre_fc = nn.Sequential(nn.Linear(640, 512), nn.ReLU(),
                                        nn.Linear(512, num_cls))
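# Usage sketch (not part of the original file): the constructor above belongs to the
# meta-transfer learner class defined in E3BM's mtl.py; the surrounding class statement
# is omitted from this excerpt, so the name `MtlLearner` below is an assumption, and
# `args` carries only the attributes the constructor itself reads (the sub-modules may
# read further fields).
from argparse import Namespace

args = Namespace(base_lr=0.01, update_step=100, hyperprior_arch='LSTM')
learner = MtlLearner(args, mode='meta', num_cls=64)  # assumed class name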
def ResNeXt58_for_ctpn(inputs, scope="resnext"):
    def stack_fn(x):
        x = stack3(x, 128, 3, name='conv2')
        x = stack3(x, 256, 8, name='conv3')  # 1/8 size
        x = stack3(x, 512, 8, name='conv4')  # 1/16 size
        return x

    with backend.name_scope(scope):
        outputs = ResNet(inputs, stack_fn, use_bias=False,
                         block_preact=False)  # 1/16 size

    return outputs
def ResNeXt58_for_char_recog(inputs, feat_stride=16, scope="resnext"):
    def stack_fn(x):
        x = stack3(x, 128, 3, name='conv2')
        x = stack3(x, 256, 8, name='conv3')  # 1/8 size
        x = stack3(x, 512, 8, stride1=feat_stride // 8,
                   name='conv4')  # 1/8 or 1/16
        return x

    with backend.name_scope(scope):
        outputs = ResNet(inputs, stack_fn, use_bias=True,
                         block_preact=False)  # 1/16 size

    return outputs
def ReNext40_segment_double_line(inputs, feat_stride=16, scope="resnext"):
    def stack_fn(x):
        x = stack3(x, 128, 3, name='conv2')
        x = stack3(x, 256, 4, name='conv3')  # 1/8 size
        x = stack3(x, 512, 4, name='conv4')  # 1/16 size
        x = stack3(x, 1024, 2, stride1=feat_stride // 16,
                   name='conv5')  # 1/16 or 1/32
        return x

    with backend.name_scope(scope):
        outputs = ResNet(inputs, stack_fn, use_bias=True, block_preact=False)

    return outputs
def ResNeXt76_for_yolo(inputs, scope="resnext"):
    def stack_fn(x):
        x = stack3(x, 128, 3, name='conv2')
        x1 = stack3(x, 256, 8, name='conv3')  # 1/8 size
        x2 = stack3(x1, 512, 8, name='conv4')  # 1/16 size
        x3 = stack3(x2, 1024, 6, name='conv5')  # 1/32 size
        return [x1, x2, x3]

    with backend.name_scope(scope):
        features_list = ResNet(inputs,
                               stack_fn,
                               use_bias=False,
                               block_preact=False)

    return features_list
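# Usage sketch (not part of the original file): the builders above depend on the
# project's `ResNet`, `stack3`, and `backend` helpers, which are not shown in this
# excerpt; the calls below only illustrate how the functions are meant to be wired
# into a Keras model.
from tensorflow.keras import Input, Model

inputs = Input(shape=(None, None, 3))
ctpn_feat = ResNeXt58_for_ctpn(inputs)      # single 1/16-scale feature map
p3, p4, p5 = ResNeXt76_for_yolo(inputs)     # 1/8, 1/16, 1/32 feature pyramid
yolo_backbone = Model(inputs, [p3, p4, p5], name='yolo_backbone')
yolo_backbone.summary()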
Example #6
File: JLDCF.py  Project: W-Ori/JL-DCF
def build_model(base_model_cfg='resnet'):
    feature_aggregation_module = []
    for i in range(5):
        feature_aggregation_module.append(FAModule())
    upsampling = []
    for i in range(0, 4):
        upsampling.append([])
        for j in range(0, i + 1):
            upsampling[i].append(
                nn.ConvTranspose2d(k,
                                   k,
                                   kernel_size=2**(j + 2),
                                   stride=2**(j + 1),
                                   padding=2**(j)))
    if base_model_cfg == 'resnet':
        backbone = ResNet(Bottleneck, [3, 4, 23, 3])
        return JL_DCF(base_model_cfg, JLModule(backbone),
                      CMLayer(), feature_aggregation_module, ScoreLayer(k),
                      ScoreLayer(k), upsampling)
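# Usage sketch (not part of the original file): `k` and the layer classes used above
# are module-level definitions in JLDCF.py that this excerpt does not show. Note that
# build_model only returns a network for base_model_cfg == 'resnet'; any other value
# falls through and yields None.
model = build_model('resnet')
n_params = sum(p.numel() for p in model.parameters())
print('JL-DCF parameters: %.1fM' % (n_params / 1e6))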
Example #7
from networks.resnet import ResNet

#### Model training plot: \myplot.png


resnet = ResNet()
resnet.train()
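# Note (not part of the original file): Example #9 below references trained weights at
# networks/models/resnet.h5 (commented out there), so a previously trained network could
# also be restored with the standard Keras loader instead of retraining (import path
# assumed):
from keras.models import load_model

trained = load_model('networks/models/resnet.h5')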
Example #8
File: __init__.py  Project: Bahlat87/ZazuML
def get_model(conf, num_class=10, local_rank=-1):
    name = conf['type']

    if name == 'resnet50':
        model = ResNet(dataset='imagenet',
                       depth=50,
                       num_classes=num_class,
                       bottleneck=True)
    elif name == 'resnet200':
        model = ResNet(dataset='imagenet',
                       depth=200,
                       num_classes=num_class,
                       bottleneck=True)
    elif name == 'wresnet40_2':
        model = WideResNet(40, 2, dropout_rate=0.0, num_classes=num_class)
    elif name == 'wresnet28_10':
        model = WideResNet(28, 10, dropout_rate=0.0, num_classes=num_class)

    elif name == 'shakeshake26_2x32d':
        model = ShakeResNet(26, 32, num_class)
    elif name == 'shakeshake26_2x64d':
        model = ShakeResNet(26, 64, num_class)
    elif name == 'shakeshake26_2x96d':
        model = ShakeResNet(26, 96, num_class)
    elif name == 'shakeshake26_2x112d':
        model = ShakeResNet(26, 112, num_class)

    elif name == 'shakeshake26_2x96d_next':
        model = ShakeResNeXt(26, 96, 4, num_class)

    elif name == 'pyramid':
        model = PyramidNet('cifar10',
                           depth=conf['depth'],
                           alpha=conf['alpha'],
                           num_classes=num_class,
                           bottleneck=conf['bottleneck'])

    elif 'efficientnet' in name:
        model = EfficientNet.from_name(
            name,
            condconv_num_expert=conf['condconv_num_expert'],
            norm_layer=None)  # TpuBatchNormalization
        if local_rank >= 0:
            model = nn.SyncBatchNorm.convert_sync_batchnorm(model)

        def kernel_initializer(module):
            def get_fan_in_out(module):
                num_input_fmaps = module.weight.size(1)
                num_output_fmaps = module.weight.size(0)
                receptive_field_size = 1
                if module.weight.dim() > 2:
                    receptive_field_size = module.weight[0][0].numel()
                fan_in = num_input_fmaps * receptive_field_size
                fan_out = num_output_fmaps * receptive_field_size
                return fan_in, fan_out

            if isinstance(module, torch.nn.Conv2d):
                # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py#L58
                fan_in, fan_out = get_fan_in_out(module)
                torch.nn.init.normal_(module.weight,
                                      mean=0.0,
                                      std=np.sqrt(2.0 / fan_out))
                if module.bias is not None:
                    torch.nn.init.constant_(module.bias, val=0.)
            elif isinstance(module, RoutingFn):
                torch.nn.init.xavier_uniform_(module.weight)
                torch.nn.init.constant_(module.bias, val=0.)
            elif isinstance(module, torch.nn.Linear):
                # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py#L82
                fan_in, fan_out = get_fan_in_out(module)
                delta = 1.0 / np.sqrt(fan_out)
                torch.nn.init.uniform_(module.weight, a=-delta, b=delta)
                if module.bias is not None:
                    torch.nn.init.constant_(module.bias, val=0.)

        model.apply(kernel_initializer)
    else:
        raise NameError('no model named %s' % name)

    if local_rank >= 0:
        device = torch.device('cuda', local_rank)
        model = model.to(device)
        model = DistributedDataParallel(model,
                                        device_ids=[local_rank],
                                        output_device=local_rank)
    else:
        model = model.cuda()
        # model = DataParallel(model)

    cudnn.benchmark = True
    return model
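# Usage sketch (not part of the original file): the config keys follow the branches
# above, the concrete values are illustrative only, and a CUDA device is required
# because get_model always moves the model to the GPU.
wrn = get_model({'type': 'wresnet28_10'}, num_class=10, local_rank=-1)
pyramid = get_model({'type': 'pyramid', 'depth': 272, 'alpha': 200,
                     'bottleneck': True}, num_class=10, local_rank=-1)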
Example #9
class_names = [
    'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
    'ship', 'truck'
]
#(x_train, y_train), (x_test, y_test) = cifar10.load_data()
images, labels = foolbox.utils.samples(dataset='cifar10',
                                       index=0,
                                       batchsize=10)
# Convert the PIL images to a numpy array
images = np.array([img_to_array(original) for original in images])
#print(np.shape(y_test))
print(np.shape(labels.reshape(10, 1)))
labels = labels.reshape(10, 1)
# Custom network
# lenet = LeNet()
resnet = ResNet()

# model_filename = 'networks/models/resnet.h5'
# resnet=load_model(model_filename)

models = [resnet]

network_stats, correct_imgs = helper.evaluate_models(models, images, labels)
correct_imgs = pd.DataFrame(
    correct_imgs, columns=['name', 'img', 'label', 'confidence', 'pred'])
network_stats = pd.DataFrame(network_stats,
                             columns=['name', 'accuracy', 'param_count'])

print(network_stats)
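# Inspection sketch (not part of the original file): both results are plain pandas
# DataFrames with the columns listed above, so standard pandas calls apply; the
# values in the 'name' column depend on what helper.evaluate_models records.
print(correct_imgs.groupby('name')['confidence'].describe())
print(correct_imgs[['img', 'label', 'pred']].head())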

pixel = np.array([16, 20, 0, 255, 255])
Example #10
"""
利用cifar-10数据集在ResNet带分支网络模型上进行测试
"""
from branchynet import utils, visualize
from networks.resnet import ResNet
from datasets import cifar10
import dill

# Define the ResNet network
branchyNet = ResNet().build()
branchyNet.to_gpu()
branchyNet.training()
branchyNet.verbose = True

# Parameter settings
TRAIN_BATCHSIZE = 128
TEST_BATCHSIZE = 16
TRAIN_NUM_EPOCHES = 100
SAVE_PATH = '../pic/resnet_cifar10/'  # directory for saving result figures
MODEL_NAME = '../models/resnet_cifar10(' + str(
    TRAIN_NUM_EPOCHES) + ').bn'  # saved model name
MAIN_MODEL_NAME = '../models/main_resnet_cifar10(' + str(
    TRAIN_NUM_EPOCHES) + ').bn'
CSV_NAME = 'resnet(' + str(TRAIN_NUM_EPOCHES) + ')'  # output file name

# Load the CIFAR-10 dataset
X_train, Y_train, X_test, Y_test = cifar10.get_data()

print("X_train:{} Y_train:{}".format(X_train.shape, Y_train.shape))
print("X_test: {} Y_test: {}".format(X_test.shape, Y_test.shape))