Example #1
def __init__(self):
    # load the config file
    config = json.load(open("model/config.json"))
    # build the image processor
    self._imageProcessor = ImageProcessor(config)
    # load the DL model and its weights
    self._model = Xception()
    self._model.load_weights('model/model.h5')
    self._model._make_predict_function()  # finalize the predict function so it is thread-safe (old Keras API)
Example #2
def Run(self, img_path, model_name):

    # config variables
    weights = 'imagenet'
    include_top = 0
    train_path = 'jpg'
    classifier_file = 'output/flowers_17/' + model_name + '/classifier.cpickle'

    # create the pretrained models
    # check for pretrained weight usage or not
    # check for top layers to be included or not
    if model_name == "vgg16":
        from vgg16 import VGG16, preprocess_input
        base_model = VGG16(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('fc1').output)
        image_size = (224, 224)
    elif model_name == "vgg19":
        from vgg19 import VGG19, preprocess_input
        base_model = VGG19(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('fc1').output)
        image_size = (224, 224)
    elif model_name == "resnet50":
        from resnet50 import ResNet50, preprocess_input
        base_model = ResNet50(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('avg_pool').output)
        image_size = (224, 224)
    elif model_name == "inceptionv3":
        from inception_v3 import InceptionV3, preprocess_input
        base_model = InceptionV3(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('mixed9').output)
        image_size = (299, 299)
    elif model_name == "xception":
        from xception import Xception, preprocess_input
        base_model = Xception(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('avg_pool').output)
        image_size = (299, 299)
    else:
        raise ValueError("unknown model name: " + model_name)

    img = image.load_img(img_path, target_size=image_size)
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    img_array = preprocess_input(img_array)
    feature = model.predict(img_array)
    feature = feature.flatten()
    with open(classifier_file, 'rb') as f:
        model2 = pickle.load(f)

    pred = model2.predict(np.atleast_2d(feature))
    prob = model2.predict_proba(np.atleast_2d(feature))[0]

    return pred, prob[0]
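A note on shapes: the flattened feature vector is 1-D, but scikit-learn estimators expect a 2-D (n_samples, n_features) array, which is why the vector is wrapped with np.atleast_2d before the scikit-learn calls. A minimal illustration:

import numpy as np

feature = np.zeros(2048)             # stand-in for a flattened CNN feature vector
print(np.atleast_2d(feature).shape)  # (1, 2048): one sample, 2048 features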
Example #3
File: models.py Project: BaopingLiu/F3Net
    def init_xcep_FAD(self):
        self.FAD_xcep = Xception(self.num_classes)

        # for good performance, initializing from an ImageNet-pretrained Xception model is recommended
        state_dict = get_xcep_state_dict()
        conv1_data = state_dict['conv1.weight'].data

        self.FAD_xcep.load_state_dict(state_dict, False)

        # replace conv1 with a 12-channel version; spread the pretrained 3-channel
        # weights across the four channel groups, scaled by 1/4 to keep activations balanced
        self.FAD_xcep.conv1 = nn.Conv2d(12, 32, 3, 2, 0, bias=False)
        for i in range(4):
            self.FAD_xcep.conv1.weight.data[:, i * 3:(i + 1) *
                                            3, :, :] = conv1_data / 4.0
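The division by 4.0 keeps the new 12-channel conv1 consistent with the pretrained 3-channel one: when the 12 input channels are four stacked copies of the same RGB image, the two layers produce identical responses. A small sanity check of that identity (standalone layers, not the F3Net modules themselves):

import torch
import torch.nn as nn

old = nn.Conv2d(3, 32, 3, 2, 0, bias=False)
new = nn.Conv2d(12, 32, 3, 2, 0, bias=False)
for i in range(4):
    new.weight.data[:, i * 3:(i + 1) * 3] = old.weight.data / 4.0

x = torch.randn(1, 3, 299, 299)
x4 = x.repeat(1, 4, 1, 1)                          # four stacked RGB copies
print(torch.allclose(old(x), new(x4), atol=1e-5))  # True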
Example #4
def main_worker(local_rank, args):
    args.local_rank = local_rank
    # prepare dist environment
    dist.init_process_group(backend='nccl',
                            rank=args.local_rank,
                            world_size=args.world_size)
    torch.cuda.set_device(args.local_rank)
    network = Xception(num_classes=cfg.num_classes)
    network = network.cuda()
    network = torch.nn.parallel.DistributedDataParallel(
        network, device_ids=[args.local_rank])
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(network.parameters(),
                          lr=cfg.lr_init,
                          momentum=cfg.SGD_momentum)
    dataloader_train = create_dataset_pytorch_imagenet_dist_train(
        data_path=args.data_path + 'train',
        local_rank=local_rank,
        n_workers=cfg.n_workers)
    dataloader_test = create_dataset_pytorch_imagenet(
        data_path=args.data_path + 'val',
        is_train=False,
        n_workers=cfg.n_workers)

    step_per_epoch = len(dataloader_train)
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          gamma=cfg.lr_decay_rate,
                                          step_size=cfg.lr_decay_epoch *
                                          step_per_epoch)
    summary_writer = None
    if local_rank == 0:
        summary_writer = SummaryWriter(log_dir='./summary')
    trainer = Trainer(network=network,
                      criterion=criterion,
                      optimizer=optimizer,
                      scheduler=scheduler,
                      dataloader_train=dataloader_train,
                      dataloader_test=dataloader_test,
                      summary_writer=summary_writer,
                      epoch_size=cfg.epoch_size,
                      ckpt_path=args.ckpt_path,
                      local_rank=local_rank)

    for epoch_id in range(cfg.epoch_size):
        trainer.step()

    if local_rank == 0:
        summary_writer.close()
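main_worker receives the local rank as its first argument, which matches the calling convention of torch.multiprocessing.spawn. A minimal launcher sketch, assuming args.world_size is already set and MASTER_ADDR/MASTER_PORT are exported for init_process_group:

import torch.multiprocessing as mp

if __name__ == '__main__':
    # one process per GPU; each callback receives its rank as the first argument
    mp.spawn(main_worker, args=(args,), nprocs=args.world_size)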
Example #5
def load_model(args):

	if args.model == 'inception':
		model = InceptionV3(include_top=True, weights='imagenet')
		preprocess_mode='tf'
	elif args.model == 'xception':
		model = Xception(include_top=True, weights='imagenet')
		preprocess_mode='tf'
	elif args.model == 'inceptionresnet':
		model = InceptionResNetV2(include_top=True, weights='imagenet')
		preprocess_mode='tf'
	elif args.model == 'mobilenet':
		model = MobileNet(include_top=True, weights='imagenet')
		preprocess_mode='tf'
	elif args.model == 'mobilenet2':	
		model = MobileNetV2(include_top=True, weights='imagenet')
		preprocess_mode='tf'
	elif args.model == 'nasnet':	
		model = NASNetLarge(include_top=True, weights='imagenet')
		preprocess_mode='tf'
	elif args.model == 'resnet':
		model = ResNet50(include_top=True, weights='imagenet')
		preprocess_mode='caffe'
	elif args.model == 'vgg16':
		model = VGG16(include_top=True, weights='imagenet')
		preprocess_mode='caffe'
	elif args.model == 'vgg19':
		model = VGG19(include_top=True, weights='imagenet')
		preprocess_mode='caffe'
	else:
		print("Model not found")
		return None, None

	return model, preprocess_mode
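The returned preprocess_mode ('tf' or 'caffe') is intended for Keras' generic ImageNet preprocessing. A usage sketch, assuming keras.applications.imagenet_utils.preprocess_input and its mode keyword (present in Keras 2.x):

import numpy as np
from keras.applications.imagenet_utils import preprocess_input

model, preprocess_mode = load_model(args)
x = np.random.rand(1, 224, 224, 3) * 255.0  # stand-in for a loaded image batch
# 'tf' scales pixels to [-1, 1]; 'caffe' converts RGB->BGR and subtracts ImageNet means
x = preprocess_input(x, mode=preprocess_mode)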
Example #6
class Model(ModelBase):
    def __init__(self):
        # load config file
        config = json.load(open("model/config.json"))
        # get the image processor
        self._imageProcessor = ImageProcessor(config)
        # load the DL model
        self._model = Xception()
        self._model.load_weights('model/model.h5')
        self._model._make_predict_function()  # finalize the predict function so it is thread-safe (old Keras API)

    def infer(self, input):
        # load preprocessed input
        inputAsNpArr = self._imageProcessor.loadAndPreprocess(input)
        # run inference with the Keras model
        results = self._model.predict(inputAsNpArr)
        # postprocess results into output
        output = self._imageProcessor.computeOutput(results)
        return output
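A hypothetical driver for this wrapper, assuming the model/ directory layout above (config.json plus model.h5); the input path is a placeholder:

model = Model()
output = model.infer("sample_input.png")  # preprocess -> predict -> postprocess
print(output)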
Example #7
def main():
    # parser = argparse.ArgumentParser(description = 'Deep Learning Framework 1 Argument')
    # parser.add_argument('--epochs', type = int, default = 100, metavar = 'N', help = 'number of epochs to train and test the model (default=100)')
    # args = parser.parse_args()
    transform = transforms.Compose([
        transforms.Grayscale(num_output_channels=1),
        transforms.ToTensor(),
        transforms.Normalize([
            0.4161,
        ], [
            0.1688,
        ]),
    ])

    train_dataset = datasets.ImageFolder('cropped_trainset', transform)
    test_dataset = datasets.ImageFolder('cropped_testset', transform)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=3,
                                               shuffle=True,
                                               num_workers=2)

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=2)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = Xception().to(device)

    optimizer = optim.Adam(model.parameters(), lr=0.001)
    scheduler = StepLR(optimizer, step_size=1, gamma=0.8)

    # set your own number of epochs;
    # use the train and test functions to train and evaluate the model
    for epoch in range(100):
        train(model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)
    save_models(model)
Example #8
def getNet():
    input1 = Input(shape=(1920, 1920, 3), name="input1")
    x = SeparableConv2D(3, (3, 3), strides=2, padding="same", activation="relu")(input1)
    x = SeparableConv2D(3, (3, 3), strides=2, padding="same", activation="relu")(x)
    # x = BatchNormalization()(x)
    x = Xception(weights=None, input_shape=(480, 480, 3), include_top=False)(x)
    # x = NASNetLarge(weights="imagenet", input_shape=(331, 331, 3), include_top=False)(x)
    x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1, activation="sigmoid")(x)

    # build the model
    model = Model(inputs=input1, outputs=x)
    return model
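Since the head is a single sigmoid unit, the natural pairing is binary cross-entropy. A compile-and-inspect sketch (the optimizer choice is an assumption, not part of the original):

model = getNet()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()  # the two stride-2 stem convs reduce 1920x1920 inputs to 480x480 for Xception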
Example #9
    parser.add_argument('--data_path',
                        type=str,
                        default="./data",
                        help='path where the dataset is saved')
    parser.add_argument('--ckpt_path',
                        type=str,
                        default="./checkpoint",
                        help='path where the checkpoint will be saved')
    parser.add_argument('--device_id',
                        type=int,
                        default=0,
                        help='device id of GPU. (Default: 0)')
    args = parser.parse_args()

    device = torch.device('cuda:' + str(args.device_id))
    network = Xception(num_classes=cfg.num_classes)
    network.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(network.parameters(),
                          lr=cfg.lr_init,
                          momentum=cfg.SGD_momentum)
    dataloader_train = create_dataset_pytorch_cifar10(args.data_path)
    dataloader_test = create_dataset_pytorch_cifar10(args.data_path,
                                                     is_train=False)
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          gamma=cfg.lr_decay_rate,
                                          step_size=cfg.lr_decay_epoch *
                                          len(dataloader_train))
    summary_writer = SummaryWriter(log_dir='./summary')
    trainer = Trainer(network=network,
                      criterion=criterion,
Example #10
File: models.py Project: BaopingLiu/F3Net
    def init_xcep(self):
        self.xcep = Xception(self.num_classes)

        # for good performance, initializing from an ImageNet-pretrained Xception model is recommended
        state_dict = get_xcep_state_dict()
        self.xcep.load_state_dict(state_dict, False)
Example #11
    print(len(images), 'images')
    return images


def chunk(l, n):
    for i in range(0, len(l), n):
        yield l[i:i + n]
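A quick check of chunk, which yields successive n-sized slices (the last possibly shorter) and is used below to batch image paths 100 at a time:

print(list(chunk([1, 2, 3, 4, 5], 2)))  # [[1, 2], [3, 4], [5]]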


input_dir = sys.argv[1]
out_fn = sys.argv[2]
images = list_images(input_dir)
out_file = open(out_fn, 'w')

imodel = InceptionResNetV2(weights='imagenet')
xmodel = Xception(weights='imagenet')
rmodel = ResNet50(weights='imagenet')
vmodel = VGG19(weights='imagenet')

for fs in chunk(images, 100):
    xs299 = []
    xs224 = []
    for f in fs:
        img_path = join(input_dir, f)

        img = image.load_img(img_path, target_size=(299, 299))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = Inc_preprocess_input(x)
        xs299.append(x[0, :, :])
Example #12
        return gray_image


class DeepfakeTest():
    def __init__(self, model, ckpt_dir):
        self.transform = ToTensor()
        self.checkpoint = torch.load(ckpt_dir, map_location=device)
        self.model = model
        self.model.load_state_dict(self.checkpoint['model_state_dict'])
        # put the model in eval mode; move it to the GPU if available
        self.model.eval()
        if torch.cuda.is_available():
            self.model = self.model.cuda()

    def test_im(self, im_dir):
        im = self.transform(io.imread(im_dir))
        with torch.no_grad():
            # move the input to the same device as the model
            input_image = im.float()
            if torch.cuda.is_available():
                input_image = input_image.cuda()
            # compute output
            output = self.model(input_image).cpu().numpy()
            a = output.tolist()
        return a


if __name__ == '__main__':
    device = torch.device('cpu')
    tester = DeepfakeTest(model=Xception(),
                          ckpt_dir='log_path/Xception_trained_model.pth')
    imgs = ['cropped_testset/real/trump.png']
    results = [tester.test_im(img) for img in imgs]
    print(results)
Example #13
def train_factory(MODEL_NAME):

    config = tf.ConfigProto()
    config.gpu_options.allocator_type = 'BFC'
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config)) 
    # model = CCR(input_shape=(img_width,img_height,1),classes=charset_size)
    # model = LeNet.build(width=img_width, height=img_height, depth=1, classes=charset_size)
    # model = ResNet.build_model(SHAPE=(img_width,img_height,1), classes=charset_size)

    # vgg net 5
    # MODEL_PATH='trained_model/vggnet5.hdf5'
    # model=VGGNet5.vgg(input_shape=(img_width,img_height,1),classes=charset_size)

    model=None
    if(MODEL_NAME=='inception_resnet_v2'):
        model=InceptionResNetV2.inception_resnet_v2(input_shape=(img_width,img_height,3),classes=charset_size,weights='./trained_model/inception_resnet_v2/inception_resnet_v2.12-0.8244.hdf5')
    elif(MODEL_NAME=='xception'):
        # xception
        model=Xception.Xception((img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='mobilenet_v2'):
        #mobilenet v2
        model=MobileNetv2.MobileNet_v2((img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='inception_v3'):
        # inception v3
        model=Inception_v3.inception((img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='vgg16'):
        model=VGGNet.vgg(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='vgg19'):
        model=VGG19.VGG19(input_shape=(img_width,img_height,3),classes=charset_size,weights='weights/vgg19_weights_tf_dim_ordering_tf_kernels.h5')
    elif(MODEL_NAME=='resnet50'):
        model=ResNet50.resnet(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='inception_v4'):
        model=inception_v4.inception_v4(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='resnet34'):
        model=ResNet34.ResNet34(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='densenet121'):
        model=DenseNet.DenseNet(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='densenet161'):
        model=DenseNet.DenseNet(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='shufflenet_v2'):
        model=ShuffleNetV2.ShuffleNetV2(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='resnet_attention_56'):
        model=Resnet_Attention_56.Resnet_Attention_56(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='squeezenet'):
        model=SqueezeNet.SqueezeNet(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='seresnet50'):
        model=SEResNet50.SEResNet50(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='se_resnext'):
        model=SEResNext.SEResNext(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='nasnet'):
        model=NASNet.NASNetLarge(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='custom'):
        model=Custom_Network.Custom_Network(input_shape=(img_width,img_height,3),classes=charset_size)
    elif(MODEL_NAME=='resnet18'):
        model=ResnetBuilder.build_resnet_18(input_shape=(img_width,img_height,3),num_outputs=charset_size)



    print(model.summary())
    train(model,MODEL_NAME)
Example #14
    base_model = VGG19(weights=weights)
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('fc1').output)
    image_size = (224, 224)
elif model_name == "resnet50":
    base_model = ResNet50(weights=weights)
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('flatten').output)
    image_size = (224, 224)
elif model_name == "inceptionv3":
    base_model = InceptionV3(weights=weights)
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('mixed9').output)
    image_size = (299, 299)
elif model_name == "xception":
    base_model = Xception(weights=weights)
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('avg_pool').output)
    image_size = (299, 299)
else:
    base_model = None


# path to training dataset
train_labels = sorted(os.listdir(train_path))

# variables to hold features and labels
features = []
labels = []
Example #15
import json
import dlib
import torch
import base64
from io import BufferedReader, BytesIO
from skimage import io, color  # note: skimage.io shadows the stdlib io module; BytesIO is imported explicitly above
import numpy as np
from PIL import Image
from flask import Flask, jsonify, request
from werkzeug.utils import secure_filename
from xception import Xception

app = Flask(__name__)
device = torch.device('cpu')
model = Xception()
ckpt_dir = 'log_path/Xception_trained_model.pth'
checkpoint = torch.load(ckpt_dir, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
message = ''


def crop_image(file):
    detector_ori = dlib.get_frontal_face_detector()
    # open the image file
    try:
        img = io.imread(file)
    except Exception as e:
        message = "While processing, " + str(e)
        return message
Example #16
	model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
	image_size = (224, 224)
elif model_name == "vgg19":
	base_model = VGG19(weights=weights)
	model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
	image_size = (224, 224)
elif model_name == "resnet50":
	base_model = ResNet50(weights=weights)
	model = Model(inputs=base_model.input, outputs=base_model.get_layer('flatten').output)
	image_size = (224, 224)
elif model_name == "inceptionv3":
	base_model = InceptionV3(weights=weights)
	model = Model(inputs=base_model.input, outputs=base_model.get_layer('mixed9').output)
	image_size = (299, 299)
elif model_name == "xception":
	base_model = Xception(weights=weights)
	model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
	image_size = (299, 299)
else:
	base_model = None




# loop over all the labels in the folder
for label in train_labels:
    cur_path = train_path + "/" + label
    for image_path in glob.glob(cur_path):
        img = image.load_img(image_path, target_size=image_size)
        x = image.img_to_array(img)
Example #17
    data_format='channels_last', preprocessing_function=preprocess_input)

train_generator = data_generator.flow_from_directory(os.path.join(
    data_dir, 'teacher_no_resizing'),
                                                     target_size=(299, 299),
                                                     batch_size=64)

val_generator = data_generator_val.flow_from_directory(os.path.join(
    data_dir, 'val_no_resizing'),
                                                       shuffle=False,
                                                       target_size=(299, 299),
                                                       batch_size=64)

# # Model

model = Xception(weight_decay=1e-5)
model.count_params()


# add entropy to the usual logloss (it is for regularization),
# "Regularizing Neural Networks by Penalizing Confident Output Distributions",
# https://arxiv.org/abs/1701.06548
# it reduces overfitting a little bit
def loss(y_true, y_pred):
    entropy = -K.mean(K.sum(y_pred * K.log(y_pred), 1))
    beta = 0.1
    return logloss(y_true, y_pred) - beta * entropy


model.compile(
    #     optimizer=optimizers.SGD(lr=1e-2, momentum=0.9, nesterov=True),
Example #18
def load_model(args):

	if args.output_layer == '0':
		if args.model == 'inception':
			model = InceptionV3(include_top=False, weights='imagenet', pooling=args.pooling)
			preprocess_mode='tf'
		elif args.model == 'xception':
			model = Xception(include_top=False, weights='imagenet', pooling=args.pooling)
			preprocess_mode='tf'
		elif args.model == 'inceptionresnet':
			model = InceptionResNetV2(include_top=False, weights='imagenet', pooling=args.pooling)
			preprocess_mode='tf'
		elif args.model == 'mobilenet':
			model = MobileNet(include_top=False, weights='imagenet', pooling=args.pooling)
			preprocess_mode='tf'
		elif args.model == 'mobilenet2':	
			model = MobileNetV2(include_top=False, weights='imagenet', pooling=args.pooling)
			preprocess_mode='tf'
		elif args.model == 'nasnet':	
			model = NASNetLarge(include_top=False, weights='imagenet', pooling=args.pooling)
			preprocess_mode='tf'
		elif args.model == 'resnet':
			model = ResNet50(include_top=False, weights='imagenet', pooling=args.pooling)
			preprocess_mode='caffe'
		elif args.model == 'vgg16':
			model = VGG16(include_top=False, weights='imagenet', pooling=args.pooling)
			preprocess_mode='caffe'
		elif args.model == 'vgg19':
			model = VGG19(include_top=False, weights='imagenet', pooling=args.pooling)
			preprocess_mode='caffe'
		else:
			print ("Model not found")
			return 0
	else:
		if args.model == 'inception':
			base_model = InceptionV3(include_top=False, weights='imagenet', pooling=args.pooling)
			model = Model(inputs=base_model.input, outputs=base_model.get_layer(args.output_layer).output)
			preprocess_mode='tf'
		elif args.model == 'xception':
			base_model = Xception(include_top=False, weights='imagenet', pooling=args.pooling)
			model = Model(inputs=base_model.input, outputs=base_model.get_layer(args.output_layer).output)
			preprocess_mode='tf'
		elif args.model == 'inceptionresnet':
			base_model = InceptionResNetV2(include_top=False, weights='imagenet', pooling=args.pooling)
			model = Model(inputs=base_model.input, outputs=base_model.get_layer(args.output_layer).output)
			preprocess_mode='tf'
		elif args.model == 'mobilenet':
			base_model = MobileNet(include_top=False, weights='imagenet', pooling=args.pooling)
			model = Model(inputs=base_model.input, outputs=base_model.get_layer(args.output_layer).output)
			preprocess_mode='tf'
		elif args.model == 'mobilenet2':	
			base_model = MobileNetV2(include_top=False, weights='imagenet', pooling=args.pooling)
			model = Model(inputs=base_model.input, outputs=base_model.get_layer(args.output_layer).output)
			preprocess_mode='tf'
		elif args.model == 'nasnet':	
			base_model = NASNetLarge(include_top=False, weights='imagenet', pooling=args.pooling)
			model = Model(inputs=base_model.input, outputs=base_model.get_layer(args.output_layer).output)
			preprocess_mode='tf'
		elif args.model == 'resnet':
			base_model = ResNet50(include_top=False, weights='imagenet', pooling=args.pooling)
			model = Model(inputs=base_model.input, outputs=base_model.get_layer(args.output_layer).output)
			preprocess_mode='caffe'
		elif args.model == 'vgg16':
			base_model = VGG16(include_top=False, weights='imagenet', pooling=args.pooling)
			model = Model(inputs=base_model.input, outputs=base_model.get_layer(args.output_layer).output)
			preprocess_mode='caffe'
		elif args.model == 'vgg19':
			base_model = VGG19(include_top=False, weights='imagenet', pooling=args.pooling)
			model = Model(inputs=base_model.input, outputs=base_model.get_layer(args.output_layer).output)
			preprocess_mode='caffe'
		else:
			print ("Model not found")
			return 0


	return model,preprocess_mode
Example #19
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("num_training_iterations", 2000,
                        "Number of iterations to train for.")
tf.flags.DEFINE_integer("report_interval", 100,
                        "Iterations between reports (samples, valid loss).")
tf.flags.DEFINE_integer("batch_size", 100, "Batch size for training.")
tf.flags.DEFINE_integer("output_size", 17, "Size of output layer.")
tf.flags.DEFINE_float("weight_decay_rate", 1.e-4, "Rate for Weight Decay.")
tf.flags.DEFINE_float("train_ratio", 0.8,
                      "Ratio of train data in the all data.")
tf.flags.DEFINE_float("init_lr", 1.e-3, "Initial learning rate.")
tf.flags.DEFINE_integer("decay_interval", 500, "lr decay interval.")
tf.flags.DEFINE_float("decay_rate", 0.5, "lr decay rate.")

# Xception module
xception = Xception(FLAGS.output_size, name='Xception')

image_dir_path = 'jpg'
filelist = list(filter(lambda z: z[-4:] == '.jpg', os.listdir(image_dir_path)))
filelist = np.asarray([os.path.join(image_dir_path, f) for f in filelist])
idx = np.arange(len(filelist))
np.random.shuffle(idx)
border = int(len(filelist) * FLAGS.train_ratio)

# Flower dataset module
dataset_train = FlowerDataSet(file_list=filelist[:border],
                              image_size=(149, 149),
                              batch=FLAGS.batch_size,
                              name='Flower_dataset_train')
dataset_test = FlowerDataSet(file_list=filelist[border:],
                             image_size=(149, 149),
Example #20
    model = densenet161_model(IMG_SIZE,
                              IMG_SIZE,
                              channel,
                              num_classes=num_classes)
elif (options.model == 'densenet169'):
    model = densenet169_model(IMG_SIZE,
                              IMG_SIZE,
                              channel,
                              num_classes=num_classes)
elif (options.model == 'InceptionResnet'):
    model = InceptionResNetV2(IMG_SIZE,
                              IMG_SIZE,
                              channel,
                              num_classes=num_classes)
elif (options.model == 'xception'):
    model = Xception(IMG_SIZE, IMG_SIZE, channel, num_classes=num_classes)
elif (options.model == 'inceptionv4'):
    model = inception_v4_model(IMG_SIZE,
                               IMG_SIZE,
                               channel,
                               num_classes=num_classes)

print(model.summary())

# data generator
datagen = ImageDataGenerator(
    featurewise_center=False,  # set input mean to 0 over the dataset
    samplewise_center=False,  # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,  # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
Example #21
File: models.py Project: BaopingLiu/F3Net
class F3Net(nn.Module):
    def __init__(self,
                 num_classes=1,
                 img_width=299,
                 img_height=299,
                 LFS_window_size=10,
                 LFS_stride=2,
                 LFS_M=6,
                 mode='FAD',
                 device=None):
        super(F3Net, self).__init__()
        assert img_width == img_height
        img_size = img_width
        self.num_classes = num_classes
        self.mode = mode
        self.window_size = LFS_window_size
        self._LFS_M = LFS_M

        # init branches
        if mode == 'FAD' or mode == 'Both':
            self.FAD_head = FAD_Head(img_size)
            self.init_xcep_FAD()

        if mode == 'LFS' or mode == 'Both':
            self.LFS_head = LFS_Head(img_size, LFS_window_size, LFS_M)
            self.init_xcep_LFS()

        if mode == 'Original':
            self.init_xcep()

        # classifier
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(
            4096 if self.mode == 'Both' or self.mode == 'Mix' else 2048,
            num_classes)
        self.dp = nn.Dropout(p=0.2)

    def init_xcep_FAD(self):
        self.FAD_xcep = Xception(self.num_classes)

        # for good performance, initializing from an ImageNet-pretrained Xception model is recommended
        state_dict = get_xcep_state_dict()
        conv1_data = state_dict['conv1.weight'].data

        self.FAD_xcep.load_state_dict(state_dict, False)

        # replace conv1 with a 12-channel version; spread the pretrained 3-channel
        # weights across the four channel groups, scaled by 1/4 to keep activations balanced
        self.FAD_xcep.conv1 = nn.Conv2d(12, 32, 3, 2, 0, bias=False)
        for i in range(4):
            self.FAD_xcep.conv1.weight.data[:, i * 3:(i + 1) *
                                            3, :, :] = conv1_data / 4.0

    def init_xcep_LFS(self):
        self.LFS_xcep = Xception(self.num_classes)

        # for good performance, initializing from an ImageNet-pretrained Xception model is recommended
        state_dict = get_xcep_state_dict()
        conv1_data = state_dict['conv1.weight'].data

        self.LFS_xcep.load_state_dict(state_dict, False)

        # replace conv1 with an M-channel version; spread the pretrained weights
        # across the M/3 channel groups, scaled by 3/M to keep activations balanced
        self.LFS_xcep.conv1 = nn.Conv2d(self._LFS_M, 32, 3, 1, 0, bias=False)
        for i in range(int(self._LFS_M / 3)):
            self.LFS_xcep.conv1.weight.data[:, i * 3:(i + 1) *
                                            3, :, :] = conv1_data / float(
                                                self._LFS_M / 3.0)

    def init_xcep(self):
        self.xcep = Xception(self.num_classes)

        # for good performance, initializing from an ImageNet-pretrained Xception model is recommended
        state_dict = get_xcep_state_dict()
        self.xcep.load_state_dict(state_dict, False)

    def forward(self, x):
        if self.mode == 'FAD':
            fea_FAD = self.FAD_head(x)
            fea_FAD = self.FAD_xcep.features(fea_FAD)
            fea_FAD = self._norm_fea(fea_FAD)
            y = fea_FAD

        if self.mode == 'LFS':
            fea_LFS = self.LFS_head(x)
            fea_LFS = self.LFS_xcep.features(fea_LFS)
            fea_LFS = self._norm_fea(fea_LFS)
            y = fea_LFS

        if self.mode == 'Original':
            fea = self.xcep.features(x)
            fea = self._norm_fea(fea)
            y = fea

        if self.mode == 'Both':
            fea_FAD = self.FAD_head(x)
            fea_FAD = self.FAD_xcep.features(fea_FAD)
            fea_FAD = self._norm_fea(fea_FAD)
            fea_LFS = self.LFS_head(x)
            fea_LFS = self.LFS_xcep.features(fea_LFS)
            fea_LFS = self._norm_fea(fea_LFS)
            y = torch.cat((fea_FAD, fea_LFS), dim=1)

        f = self.dp(y)
        f = self.fc(f)
        return y, f

    def _norm_fea(self, fea):
        f = self.relu(fea)
        f = F.adaptive_avg_pool2d(f, (1, 1))
        f = f.view(f.size(0), -1)
        return f
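A hypothetical smoke test for the class above, assuming get_xcep_state_dict() resolves the pretrained weights; forward returns the pooled features and the classifier output:

import torch

net = F3Net(num_classes=1, mode='FAD')
x = torch.randn(2, 3, 299, 299)
fea, logits = net(x)
print(fea.shape, logits.shape)  # torch.Size([2, 2048]) torch.Size([2, 1])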
Example #22
                        type=str,
                        default="./data",
                        help='path where the dataset is saved')
    parser.add_argument('--ckpt_path',
                        type=str,
                        default="./checkpoint",
                        help='path where the checkpoint will be saved')
    parser.add_argument('--device_id',
                        type=int,
                        default=0,
                        help='device id of GPU. (Default: 0)')
    args = parser.parse_args()
    args.local_rank = 0
    args.world_size = 1

    network = Xception(num_classes=cfg.num_classes)
    # network = nn.DataParallel(network)
    network = network.cuda()
    criterion = nn.CrossEntropyLoss()
    #     optimizer = optim.RMSprop(network.parameters(),
    #                                 lr=cfg.lr_init,
    #                                 eps=cfg.rmsprop_epsilon,
    #                                 momentum=cfg.rmsprop_momentum,
    #                                 alpha=cfg.rmsprop_decay)
    optimizer = optim.SGD(network.parameters(),
                          lr=cfg.lr_init,
                          momentum=cfg.SGD_momentum)
    # prepare data
    # dataloader = create_dataset_pytorch(args.data_path + "/train")
    pipe = HybridTrainPipe(batch_size=cfg.batch_size,
                           num_threads=cfg.n_workers,
Example #23
        tenant_id=config['tenant_id'],
        service_principal_id=config['service_principal_id'],
        service_principal_password=config['service_principal_password'])
except KeyError as e:
    print("Getting Service Principal Authentication from Azure Devops")
    svc_pr = None

ws = Workspace.from_config(path=config_json, auth=svc_pr)

try:
    model_root = Model.get_model_path('trained_xception', _workspace=ws)
except ModelNotFoundException as e:
    print("Didn't find model, cannot perform knowledge distillation.")
    raise

model = Xception()
model.load_weights(os.path.join(model_root, "xception_weights.hdf5"))

# Remove softmax
model.layers.pop()

# Now model outputs logits
model = KerasModel(model.input, model.layers[-1].output)

# Save logits as a dict: image name -> logit (256-dimensional vector)
train_logits = {}

batches = 0

for x_batch, _, name_batch in tqdm(train_generator):