Code Example #1
def main_cs(mydict):
    # extra command-line arguments passed to the script
    args = get_args()
    # arguments passed in through main
    my_data_dir = mydict["data_dir"]
    my_ifSE = mydict["ifSE"]
    my_l1loss = mydict["l1loss"]
    my_resume = mydict["resume"]
    if my_l1loss:
        l1loss = 0.1
    else:
        l1loss = 0.0

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    # create model_dir
    print("=> creating model_dir '{}'".format(cfg.MODEL.ARCH))
    # model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    model = my_model(my_ifSE)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = my_resume

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=>ckpt loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError(
            "=> no checkpoTrueint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

    test_dataset = FaceDataset_morph2(my_data_dir,
                                      "test",
                                      img_size=cfg.MODEL.IMG_SIZE,
                                      augment=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=cfg.BATCH_SIZE,
                             shuffle=False,
                             num_workers=cfg.TRAIN.WORKERS,
                             drop_last=False)

    print("=> start testing")
    _, _, test_cs = validate_cs(test_loader, model, None, 0, device, l1loss)
    print(f"test cs list: {test_cs}")
    return test_cs
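
A minimal invocation sketch for main_cs, assuming only the four keys the function reads from mydict; both paths below are placeholders, not values from the project:

test_cs = main_cs({
    "data_dir": "data/morph2",         # placeholder dataset root
    "ifSE": True,                      # whether my_model uses SE blocks
    "l1loss": True,                    # selects the l1loss = 0.1 branch above
    "resume": "checkpoints/best.pth",  # placeholder checkpoint path
})
print(test_cs)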
Code Example #2
def main():
    start_time = smtp.print_time("开始测试!!!")
    # py脚本额外参数
    args = get_args()
    # main函数传入参数
    img_path = args.img_path
    my_resume = args.my_resume

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    # create model_dir
    print("=> creating model_dir '{}'".format(cfg.MODEL.ARCH))
    model = my_model()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = my_resume

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=>ckpt loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError(
            "=> no checkpoTrueint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

    print("=> start testing")
    # img_path, img_size, model, device
    predict_age = validate_age_estimation(img_path, cfg.MODEL.IMG_SIZE, model,
                                          device)
    print(f"predict_age: {predict_age:.2f}")
    end_time = smtp.print_time("测试结束!!!")
    print(smtp.date_gap_abs(start_time, end_time))
    return int(round(predict_age))
Code Example #3
File: train.py  Project: hcgcarry/DLSR_LAB_learning
def validationDataset_init():
    validation_set = brid("validation")
    validation_set = testing_augmentation(validation_set)
    #trainset= custom_dataset_skewed_food("training",transform_test)
    validation_loader = torch.utils.data.DataLoader(validation_set,
                                                    batch_size=32,
                                                    shuffle=False,
                                                    num_workers=0)
    #net = torch.load(modelPath,map_location=torch.device('cpu'))
    # training statistics
    return validation_loader, validation_set


def trainDataset_init():
    trainset = brid("training")
    trainset = training_augmentation(trainset)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=0)
    return trainloader, trainset


trainloader, trainset = trainDataset_init()
validation_loader, validation_set = validationDataset_init()

model = my_model(num_of_class, init_lr, epoch_count)
print("model type", type(model))
print("model type", type(model.training))
model.train(trainloader, trainset, validation_loader, validation_set)
model.saveModel()
Code Example #4
    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                               patience=10,
                                               verbose=0,
                                               mode='auto')
    tbCallBack = keras.callbacks.TensorBoard(
        log_dir='logs',  # log directory
        histogram_freq=0,  # how often (in epochs) to compute histograms; 0 disables them
        # batch_size=32,     # how much data to use when computing histograms
        write_graph=True,  # whether to store the network graph
        write_grads=True,  # whether to visualize gradient histograms
        write_images=True,  # whether to visualize parameters
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None)
    single_model = my_model(config_path,
                            checkpoint_path,
                            class_num,
                            trainable=False).get_single_model()
    single_model.fit_generator(
        train_gen.__iter__(mod="single"),
        steps_per_epoch=len(train_gen),
        # Note: ModelCheckpoint is a class and must be instantiated to be
        # used as a callback; the filepath below is a placeholder.
        callbacks=[history, ModelCheckpoint("weights.h5"), tbCallBack],
        class_weight=single_class_weight,
        verbose=1,
        epochs=200,
        validation_data=valid_gen.__iter__(mod="single"),
        validation_steps=len(valid_gen))

    # multi_model = my_model(config_path, checkpoint_path, class_num, trainable=False).get_multi_model()
    # multi_model.fit_generator(train_gen.__iter__(mod="multi"), steps_per_epoch=len(train_gen),
    #                           callbacks=[history, ModelCheckpoint, tbCallBack], class_weight=multi_class_weight,
    #                           verbose=1, epochs=200, validation_data=valid_gen.__iter__(mod="multi"),
Code Example #5
import model as M
import matplotlib.pyplot as plt
import utilz as U
import numpy as np
from parser_utils import get_parser
import pickle

## Get options
options = get_parser().parse_args()
t_l_path = './fss_test_set.txt'
Best_performance = 0
Valid_miou = []

# Build the model
model = M.my_model(encoder='VGG_b345',
                   input_size=(options.img_h, options.img_w, 3),
                   k_shot=options.kshot,
                   learning_rate=options.learning_rate)
model.summary()

# Load an episode of train
Train_list, Test_list = U.Get_tr_te_lists(options, t_l_path)


# Train on episodes
def train(opt):
    for ep in range(opt.epochs):
        epoch_loss = 0
        epoch_acc = 0
        ## Get an episode for training model
        for idx in range(opt.iterations):
            support, smask, query, qmask = U.get_episode(opt, Train_list)
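            # --- The snippet is truncated here. A plausible continuation,
            # assuming the Keras-style model returned by M.my_model is
            # trained per episode with train_on_batch and compiled with an
            # accuracy metric (the input/target layout is a guess, not the
            # project's code):
            loss, acc = model.train_on_batch([support, smask, query], qmask)
            epoch_loss += loss
            epoch_acc += acc
        print('epoch %d: loss %.4f, acc %.4f' %
              (ep, epoch_loss / opt.iterations, epoch_acc / opt.iterations))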
Code Example #6
File: run.py  Project: playing-code/Web-20
logger.info("create vocab successfully")


def transform(l1):
    global vocab
    l2 = []
    for word in l1:
        l2.append(vocab.get_id(word))
    return l2


train['token'] = train['token_list'].apply(transform)
val['token'] = val['token_list'].apply(transform)

logger.info("construct model")
model = my_model(vocab.embeddings)
model = model.to(device)
loss = torch.nn.CrossEntropyLoss()
pad_id = vocab.get_id(vocab.pad_token)

val_pad_len = min(max_len, max(list(val['length'])))
val['token_ids'] = [(ids + [pad_id] * (val_pad_len - len(ids)))[:val_pad_len]
                    for ids in val['token']]

logger.info('setting optimizer')
optimizer_lr = 0.001
optimizer_param = filter(
    lambda p: p.requires_grad,
    model.parameters())  # keep only the parameters that require gradients
optimizer = torch.optim.Adam(optimizer_param, lr=optimizer_lr)  # use the lr defined above
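
The val['token_ids'] comprehension a few lines up pads short sequences with pad_id and truncates long ones to val_pad_len in one expression. A standalone toy run of the same idiom (values made up):

pad_id, pad_len = 0, 5
for ids in ([3, 7], [1, 2, 3, 4, 5, 6, 7]):
    print((ids + [pad_id] * (pad_len - len(ids)))[:pad_len])
# -> [3, 7, 0, 0, 0]
# -> [1, 2, 3, 4, 5]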
Code Example #7
def main(mydict):
    print("开始训练时间:")
    start_time = time.strftime('%Y-%m-%d %H:%M:%S',
                               time.localtime(time.time()))
    print(start_time)
    # extra command-line arguments passed to the script
    args = get_args()
    # arguments passed in through main
    my_data_dir = mydict["data_dir"]
    my_tensorboard = mydict["tensorboard"]
    my_checkpoint = mydict["checkpoint"]
    my_ifSE = mydict["ifSE"]
    my_l1loss = mydict["l1loss"]
    if my_l1loss:
        l1loss = 0.1
        # l1loss = my_l1value
    else:
        l1loss = 0.0

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()
    start_epoch = 0

    # checkpoint_dir = Path(args.checkpoint)
    checkpoint_dir = Path(my_checkpoint)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    # create model_dir
    print("=> creating model_dir '{}'".format("se_resnext50_32x4d"))
    # model = get_model(model_name="se_resnext50_32x4d")
    model = my_model(my_ifSE)

    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # optionally resume from a checkpoint
    resume_path = args.resume
    if resume_path:
        print(Path(resume_path).is_file())
        if Path(resume_path).is_file():
            print("=> loading checkpoint '{}'".format(resume_path))
            checkpoint = torch.load(resume_path, map_location="cpu")
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                resume_path, checkpoint['epoch']))
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(resume_path))

    if args.multi_gpu:
        model = nn.DataParallel(model)

    if device == "cuda":
        cudnn.benchmark = True

    # loss criterion
    criterion = nn.CrossEntropyLoss().to(device)
    train_dataset = FaceDataset_ceface(my_data_dir,
                                       "train",
                                       img_size=cfg.MODEL.IMG_SIZE,
                                       augment=True,
                                       age_stddev=cfg.TRAIN.AGE_STDDEV)
    train_loader = DataLoader(train_dataset,
                              batch_size=cfg.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.TRAIN.WORKERS,
                              drop_last=True)

    val_dataset = FaceDataset_ceface(my_data_dir,
                                     "valid",
                                     img_size=cfg.MODEL.IMG_SIZE,
                                     augment=False)
    val_loader = DataLoader(val_dataset,
                            batch_size=cfg.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.TRAIN.WORKERS,
                            drop_last=False)

    scheduler = StepLR(optimizer,
                       step_size=cfg.TRAIN.LR_DECAY_STEP,
                       gamma=cfg.TRAIN.LR_DECAY_RATE,
                       last_epoch=start_epoch - 1)
    best_val_mae = 10000.0
    train_writer = None
    val_mae_list = []
    train_loss_list = []
    val_loss_list = []

    if my_tensorboard is not None:
        opts_prefix = "_".join(args.opts)
        train_writer = SummaryWriter(log_dir=my_tensorboard + "/" +
                                     opts_prefix + "_train")
        val_writer = SummaryWriter(log_dir=my_tensorboard + "/" + opts_prefix +
                                   "_val")

    for epoch in range(start_epoch, 80):  # hardcoded; cfg.TRAIN.EPOCHS is ignored here
        # train
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, device, l1loss)
        train_loss_list.append(train_loss)

        # validate
        val_loss, val_acc, val_mae = validate(val_loader, model, criterion,
                                              epoch, device, l1loss)
        val_mae_list.append(val_mae)
        val_loss_list.append(val_loss)

        if my_tensorboard is not None:
            train_writer.add_scalar("loss", train_loss, epoch)
            train_writer.add_scalar("acc", train_acc, epoch)
            val_writer.add_scalar("loss", val_loss, epoch)
            val_writer.add_scalar("acc", val_acc, epoch)
            val_writer.add_scalar("mae", val_mae, epoch)

        # Note: the "or val_mae > 0" clause makes this condition always
        # true, so the else branch below is effectively dead code.
        if val_mae < best_val_mae or val_mae > 0:
            print(
                f"=> [epoch {epoch:03d}] best val mae was improved from {best_val_mae:.3f} to {val_mae:.3f}"
            )
            best_val_mae = val_mae
            # checkpoint
            if val_mae < 4.0:
                model_state_dict = (model.module.state_dict()
                                    if args.multi_gpu else model.state_dict())
                torch.save(
                    {
                        'epoch': epoch + 1,
                        'arch': cfg.MODEL.ARCH,
                        'state_dict': model_state_dict,
                        'optimizer_state_dict': optimizer.state_dict()
                    },
                    str(
                        checkpoint_dir.joinpath(
                            "epoch{:03d}_{:.5f}_{:.4f}.pth".format(
                                epoch, val_loss, val_mae))))
        else:
            print(
                f"=> [epoch {epoch:03d}] best val mae was not improved from {best_val_mae:.3f} ({val_mae:.3f})"
            )

        # adjust learning rate
        scheduler.step()

    print("=> training finished")
    print(f"additional opts: {args.opts}")
    print(f"best val mae: {best_val_mae:.3f}")
    print("结束训练时间:")
    end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print(end_time)
    print("训练耗时: " + smtp.date_gap(start_time, end_time))
    # 发邮件
    smtp.main(
        dict_={
            "total training epochs: ": cfg.TRAIN.EPOCHS,
            "training time: ": smtp.date_gap(start_time, end_time),
            "lowest val_mae: ": best_val_mae,
            "mean val_mae: ": np.array(val_mae_list).mean(),
            "val_mae_list: ": val_mae_list,
            "train_loss_list: ": train_loss_list,
            "val_loss_list: ": val_loss_list,
            "MODEL.IMG_SIZE: ": cfg.MODEL.IMG_SIZE,
            "BATCH_SIZE: ": cfg.BATCH_SIZE,
            "LOSS.l1: ": l1loss,
            "TRAIN.LR: ": cfg.TRAIN.LR,
            "TRAIN.LR_DECAY_STEP: ": cfg.TRAIN.LR_DECAY_STEP,
            "TRAIN.LR_DECAY_RATE: ": cfg.TRAIN.LR_DECAY_RATE,
            "TRAIN.OPT: ": cfg.TRAIN.OPT,
            "MODEL.ARCH: ": cfg.MODEL.ARCH
        })
    return best_val_mae
Code Example #8
#%%
X_train_all, y_train_all, driver_id, unique_drivers = inp.load_train()
X_train, y_train, train_index = inp.copy_selected_drivers(X_train_all,
                                                          y_train_all,
                                                          driver_id,
                                                          unique_list_train)
X_valid, y_valid, test_index = inp.copy_selected_drivers(X_train_all,
                                                         y_train_all,
                                                         driver_id,
                                                         unique_list_valid)
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_valid = tf.keras.utils.to_categorical(y_valid, 10)

#%%
model = md.my_model(img_w, img_h, color_type)
#model = tf.keras.models.load_model('log/weights-0.8149.hdf5')
tf.keras.utils.plot_model(model,'models/vgg_std16_model.png')
#%%
w = model.layers[-1].get_weights()
#%%
# Private TF path; the public equivalent is `from tensorflow.keras import backend as K`.
from tensorflow.python.keras._impl.keras import backend as K
import cv2
def visualize_class_activation_map(model, img_path, target_class):
    origin_img = inp.get_im_cv2([img_path], 224, 224, 3)
    class_weights = model.layers[-1].get_weights()[0]

    final_conv_layer = model.layers[17]
    get_output = K.function([model.layers[0].input],
                            [final_conv_layer.output, model.layers[-1].output])
    [conv_outputs, predictions] = get_output([origin_img])
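    # --- The function is truncated here. A common way to finish a class
    # activation map from these pieces (a sketch, not the project's code;
    # it assumes numpy is imported as np and the final conv output has
    # shape (1, h, w, n_filters)):
    conv_outputs = conv_outputs[0]
    cam = np.zeros(conv_outputs.shape[:2], dtype=np.float32)
    for i, w in enumerate(class_weights[:, target_class]):
        cam += w * conv_outputs[:, :, i]  # weight map i by its class weight
    cam = np.maximum(cam, 0)              # keep positive evidence only
    cam /= cam.max() + 1e-8               # normalize to [0, 1]
    cam = cv2.resize(cam, (224, 224))     # upsample to input resolution
    return cam, predictions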
Code Example #9
def test_is_dict(self):
    s = model.my_model(1, 100)
    print(s)
    self.assertTrue(isinstance(s, dict))
Code Example #10
def test_is_inRange(self):
    s = model.my_model(1, 100)
    # Bug fix: the original passed dict as assertTrue's msg argument,
    # so the assertion only checked that s was truthy.
    self.assertIsInstance(s, dict)
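
Examples #9 and #10 read like fragments of one test class. A self-contained sketch that makes them runnable; the model import and the meaning of the (1, 100) arguments are assumptions inferred from the calls above:

import unittest

import model  # assumed to expose my_model(low, high) returning a dict


class TestMyModel(unittest.TestCase):
    def test_is_dict(self):
        s = model.my_model(1, 100)
        self.assertIsInstance(s, dict)

    def test_is_in_range(self):
        s = model.my_model(1, 100)
        # Hypothetical check implied by the original test's name.
        self.assertTrue(all(1 <= v <= 100 for v in s.values()))


if __name__ == "__main__":
    unittest.main()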
Code Example #11
File: main.py  Project: vunb211197/Multi_Learning
import argparse
import cv2
from model import my_model


def getArgument():
    arg = argparse.ArgumentParser()
    # define an argument to parse
    arg.add_argument('-i', '--image_path', help='link to image')
    # Converts the parsed arguments into an object whose values become attributes of a namespace.
    return arg.parse_args()


arg = getArgument()

# read the image from the given path

img = cv2.imread(arg.image_path)
img = cv2.resize(img, (96, 96))

my_model = my_model(False)  # note: this rebinds the imported my_model name
# predict on the image
my_model.predict(img)

# show the image
cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Code Example #12
                                             classes=n_classes)
        # Keras applications take `classes`, not `n_classes`; note also that
        # include_top=True with weights='imagenet' forces the 1000-class head.
        if Config['finetune'] == 'MobileNet':
            model = applications.mobilenet.MobileNet(include_top=True,
                                                     weights='imagenet',
                                                     classes=n_classes)
        if Config['finetune'] == 'ResNet50':
            model = applications.resnet.ResNet50(include_top=True,
                                                 weights='imagenet',
                                                 classes=n_classes)
        if Config['finetune'] == 'InceptionV3':
            model = applications.inception_v3.InceptionV3(include_top=True,
                                                          weights='imagenet',
                                                          classes=n_classes)
    else:
        if Config['restore']:
            # restore training
            model = load_model(Config['restore_file'])
        else:
            # training from scratch
            model = my_model(classes=n_classes)

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    results = model.fit_generator(generator=train_generator,
                                  validation_data=test_generator,
                                  epochs=Config['num_epochs'])
    save_info(model, results)
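
fit_generator is deprecated in TF 2.x Keras; Model.fit accepts generators and keras.utils.Sequence objects directly, so the training call above could be written with the modern API (same names as in the example):

results = model.fit(train_generator,
                    validation_data=test_generator,
                    epochs=Config['num_epochs'])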
Code Example #13
def main(data_dir):
    print("开始训练时间:")
    start_time = time.strftime('%Y-%m-%d %H:%M:%S',
                               time.localtime(time.time()))
    print(start_time)
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()
    start_epoch = 0
    # checkpoint_dir = Path(args.checkpoint)
    # checkpoint_dir.mkdir(parents=True, exist_ok=True)

    # create model_dir
    print("=> creating model_dir '{}'".format(cfg.MODEL.ARCH))
    # model_dir = get_model(model_name=cfg.MODEL.ARCH)
    model = my_model(True)

    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # optionally resume from a checkpoint
    resume_path = args.resume

    if resume_path:
        print(Path(resume_path).is_file())
        if Path(resume_path).is_file():
            print("=> loading checkpoint '{}'".format(resume_path))
            checkpoint = torch.load(resume_path, map_location="cpu")
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                resume_path, checkpoint['epoch']))
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(resume_path))

    if args.multi_gpu:
        model = nn.DataParallel(model)

    if device == "cuda":
        cudnn.benchmark = True

    # loss criterion
    criterion = nn.CrossEntropyLoss().to(device)
    train_dataset = FaceDataset_FGNET(data_dir,
                                      "train",
                                      img_size=cfg.MODEL.IMG_SIZE,
                                      augment=True,
                                      age_stddev=cfg.TRAIN.AGE_STDDEV)
    train_loader = DataLoader(train_dataset,
                              batch_size=cfg.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.TRAIN.WORKERS,
                              drop_last=True)

    val_dataset = FaceDataset_FGNET(data_dir,
                                    "test",
                                    img_size=cfg.MODEL.IMG_SIZE,
                                    augment=False)
    val_loader = DataLoader(val_dataset,
                            batch_size=cfg.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.TRAIN.WORKERS,
                            drop_last=False)

    scheduler = StepLR(optimizer,
                       step_size=cfg.TRAIN.LR_DECAY_STEP,
                       gamma=cfg.TRAIN.LR_DECAY_RATE,
                       last_epoch=start_epoch - 1)
    best_val_mae = 10000.0
    train_writer = None
    val_mae_list = []

    if args.tensorboard is not None:
        opts_prefix = "_".join(args.opts)
        train_writer = SummaryWriter(log_dir=args.tensorboard + "/" +
                                     opts_prefix + "_train")
        val_writer = SummaryWriter(log_dir=args.tensorboard + "/" +
                                   opts_prefix + "_val")

    for epoch in range(start_epoch, cfg.TRAIN.EPOCHS):
        # train
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, device)

        # validate
        val_loss, val_acc, val_mae = validate(val_loader, model, criterion,
                                              epoch, device)
        val_mae_list.append(val_mae)

        if args.tensorboard is not None:
            train_writer.add_scalar("loss", train_loss, epoch)
            train_writer.add_scalar("acc", train_acc, epoch)
            val_writer.add_scalar("loss", val_loss, epoch)
            val_writer.add_scalar("acc", val_acc, epoch)
            val_writer.add_scalar("mae", val_mae, epoch)

        if val_mae < best_val_mae:
            print(
                f"=> [epoch {epoch:03d}] best val mae was improved from {best_val_mae:.3f} to {val_mae:.3f}"
            )
            best_val_mae = val_mae
            # checkpoint
            # if val_mae < 2.1:
            #     model_state_dict = model.module.state_dict() if args.multi_gpu else model.state_dict()
            #     torch.save(
            #         {
            #             'epoch': epoch + 1,
            #             'arch': cfg.MODEL.ARCH,
            #             'state_dict': model_state_dict,
            #             'optimizer_state_dict': optimizer.state_dict()
            #         },
            #         str(checkpoint_dir.joinpath("epoch{:03d}_{:.5f}_{:.4f}.pth".format(epoch, val_loss, val_mae)))
            #     )
        else:
            print(
                f"=> [epoch {epoch:03d}] best val mae was not improved from {best_val_mae:.3f} ({val_mae:.3f})"
            )

        # adjust learning rate
        scheduler.step()

    print("=> training finished")
    print(f"additional opts: {args.opts}")
    print(f"best val mae: {best_val_mae:.3f}")
    print("结束训练时间:")
    end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print(end_time)
    print("训练耗时: " + smtp.date_gap(start_time, end_time))
    return best_val_mae