Example no. 1
def predict_on_extra(model, path, class_dict):
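    # Sketch of the intended flow (inferred from the code below): slide a
    # non-overlapping 84x84 window over each image, classify every patch, and
    # mark the centre of each patch predicted as 'garbage' with a red dot.
    # `cfg` and `res_path` are assumed to be defined elsewhere in the module.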
    if torch.cuda.is_available():
        model.cuda()
    file_list = os.listdir(path)
    transform = get_transform(input_size=cfg.INPUT_SIZE, imgset="test")
    for i in tqdm(file_list):
        img_path = os.path.join(path, i)
        save_path = os.path.join(res_path, i)
        img = cv2.imread(img_path)
        points = []  # reset the detected points for each image
        ii = 0
        jj = 0
        img_h, img_w, _ = img.shape  # OpenCV arrays are (height, width, channels)
        while ii + 84 < img_h:
            while jj + 84 < img_w:
                to_pred = img[ii:ii + 84, jj:jj + 84]
                to_pred = Image.fromarray(to_pred).convert('RGB')
                to_pred = transform(to_pred)
                to_pred = to_pred.unsqueeze(0)
                if torch.cuda.is_available():
                    to_pred = to_pred.cuda()
                with torch.no_grad():
                    out = model(to_pred)
                class_ = torch.argmax(out, dim=1).cpu().item()
                if class_dict[class_] == 'garbage':
                    points.append((jj + 42, ii + 42))
                jj += 84
            ii += 84
            jj = 0
        for point in points:
            cv2.circle(img, point, radius=15, color=(0, 0, 255), thickness=-1)
        cv2.imwrite(save_path, img)
Example no. 2
def image_generator(increment, counter):
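    # Builds one batch: for each slot, fetch the DCT-domain transform at the
    # current counter, reconstruct a 32x32x3 YCC image via dataset.idct, then
    # advance the counter modulo dataset.quantization. `batch_size`, `scale`
    # and `dataset` are assumed to be defined at module level.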
    to_net = np.empty((batch_size, 32, 32, 3), 'float32')
    for i in range(batch_size):
        tr = dataset.get_transform(counter)
        to_net[i] = dataset.idct(tr)  # ycc format
        counter = np.mod(np.add(counter, increment), dataset.quantization)

    to_net = np.divide(np.subtract(to_net, scale), scale)
    # print('batch stats: max={}, min={}'.format(to_net.max(), to_net.min()))
    return to_net, counter
Example no. 3
    parser.add_argument('--train_way', type=int, default=30)
    parser.add_argument('--valid_way', type=int, default=5)
    parser.add_argument('--hallu_m', type=int, default=20)
    parser.add_argument('--distance',
                        type=str,
                        default='parametric',
                        choices=['euclidian', 'cosine', 'parametric'])
    parser.add_argument('--device', type=str, default='cuda')
    args = parser.parse_args()

    args.name = f'hallu{args.hallu_m}_shot{args.shot}_trainway{args.train_way}'+\
                f'_validway{args.valid_way}_{args.distance}'
    wandb.init(config=args, project='dlcv_proto_net', name=args.name)

    # Image transform
    train_trans, valid_trans = get_transform()

    # Train data
    train_set = MiniImageNet_Dataset('../hw4_data/train/', train_trans)
    train_sampler = CategoriesSampler(train_set.label,
                                      n_batch=args.n_batch,
                                      n_ways=args.train_way,
                                      n_shot=args.shot + args.query)
    train_loader = DataLoader(train_set,
                              batch_sampler=train_sampler,
                              num_workers=6,
                              worker_init_fn=worker_init_fn)

    # Valid data
    valid_set = MiniImageNet_Dataset('../hw4_data/val/', valid_trans)
    valid_sampler = CategoriesSampler(valid_set.label,
Example no. 4
import torch
from models.MaskRCNN import get_model_instance_segmentation

from dataset import PennFudanDataset, get_transform
from references.engine import train_one_epoch, evaluate
from references import utils

# train on the GPU or the CPU, if a GPU is not available
device = torch.device('cuda') if torch.cuda.is_available() else torch.device(
    'cpu')

# our dataset has two classes only - background and person
num_classes = 2
# use our dataset and the defined transformations
dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
dataset_test = PennFudanDataset('PennFudanPed', get_transform(train=False))

# split the dataset in train and test set
indices = torch.randperm(len(dataset)).tolist()
dataset = torch.utils.data.Subset(dataset, indices[:-50])
dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])

# define training and validation data loaders
data_loader = torch.utils.data.DataLoader(dataset,
                                          batch_size=2,
                                          shuffle=True,
                                          num_workers=4,
                                          collate_fn=utils.collate_fn)

data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                               batch_size=1,
Example no. 5
    torch.cuda.set_device(params.gpu_ids[0])

params.nThreads = 1  # test code only supports nThreads = 1
params.batchSize = 1  # test code only supports batchSize = 1
params.serial_batches = True  # no shuffle
params.no_flip = True  # no flip

###

create_dir(Path(params.results_dir))
inference_images = make_dataset(params.source_dir)
num_inference_images = len(inference_images)
print(f'#inference images = {num_inference_images}')

model = create_model(params)
transform = get_transform(params)
start_time = time.time()

for i, img_path in enumerate(inference_images):
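    # Each source frame is paired with a target frame that apparently shares
    # the numeric frame id but uses an 'H' prefix; both are loaded as RGB.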

    if params.how_many:
        if i >= params.how_many:
            print(f'how_many: {params.how_many}')
            break

    frameid = img_path.split('/')[-1].replace('.png', '')
    target_frameid = f'H{frameid[1:]}'
    target_path = os.path.join(params.target_dir, f'{target_frameid}.png')

    source_img = Image.open(img_path).convert('RGB')
    target_img = Image.open(target_path).convert('RGB')
Example no. 6
    ckpt = torch.load(os.path.join(gen_path, model_mask_newAS))
    model_to_test.load_state_dict(ckpt['model'])

if cnn == '2':
    model_to_test = get_model_frcnn_fpn_new_anchor(num_classes = 2, pretrained = True, new_AS = False, focal_loss = False)
    ckpt = torch.load(os.path.join(gen_path, model_frcnn_stdAS))
    model_to_test.load_state_dict(ckpt['model'])

if cnn == '3':
    model_to_test = get_model_frcnn_fpn_new_anchor(num_classes = 2, pretrained = True, new_AS = True, focal_loss = False)
    ckpt = torch.load(os.path.join(gen_path, model_frcnn_newAS))
    model_to_test.load_state_dict(ckpt['model'])

dataset_test = CERNDataset_Test(root = os.path.join(dataset, 'test'),
                                data_file = os.path.join(dataset, 'test', 'data/labels.csv'),
                                transforms = get_transform(train = False))

'''
    images processing
'''

print('images processing has started')

for idx in range(len(dataset_test.imgs)):
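    # Load the image and its ground-truth boxes, then run the detector in
    # eval mode under no_grad to obtain the predictions for this sample.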
    img, target = dataset_test[idx]
    label_boxes = np.array(target["boxes"])

    model_to_test.eval()
    with torch.no_grad():
        prediction = model_to_test([img])
Example no. 7
def main(argv):
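    # Parse the command line: -p/--modelPath (weights file), -d/--device
    # ('gpu' or 'cpu') and -s/--source (an http(s) image or YouTube URL).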
    modelPath = 'model.pth'
    selectedDevice = ''
    source = ''
    youtube = False
    try:
        opts, args = getopt.getopt(argv, "p:d:s:",
                                   ["modelPath=", "device=", "source="])
    except getopt.GetoptError:
        print("Error parsing arguments")
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-p", "--modelPath"):
            if arg.endswith(('.pth', '.pt')):
                modelPath = arg
            else:
                print("modelPath must end in .pth or .pt")
                sys.exit(2)
        if opt in ("-d", "--device"):
            lower = arg.lower()
            if lower in ("gpu", "cpu"):
                selectedDevice = lower
            else:
                print("device must be either 'gpu' or 'cpu'")
                sys.exit(2)
        if opt in ("-s", "--source"):
            if arg.startswith(("http://", "https://")):
                if arg.startswith(("https://www.youtube.com",
                                   "https://youtube.com",
                                   "https://youtu.be")):
                    youtube = True
                source = arg
            else:
                print("Source must begin with http:// or https://")
                sys.exit(2)

    if source == '':
        print("Please provide a source to evaluate using -s or --source=")
        sys.exit(2)

    # evaluate on the GPU or on the CPU, if a GPU is not available
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    # check if user has requested either cpu or gpu processing
    if torch.cuda.is_available() and selectedDevice == '':
        print("CUDA device is detected, using GPU for training and evaluation")
    elif selectedDevice != '':
        if selectedDevice == 'gpu':
            if not torch.cuda.is_available():
                print("Cannot find CUDA driver or device")
                sys.exit(2)
            device = torch.device('cuda')
        if selectedDevice == 'cpu':
            device = torch.device('cpu')

    model = get_model_instance_segmentation(2)
    model.load_state_dict(torch.load(modelPath))
    model.eval()
    model = model.to(device)

    img = None
    if youtube:
        vPafy = pafy.new(source)
        play = vPafy.getbest()
        cap = cv2.VideoCapture(play.url)
        _, frame = cap.read()
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        cap.release()
        cv2.destroyAllWindows()
    else:
        with urllib.request.urlopen(source) as url:
            image = np.asarray(bytearray(url.read()), dtype="uint8")
            img = cv2.imdecode(image, cv2.IMREAD_COLOR)
            # cv2.imdecode returns BGR; convert so both branches yield RGB
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # img is an RGB array at this point in both branches.

    im_pil = Image.fromarray(img).convert("RGB")
    transform = get_transform(train=False)
    im_pil = transform(im_pil, None)
    imList = [im_pil[0]]
    ok = list(image.to(device) for image in imList)
    output = model(ok)

    positives = 0
    weak = 0
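    # Count detections scored above 0.99 as people; anything below that
    # threshold is reported separately as a weak signal.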
    for score in output[0]['scores']:
        if score > 0.99:
            positives = positives + 1
        else:
            weak = weak + 1

    print("---- Results ----")
    if positives == 0:
        print("Did not detect any people")
    else:
        print("Detected " + str(positives) + " people")
        print("Counted " + str(weak) +
              " additional weak signals below 99% confidence")
Example no. 8
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
test_trans = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

dataset = VRDFRDataset(dataset_path='./',
                       type='train',
                       num_classes=100,
                       cls_transform=train_trans,
                       ifselected=False,
                       ifdtc=True,
                       dtc_transform=get_transform(train=True))
data_loader = DataLoader(dataset,
                         batch_size=2,
                         shuffle=True,
                         num_workers=8,
                         collate_fn=dataset.collate_fn)

test_dataset = VRDFRDataset(dataset_path='./',
                            type='test',
                            num_classes=100,
                            cls_transform=test_trans,
                            ifselected=True,
                            ifdtc=True,
                            dtc_transform=get_transform(train=False))
test_data_loader = DataLoader(test_dataset,
                              batch_size=2,
Example no. 9
def main():
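    # Rough outline: build the CERN train/test datasets, split them by index,
    # train a Faster/Mask R-CNN variant with SGD + StepLR, evaluate each epoch
    # and apply early stopping on the validation mAP@0.5.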
    dataset = options.dataset
    writer = SummaryWriter(tb_path)
    data_train = CERNDataset(root=os.path.join('Datasets', dataset),
                             data_file=os.path.join('Datasets', dataset,
                                                    'data/labels.csv'),
                             transforms=get_transform(train=True))
    data_test = CERNDataset(root=os.path.join('Datasets', dataset),
                            data_file=os.path.join('Datasets', dataset,
                                                   'data/labels.csv'),
                            transforms=get_transform(train=False))

    torch.manual_seed(1)

    indices = torch.randperm(len(data_train)).tolist()
    if options.dataset == 'GDXray_dataset/datasets_cropped':
        len_set = int(len(data_train) * 0.4) * options.reduced
        splitter = int(len_set * 0.3)
        dataset_train = torch.utils.data.Subset(data_train,
                                                indices[:(len_set - splitter)])
        dataset_test = torch.utils.data.Subset(data_test, indices[-splitter:])
        dataloader_train = torch.utils.data.DataLoader(
            dataset=dataset_train,
            batch_size=options.batch_size,
            shuffle=True,
            num_workers=options.num_workers,
            collate_fn=utils.collate_fn)
        dataloader_test = torch.utils.data.DataLoader(
            dataset=dataset_test,
            batch_size=1,
            shuffle=False,
            num_workers=options.num_workers,
            collate_fn=utils.collate_fn)

    else:
        splitter = int(len(data_train) * 0.3)
        dataset_train = torch.utils.data.Subset(data_train,
                                                indices[:-splitter])
        dataset_test = torch.utils.data.Subset(data_test, indices[-splitter:])
        dataloader_train = torch.utils.data.DataLoader(
            dataset=dataset_train,
            batch_size=options.batch_size,
            shuffle=True,
            num_workers=options.num_workers,
            collate_fn=utils.collate_fn)
        dataloader_test = torch.utils.data.DataLoader(
            dataset=dataset_test,
            batch_size=1,
            shuffle=False,
            num_workers=options.num_workers,
            collate_fn=utils.collate_fn)
    #     ## saving dataset in tb_log
    #
    # seq = iaa.Sequential([
    #     iaa.Resize({"height": 700, "width": 700})
    # ])
    #
    # arrays = []
    # images = []
    # arrays_aug_img = []
    # tensors_aug_img = []
    # labels = []
    # images_aug_with_boxes = []
    # tensors_aug_img_with_boxes = []
    # for batch_idx, (image, label) in enumerate(dataloader_train):
    #     img = image[0].numpy().transpose(1,2,0)
    #     img_aug = seq(image = img)
    #     img_aug_tensor = torch.from_numpy(img_aug.transpose(2,0,1))
    #
    #     images.append(image[0].unsqueeze(0))
    #     arrays.append(img)
    #     labels.append(label)
    #     arrays_aug_img.append(img_aug)
    #     tensors_aug_img.append(img_aug_tensor)
    #
    #     boxes = []
    #     for box in labels[0][0]['boxes']:
    #         box = box.tolist()
    #         boxes.append(box)
    #     my_bbs = []
    #     for b in boxes:
    #         bb = ia.BoundingBox(x1 = b[0], y1 = b[1], x2 = b[2], y2 = b[3])
    #         my_bbs.append(bb)
    #     bbs_oi = BoundingBoxesOnImage(my_bbs, shape = img.shape)
    #     img_aug, bbs_aug = seq(image = img, bounding_boxes = bbs_oi)
    #     bbs_aug_no_fout_clipart = bbs_aug.remove_out_of_image().clip_out_of_image()
    #     image_aug_with_boxes = bbs_aug_no_fout_clipart.draw_on_image(img_aug, size=2, color=[0,0,255])
    #     images_aug_with_boxes.append(image_aug_with_boxes)
    #     tensor_aug_img_with_boxes = torch.from_numpy(image_aug_with_boxes.transpose(2,0,1))
    #     tensors_aug_img_with_boxes.append(tensor_aug_img_with_boxes)
    #
    # grid = torchvision.utils.make_grid(tensors_aug_img, padding = 20)
    # grid_1 = torchvision.utils.make_grid(tensors_aug_img_with_boxes, padding = 20)
    # writer.add_image('images', grid)
    #
    # ##  TODO "Resize" imgaug-function seems to not work correctly in annotations
    # writer.add_image('images_with_boxes', grid_1)

    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    num_classes = options.num_classes
    if options.model == 1:
        model = get_model_frcnn_fpn_new_anchor(num_classes, options.pretrained,
                                               options.anchor_size)
    if options.model == 0:
        model = get_model_masck_fpn_new_anchor(num_classes, options.pretrained,
                                               options.anchor_size)

    model.to(device)
    #writer.add_graph(model, tensors_aug_img)

    params = [p for p in model.parameters() if p.requires_grad]
    total_parameter = sum(p.numel() for p in model.parameters())
    total_parameter_trainable = sum(p.numel() for p in model.parameters()
                                    if p.requires_grad)
    optimizer = torch.optim.SGD(params=params,
                                lr=options.learning_rate,
                                momentum=options.momentum,
                                weight_decay=options.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,
                                                   step_size=3,
                                                   gamma=0.1)
    es = EarlyStopping(patience=options.patience,
                       verbose=True,
                       delta=options.delta)

    #   resume non finished training:
    if options.resume:
        if os.path.isfile(options.resume):
            print("loading weights '{}'".format(options.resume))
            checkpoint = torch.load(options.resume)
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            resume_epoch = checkpoint['epoch']
            print("loaded checkpoint '{}' (epoch {})".format(
                options.resume, checkpoint['epoch']))
        else:
            print("no checkpoint found at '{}'".format(options.resume))
            resume_epoch = 0
    else:
        resume_epoch = 0

    num_epochs = options.epochs

    for epoch in range(resume_epoch, num_epochs):
        train_one_epoch(model=model,
                        optimizer=optimizer,
                        data_loader=dataloader_train,
                        device=device,
                        epoch=epoch,
                        print_freq=10,
                        writer=writer,
                        ckpt_path=ckpt_path)

        lr_scheduler.step()
        (coco, map05) = evaluate(model=model,
                                 data_loader=dataloader_test,
                                 device=device,
                                 writer=writer,
                                 epoch=epoch)
        es(val_acc=map05,
           model=model,
           path=best_path,
           epoch=epoch,
           optimizer=optimizer)
Example no. 10
def main():
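    # Read config.yaml, build COCO keypoint train/val datasets, and train the
    # PoseModel with JointMSELoss, logging and validating every 1000 iterations.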

    config_path = "config.yaml"
    with open(config_path, 'r') as fp:
        config = yaml.safe_load(fp)

    coco_train_imgs = os.path.join(config['path'], "images/train2017")
    coco_train_annos_path = os.path.join(
        config['path'], "annotations/person_keypoints_train2017.json")

    coco_val_imgs = os.path.join(config['path'], "images/val2017")
    coco_val_annos_path = os.path.join(
        config['path'], "annotations/person_keypoints_val2017.json")

    epochs = config['num_epochs']

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    train_dataset = COCODataset(coco_train_imgs,
                                coco_train_annos_path,
                                num_keypoints=17,
                                transform=get_transform(train=True))

    val_dataset = COCODataset(coco_val_imgs,
                              coco_val_annos_path,
                              num_keypoints=17,
                              transform=get_transform(train=False))

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=8,
                                  shuffle=True,
                                  num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=4,
                                shuffle=True,
                                num_workers=4)

    model = PoseModel(num_kpts=17)
    model.to(device)
    model.train()

    criterion = JointMSELoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=config['learning_rate'],
                          momentum=config['momentum'])

    logs = []

    for e in range(epochs):
        running_loss = 0.0

        for i, data in enumerate(train_dataloader):
            imgs, _, heatmaps = data
            imgs = imgs.to(device)
            heatmaps = heatmaps.to(device)

            optimizer.zero_grad()

            outs = model(imgs)

            loss = criterion(outs.float(), heatmaps.float())

            loss.backward()

            optimizer.step()
            running_loss += loss.item()

            if i % 1000 == 999:
                print("Epoch {} | Iteration {} | Loss {:.4f}".format(
                    e + 1, i + 1, running_loss / 1000.))
                logs.append("Epoch {} | Iteration {} | Loss {:.4f}".format(
                    e + 1, i + 1, running_loss / 1000.))
                running_loss = 0.0
                validate(model, val_dataloader, criterion, device)
                model.train()

        print("Save model")
        torch.save(model.state_dict(), f'model_epochs_{e}.pth')

    print("Test training finished.")
    torch.save(model.state_dict(), f'model_epochs_{epochs}.pth')

    with open("logs.txt", 'a') as fp:
        for el in logs:
            fp.write(el + "\n")

    print("Logs saved")
Example no. 11
path = './data/submission_format.csv'
# path = './data/train.csv'
df = pd.read_csv(path)

side_df = pd.read_csv('./data/official_test.csv')
ids = side_df.image_path
# ids = df.image_id

# prefix = 'C:/Users/Admin/Desktop/Wind_data/test/'
# prefix = 'D:/Predict_Wind/test/'

# ids = [prefix + str(i) + '.jpg' for i in ids]
batch_size = 150

_, transform = get_transform(366)
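# Assumption (consistent with the other snippets): get_transform(image_size)
# returns (train_transform, test_transform); only the test transform is kept.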

dataset_test = WindDataset(image_list=ids, transform=transform, test=True)

shuffle_loader = DataLoader(dataset_test,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=8)

test_loader = DataLoader(dataset_test,
                         batch_size=batch_size,
                         shuffle=False,
                         num_workers=8)

warm_up = True
train_mode = False
Example no. 12
    correct = 0
    total = 0
    with torch.no_grad():
        for data in data_loader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('Accuracy of the network on the %s val images: %d %%' % (len(sampler),
                                                                   100 * correct / total))


dataset = CustomDataset(get_transform(True), labels)

batch_size = 4
validation_split = .2
shuffle_dataset = True
random_seed = 42

dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset:
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]

valid_sampler = SubsetRandomSampler(val_indices)
Example no. 13
from dataset import MiniImageNet_Dataset, get_transform
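# Quick sanity check: print the first few samples and labels of each split
# and the number of distinct training classes.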

trans, _ = get_transform()
train_set = MiniImageNet_Dataset('../hw4_data/train/', trans)
valid_set = MiniImageNet_Dataset('../hw4_data/val/', trans)

print(train_set.data[:10], train_set.label[:10])
print(valid_set.data[:10], valid_set.label[:10])
print(len(set(train_set.label)))
Example no. 14
def main():
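    # Overall flow: load the wind-speed CSVs (optionally a k-fold split),
    # build train/val/warm-up loaders, pick a backbone and optimizer, warm up
    # the head for a few epochs, then train with per-epoch checkpoints and a
    # JSON loss log.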
    anchors = [30, 54, 95]
    shuffle = not (args.no_shuffle)
    exp = args.exp
    warm_up_epoch = 3

    # Load and process data

    if args.fold:
        df_train = pd.read_csv(args.data_path +
                               'k_fold/official_train_fold%d.csv' %
                               (args.fold))
        df_val = pd.read_csv(args.data_path +
                             'k_fold/official_val_fold%d.csv' % (args.fold))
    else:
        df_train = pd.read_csv(args.data_path + 'official_train.csv')
        df_val = pd.read_csv(args.data_path + 'official_val.csv')

    train = df_train.image_path.to_list()
    val = df_val.image_path.to_list()
    if exp:
        y_train = df_train.anchor.to_list()
        y_val = df_val.anchor.to_list()
        reg_train_gt = df_train.exp_wind.to_list()
        reg_val_gt = df_val.exp_wind.to_list()
    else:
        y_train = df_train.wind_speed.to_list()
        y_val = df_val.wind_speed.to_list()

    train_transform, val_transform = get_transform(args.image_size)

    train_dataset = WindDataset(image_list=train,
                                target=y_train,
                                exp_target=reg_train_gt if exp else None,
                                transform=train_transform)

    val_dataset = WindDataset(image_list=val,
                              target=y_val,
                              exp_target=reg_val_gt if exp else None,
                              transform=val_transform)

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=args.batch_size,
                              shuffle=shuffle,
                              num_workers=args.num_workers,
                              drop_last=True)

    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.num_workers,
                            drop_last=True)

    warm_loader = DataLoader(dataset=train_dataset,
                             batch_size=args.batch_size * 14,
                             shuffle=shuffle,
                             num_workers=args.num_workers,
                             drop_last=True)

    # Load model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    last_epoch = 0

    # model = ResNet50_BN_idea()
    if not exp:
        model = Effnet_Wind_B7()
        # model = Effnet_Wind_B5()
    else:
        model = Effnet_Wind_B5_exp_6()
    # model = ResNetExample()
    # if not exp:
    #     model = Seresnext_Wind()
    # else:
    #     model = Seresnext_Wind_Exp()

    # Optimizer
    if args.opt == 'radam':
        optimizer = RAdam(
            model.parameters(),
            lr=args.lr,
            betas=(0.9, 0.999),
            eps=1e-8,
            weight_decay=args.weight_decay,
        )
    elif args.opt == 'adamw':
        optimizer = AdamW(model.parameters(), args.lr)

    elif args.opt == 'adam':
        optimizer = Adam(model.parameters(),
                         args.lr,
                         weight_decay=args.weight_decay)
    else:
        optimizer = SGD(model.parameters(),
                        args.lr,
                        momentum=0.9,
                        nesterov=True,
                        weight_decay=args.weight_decay)

    if args.weights:
        # model.load_state_dict(torch.load(args.weights))
        last_epoch = extract_number(args.weights)
        try:
            checkpoint = torch.load(args.weights)
            model.load_state_dict(checkpoint['model_state_dict'])
            if checkpoint['pre_opt'] == args.opt:
                optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                print(optimizer)
        except Exception:  # fall back to loading a plain state_dict
            model.load_state_dict(torch.load(args.weights))
    else:
        model.apply(reset_m_batchnorm)

    model.to(device)

    # Loss function
    if exp:
        criterion = JointLoss2()
    else:
        criterion = RMSELoss()

    # generate log and visualization
    save_path = args.save_path

    log_cache = (args.batch_size, args.image_size, shuffle, exp)

    write_log(args.save_path, model, optimizer, criterion, log_cache)

    plot_dict = {'train': list(), 'val': list()}

    log_train_path = save_path + 'training_log.txt'
    plot_train_path = save_path + 'log.json'

    write_mode = 'w'

    if os.path.exists(log_train_path) and os.path.exists(plot_train_path):
        write_mode = 'a'
        with open(plot_train_path, 'r') as j:
            plot_dict = json.load(j)
            plot_dict['train'] = plot_dict['train'][:last_epoch]
            plot_dict['val'] = plot_dict['val'][:last_epoch]

    # Training
    print('Start warm up')
    model.freeze_except_last()
    for epoch in range(warm_up_epoch):
        warm_up(
            model=model,
            dataloader=warm_loader,
            optimizer=optimizer,
            criterion=criterion,
            device=device,
        )
    model.unfreeze()
    with open(log_train_path, write_mode) as f:
        for epoch in range(1, args.epoch + 1):
            print('Epoch:', epoch + last_epoch)
            f.write('Epoch: %d\n' % (epoch + last_epoch))
            loss = train_epoch(model=model,
                               dataloader=train_loader,
                               optimizer=optimizer,
                               criterion=criterion,
                               device=device,
                               exp=exp)
            RMSE = val_epoch(model=model,
                             dataloader=val_loader,
                             device=device,
                             exp=exp,
                             anchors=anchors)
            if not exp:
                f.write('Training loss: %.4f\n' % (loss))
                f.write('RMSE val: %.4f\n' % (RMSE))
                print('RMSE loss: %.4f' % (loss))
                print('RMSE val: %.4f' % (RMSE))
            else:
                loss, classify, regress = loss
                RMSE, accuracy = RMSE
                f.write('Training loss: %.4f\n' % (loss))
                f.write('Classification loss: %.4f\n' % (classify))
                f.write('Regression loss: %.4f\n' % (regress))
                f.write('Accuracy val: %.4f\n' % (accuracy))
                f.write('RMSE val: %.4f\n' % (RMSE))
                print('Training loss: %.4f' % (loss))
                print('Classification loss: %.4f' % (classify))
                print('Regression loss: %.4f' % (regress))
                print('Accuracy val: %.4f' % (accuracy))
                print('RMSE val: %.4f' % (RMSE))

            # torch.save(model.state_dict(), save_path + 'epoch%d.pth'%(epoch+last_epoch))
            save_name = save_path + 'epoch%d.pth' % (epoch + last_epoch)
            save_pth(save_name, epoch + last_epoch, model, optimizer, args.opt)

            plot_dict['train'].append(loss)
            plot_dict['val'].append(RMSE)
            with open(plot_train_path, 'w') as j:
                json.dump(plot_dict, j)
Example no. 15
                                    num_classes=len(classnames)).to(device)
        elif args.backbone == 'shufflenetv2':
            model = shufflenetv2.ShuffleNetV2(
                anchors, in_size=in_size,
                num_classes=len(classnames)).to(device)
        else:
            print('unknown backbone architecture!')
            sys.exit(0)
        model.load_state_dict(torch.load(args.model, map_location=device))
    else:
        model = torch.load(args.model, map_location=device)
    model.eval()

    decoder = yolov3.YOLOv3EvalDecoder(in_size, len(classnames), anchors)
    transform = dataset.get_transform(train=False,
                                      net_w=in_size[0],
                                      net_h=in_size[1])
    FloatTensor = (torch.cuda.FloatTensor
                   if torch.cuda.is_available() else torch.FloatTensor)

    def process_single_image(filename):
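        # Read the image, convert BGR to RGB, apply the eval-time transform,
        # run the network, then decode and NMS the raw outputs into boxes.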
        bgr = cv2.imread(filename, cv2.IMREAD_COLOR)
        assert bgr is not None, 'cv2.imread({}) fail'.format(filename)
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        x, _ = transform(rgb, None)
        x = x.type(FloatTensor) / norm
        ys = model(x)
        dets = decoder(ys)
        dets = utils.get_network_boxes(dets, bgr.shape[:2], thresh=args.thresh)
        dets = utils.do_nms_sort(dets)
        if args.store:
Example no. 16
def main():
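    # Parse -o/--modelOutputPath, -d/--device and -e/--epochs, then run the
    # standard PennFudan Mask R-CNN training loop and save the weights.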
    savePath = 'model.pth'
    selectedDevice = ''
    # let's train it for 10 epochs
    num_epochs = 10
    try:
        opts, args = getopt.getopt(sys.argv[1:], "o:d:e:",
                                   ["modelOutputPath=", "device=", "epochs="])
    except getopt.error as err:
        print(str(err))
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-o", "--modelOutputPath"):
            if arg.endswith(('.pth', '.pt')):
                savePath = arg
            else:
                print("Input Error: modelOutputPath must end in .pth or .pt")
                sys.exit(2)
        if opt in ("-d", "--device"):
            lower = arg.lower()
            if lower in ("gpu", "cpu"):
                selectedDevice = lower
            else:
                print("Input Error: Device must be either 'gpu' or 'cpu'")
                sys.exit(2)
        if opt in ("-e", "--epochs"):
            epochs = int(arg)
            if epochs > 0:
                num_epochs = epochs

    # train on the GPU or on the CPU, if a GPU is not available
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    # check if user has requested either cpu or gpu processing
    if torch.cuda.is_available() and selectedDevice == '':
        print("CUDA device is detected, using GPU for training and evaluation")
    elif selectedDevice != '':
        if selectedDevice == 'gpu':
            if not torch.cuda.is_available():
                print("Cannot find CUDA driver or device")
                sys.exit(2)
            device = torch.device('cuda')
        if selectedDevice == 'cpu':
            device = torch.device('cpu')

    # our dataset has two classes only - background and person
    num_classes = 2

    # use our dataset and defined transformations
    dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
    dataset_test = PennFudanDataset('PennFudanPed', get_transform(train=False))

    # split the dataset in train and test
    indices = torch.randperm(len(dataset)).tolist()
    dataset = torch.utils.data.Subset(dataset, indices[:-50])
    dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])

    # define training and validation data loaders
    # these organize the process for sending information to the GPU
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=2,
                                              shuffle=True,
                                              num_workers=4,
                                              collate_fn=utils.collate_fn)

    data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                                   batch_size=1,
                                                   shuffle=False,
                                                   num_workers=4,
                                                   collate_fn=utils.collate_fn)

    # get the model using our helper function
    model = get_model_instance_segmentation(num_classes)

    # move model to the selected device
    model.to(device)

    # construct an optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)
    # and a learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)

    for epoch in range(num_epochs):
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model,
                        optimizer,
                        data_loader,
                        device,
                        epoch,
                        print_freq=10)
        # update the learning rate
        lr_scheduler.step()
        # evaluate on the test dataset
        evaluate(model, data_loader_test, device=device)

    # save the model to the current directory
    torch.save(model.state_dict(), savePath)
    print("Finished")