Example #1
        label_species = int(self.file_info.iloc[idx]['species'])
        label_classes = int(self.file_info.iloc[idx]['classes'])

        sample = {
            'image': image,
            'species': label_species,
            'classes': label_classes
        }
        if self.transform:
            sample['image'] = self.transform(image)
        return sample


train_transforms = transforms.Compose([
    transforms.Resize((500, 500)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])  # RandomHorizontalFlip: flip with probability 0.5; expects a PIL image
val_transforms = transforms.Compose(
    [transforms.Resize((500, 500)),
     transforms.ToTensor()])

train_dataset = MyDataset(root_dir=ROOT_DIR + TRAIN_DIR,
                          annotations_file=TRAIN_ANNO,
                          transform=train_transforms)

test_dataset = MyDataset(root_dir=ROOT_DIR + VAL_DIR,
                         annotations_file=VAL_ANNO,
                         transform=val_transforms)

train_loader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
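A minimal smoke test for the pipeline above (a sketch: it assumes MyDataset yields the dict built in __getitem__, and the tensor shape assumes 3-channel input images):

for batch in train_loader:
    images = batch['image']      # e.g. torch.Size([64, 3, 500, 500]) after Resize/ToTensor
    species = batch['species']   # integer species labels, collated into a tensor
    classes = batch['classes']   # integer class labels
    print(images.shape, species[:4], classes[:4])
    break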
Example #2
def corrupted_cifar_uncertainty(method,
                                batch_size,
                                use_extra_corruptions=False,
                                dataset='cifar10',
                                normalize=False):
    assert dataset in ['cifar100', 'cifar10']

    if dataset == 'cifar10':
        dataset_cls = CorruptedCifar10
        t = [
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010))
        ]
    else:
        dataset_cls = CorruptedCifar100
        t = [
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4867, 0.4408),
                                 (0.2675, 0.2565, 0.2761))
        ]

    if use_extra_corruptions:
        # materialize the chain so the iterator can be traversed more than once below
        corruptions = list(chain(BENCHMARK_CORRUPTIONS, EXTRA_CORRUPTIONS))
    else:
        corruptions = list(BENCHMARK_CORRUPTIONS)

    scores = {name: {} for name in corruptions}
    entropy = {name: {} for name in corruptions}
    preds = {name: {} for name in corruptions}
    eces = {name: {} for name in corruptions}

    true_labels = None

    for name in corruptions:
        for severity in range(1, 6):
            # build a loader for this corruption at this severity
            loader = torch.utils.data.DataLoader(dataset=dataset_cls(
                './datasets/',
                download=True,
                corruption=name,
                severity=severity,
                transform=transforms.Compose(t)),
                                                 batch_size=batch_size,
                                                 shuffle=False,
                                                 pin_memory=True,
                                                 num_workers=4)

            ece, _, _, _ = ece_score(method, loader)

            eces[name][severity] = ece
            scores[name][severity] = eval_method(method, dataset=loader)

            probs, labels, predictions = get_logits(method, loader)
            if true_labels is None:
                true_labels = labels

            hs = []
            for x, y in loader:
                # true.extend(y.tolist())
                p, _ = method.predict_proba(x, y, True)

                plog = (p + 1e-12).log()
                h = plog * p
                if normalize:
                    # divide by log(K) so the final entropy lies in [0, 1]
                    h = h / np.log(h.shape[-1])
                h = -torch.sum(h, -1)
                hs.extend(h.tolist())

            entropy[name][severity] = hs
            preds[name][severity] = predictions

    return entropy, preds, scores, true_labels
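A hedged call sketch (method is whatever uncertainty-aware model the surrounding project passes in; it must expose predict_proba as used above):

entropy, preds, scores, true_labels = corrupted_cifar_uncertainty(
    method, batch_size=128, dataset='cifar10', normalize=True)
# mean normalized entropy at the strongest corruption level (severity 5)
mean_entropy = {name: float(np.mean(sev[5])) for name, sev in entropy.items()}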
Example #3
from sklearn.preprocessing import OneHotEncoder

# image paths
train_list = sorted([
    os.path.join('dataset1/train/', img)
    for img in os.listdir('dataset1/train/')
])
trainGT_list = sorted([
    os.path.join('dataset1/train_GT/SEG/', img)
    for img in os.listdir('dataset1/train_GT/SEG/')
])
imgsize = 628
# image transforms
data_transforms = transforms.Compose([
    #transforms.RandomHorizontalFlip()
    #transforms.Pad(92,padding_mode='symmetric'),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
])
datalabel_transforms = transforms.Compose([
    transforms.ToTensor(),
])


# load images as numpy arrays
def loadpro_img(file_names):
    images = []
    for file_name in file_names:
        img = cv2.imread(file_name, -1)  # read unchanged; the default flag 1 would load as 3-channel color
        img = img.astype(np.uint8)
        #img = Image.fromarray(img)
        images.append(img)
    return images
Example #4
        if True:  # reparameterization trick: z = mu + std * eps
            std = tch.exp(0.5 * logvar)
            eps = tch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:  # dead branch here; typically taken at eval time (z = mu)
            return mu


def loss_function(recon_x, x, mu, logvar):
    # reconstruction term (summed BCE) plus KL divergence to the unit Gaussian
    BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    KLD = -0.5 * tch.sum(1 + logvar - mu.pow(2) - logvar.exp())

    return BCE + KLD


tr = transforms.Compose([transforms.ToTensor()])

train = MNIST('./data', transform=tr, download=True)
test = MNIST('./data', transform=tr, train=False, download=True)

epoch = 10
batch_size = 30
train = DataLoader(train, batch_size, True)
test = DataLoader(test, batch_size, False)
vae = VAE((1, 28, 28), 12)
sgd = tch.optim.RMSprop(vae.parameters())
mse = nn.MSELoss(reduction='sum')  # size_average was removed in modern PyTorch
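For reference, a single optimization step might look like this (a sketch, assuming vae(x) returns (reconstruction, mu, logvar), the common VAE forward signature):

x, _ = next(iter(train))                   # one MNIST batch of shape (30, 1, 28, 28)
recon, mu, logvar = vae(x)                 # assumed forward signature
loss = loss_function(recon, x, mu, logvar)
sgd.zero_grad()
loss.backward()
sgd.step()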

for j in range(epoch):
    con = 0
    dis = 0
Example #5
def gen_transform_validation(crop_size=448):
    return transforms.Compose([
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5], std=[0.5])
    ])
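The single-channel Normalize implies grayscale input; a hedged usage sketch with a hypothetical image path:

from PIL import Image
tf = gen_transform_validation(crop_size=448)
img = Image.open('sample.png').convert('L')  # hypothetical file; 'L' matches the 1-channel stats
x = tf(img).unsqueeze(0)                     # shape (1, 1, 448, 448)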
Example #6
def build_transforms(cfg):
    # cfg is currently unused; Compose requires a sequence of transforms
    return transforms.Compose([transforms.ToTensor()])
import argparse
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.models import resnet18
from torchvision.transforms import transforms
from tensorboardX import SummaryWriter

writer = SummaryWriter()
transform = transforms.Compose([
    transforms.Resize(255),
    transforms.RandomCrop(224),
    transforms.ToTensor()
])

parser = argparse.ArgumentParser()
parser.add_argument('--dataset-dir',
                    type=str,
                    default='PKUMMDv1',
                    help='dataset directory')
parser.add_argument('--gpu',
                    default=False,
                    action='store_true',
                    help='whether to use gpus for training')
parser.add_argument('--batch-size', type=int, default=128, help='batch size')
parser.add_argument('--learning-rate',
                    type=float,
Example #8
def main(args, cfgs):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')
    ############################################
    class_names = cfgs["class_names"]
    n_classes = cfgs["n_classes"]
    multi_gpu = cfgs["gpus"]
    Net = cfgs["net_name"]
    image_root = cfgs["image_root"]
    epochs = cfgs["epochs"]
    batch_size_train = cfgs["batch_size_train"]
    checkpoint_dir = cfgs["checkpoint_dir"]
    checkpoint_space = cfgs["checkpoint_space"]
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    ############################################
    validate_flag = args.validate
    model_load_path = args.checkpoint_path
    ############################################
    try:
        if Net == "deeplab":
            net = DeepLab(num_classes=n_classes)
        elif Net == "resunet":
            net = ResUNet(3, n_classes)
            net.apply(weights_init)
        elif Net == "unet":
            net = UNet(n_channels=3, n_classes=n_classes)
        elif Net == "R2U_Net":
            net = R2U_Net(img_ch=3, output_ch=n_classes, t=3)
        elif Net == "AttU_Net":
            net = AttU_Net(img_ch=3, output_ch=n_classes)
        elif Net == "R2AttU_Net":
            net = R2AttU_Net(img_ch=3, output_ch=n_classes, t=3)
        elif Net == "U_Net":
            net = U_Net(img_ch=3, output_ch=n_classes)
        else:
            raise ValueError(f"Unknown net_name: {Net}")
        net.to(device=device)
        if multi_gpu > 1:
            net = nn.parallel.DataParallel(net)
        ############################################
        optimizer = optim.Adam(net.parameters())
        if n_classes > 1:
            criterion = nn.CrossEntropyLoss()
            # criterion = MultiClassDiceLoss()
        else:
            criterion = nn.BCEWithLogitsLoss()
        ############################################
        # todo transformer
        transform_train = transforms.Compose([
            #transforms.Resize((307, 409)),
            transforms.ToTensor(),
        ])
        transform_val = transforms.Compose([
            #transforms.Resize((307, 409)),
            transforms.ToTensor(),
        ])
        ############################################
        n_folds = 4  # per type: 1/4 of subjects for validation, 1/4 for test, the rest for training
        imageDir = os.path.join(image_root, "digestpath_img_patch")
        #maskDir = os.path.join(image_root, "digestpath_mask_patch")
        typeNames = ["normal", "low level", "high level"]
        #typeNames = [ "high level"]
        trainImagePaths = []
        validImagePaths = []
        testImagePaths = []
        for typeName in typeNames:
            print(typeName)
            subDir = os.path.join(imageDir, typeName)
            subjectIds = os.listdir(subDir)
            tmpIndex1 = len(subjectIds) // n_folds
            tmpIndex2 = len(subjectIds) // n_folds * 2

            for subjectId in subjectIds[tmpIndex2:]:
                subjectDir = os.path.join(subDir, subjectId)
                for fileName in os.listdir(subjectDir):
                    filePath = os.path.join(subjectDir, fileName)
                    trainImagePaths.append(filePath)
            for subjectId in subjectIds[:tmpIndex1]:
                subjectDir = os.path.join(subDir, subjectId)
                for fileName in os.listdir(subjectDir):
                    filePath = os.path.join(subjectDir, fileName)
                    validImagePaths.append(filePath)
            for subjectId in subjectIds[tmpIndex1:tmpIndex2]:
            #for subjectId in subjectIds:
                subjectDir = os.path.join(subDir, subjectId)
                for fileName in os.listdir(subjectDir):
                    filePath = os.path.join(subjectDir, fileName)
                    testImagePaths.append(filePath)
        trainMaskPaths = [p.replace("/digestpath_img_patch/", "/digestpath_mask_patch/")[:-4]+".npy" for p in trainImagePaths]
        validMaskPaths = [p.replace("/digestpath_img_patch/", "/digestpath_mask_patch/")[:-4]+".npy" for p in validImagePaths]
        testMaskPaths = [p.replace("/digestpath_img_patch/", "/digestpath_mask_patch/")[:-4]+".npy" for p in testImagePaths]
        print("train-set #", len(trainMaskPaths))
        print("valid-set #", len(validMaskPaths))
        print("test-set #", len(testMaskPaths))
        ############################################
        train_set = wsiDataset(trainImagePaths, trainMaskPaths, transform_train)
        train_loader = DataLoader(train_set, batch_size=batch_size_train, num_workers=4, shuffle=True)
        val_set = wsiDataset(validImagePaths, validMaskPaths, transform_val)
        val_loader = DataLoader(val_set, batch_size=1, num_workers=4, shuffle=False)
        test_set = wsiDataset(testImagePaths, testMaskPaths, transform_val)
        test_loader = DataLoader(test_set, batch_size=1, num_workers=4, shuffle=False)
        ############################################
        if validate_flag:
            if os.path.exists(model_load_path):
                net.load_state_dict(torch.load(model_load_path, map_location = device))
                logging.info(f'Checkpoint loaded from {model_load_path}')
                validate_args = {"n_classes": n_classes, "checkpoint_dir": checkpoint_dir, "checkpoint_space": checkpoint_space}
                diceRes, iouRes = validate(test_loader, net, criterion, device, validate_args)
                logging.info(f'Test-dataset dice: {diceRes}')
                logging.info(f'Test-dataset iou: {iouRes}')
            else:
                logging.info('No such checkpoint!')
        else:
            # todo lr_scheduler
            train_args = {"n_classes": n_classes, "epochs": epochs, "checkpoint_dir": checkpoint_dir, "checkpoint_space": checkpoint_space}
            train(train_loader, val_loader, net, criterion, optimizer, device, train_args)
    except KeyboardInterrupt:
        torch.save(net.state_dict(), os.path.join(checkpoint_dir,'INTERRUPTED.pth'))
        logging.info('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
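A hedged entry point for the main above (get_args and the --config argument are hypothetical names; the snippet only shows that main expects argparse args plus a cfgs dict):

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    args = get_args()                        # hypothetical argparse helper
    with open(args.config) as f:             # hypothetical config-path argument
        cfgs = json.load(f)
    main(args, cfgs)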
Example #9
    def __init__(self, root=r"C:\datasets\taco\tensors", augment_prob=0.0, reduce=0.0, image_size=84,
                 tensors=True,
                 random_seed=42, **kwargs):
        self.reduce = reduce
        self.tensors = tensors
        random.seed(random_seed)

        resize_train = transforms.Compose(
            [
                transforms.Resize(image_size),
                transforms.CenterCrop(image_size),
            ]
        )
        resize_test = transforms.Compose(
            [
                transforms.Resize(image_size),
                transforms.CenterCrop(image_size),
            ]
        )

        augment = transforms.Compose(
            [
                # transforms.RandomRotation(degrees=15),
                transforms.RandomHorizontalFlip(p=0.5),
                # transforms.ColorJitter(),
                # transforms.RandomPerspective(p=0.2, distortion_scale=0.25),
            ]
        )

        normalize = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(mean=MEAN, std=STD)
            ]
        )

        self.test_transform = transforms.Compose(
            [
                resize_test,
                normalize
            ]
        )
        self.train_transform = transforms.Compose(
            [
                resize_train,
                transforms.RandomApply([augment], p=augment_prob),
                normalize
            ]
        )
        if not tensors:
            self.source_dataset_train = torchvision.datasets.ImageFolder(root=root)
        else:
            self.source_dataset_train = torchvision.datasets.DatasetFolder(root=root, loader=tensor_loader,
                                                                           extensions=('pt',))

        self.dataset_train_size = len(self.source_dataset_train)
        items = []
        labels = []
        for i in range(self.dataset_train_size):
            items.append(ImageItem(self.source_dataset_train, i))
            labels.append(self.source_dataset_train[i][1])
        is_test = [0] * self.dataset_train_size

        super(TacoDataset, self).__init__(items, labels, is_test)

        self.train_subdataset, self.test_subdataset = self.subdataset.train_test_split()

        if reduce < 1:
            self.train_subdataset = self.train_subdataset.downscale(1 - reduce)
        else:
            self.train_subdataset = self.train_subdataset.balance(reduce)
                query_dict[query_part] = np.random.choice(
                    [query_image for query_image in glob(os.path.join(query_folder, '%03d_*.png') % int(query_part))],
                    arg.num_te)
            else:
                query_dict[query_part] = [query_image for query_image in glob(os.path.join(query_folder, '%03d_*.png') % int(query_part))]

    else:
        query_part_set = set([part_image.split('_')[0] for part_image in os.listdir(query_folder)])
        query_dict = dict()
        for query_part in query_part_set:
            query_dict[query_part] = [query_image for query_image in glob(os.path.join(cad_base, furniture, query_part, '*.png'))]

    ## Input surface vs Query surface (Similarity Comparison)
    # Transformation
    transform_input = transforms.Compose([
        transform_.Padded(),
        transform_.RandomResize(max_size=512, ratio=1.5),
        transforms.Resize([512, 512]),
        transform_.Binary(criterion=150),
        transforms.ToTensor(),
        transform_.MintoMax()
    ])

    transform_query = transforms.Compose([
        transforms.Resize([512, 512]),
        transform_.Binary(criterion=150),
        transforms.ToTensor(),
        transform_.MintoMax()
    ])

    transform3D = transforms.Compose([
        transforms.Resize([512, 512]),
        transforms.ToTensor(),
    ])
import os
import glob
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from natsort import natsorted
from models import resmasking_dropout1
from utils.datasets.fer2013dataset import EMOTION_DICT
from barez import show

transform = transforms.Compose(
    [
        transforms.ToPILImage(),
        transforms.ToTensor(),
    ]
)


def activations_mask(tensor):
    tensor = torch.squeeze(tensor, 0)
    tensor = torch.mean(tensor, 0)
    tensor = tensor.detach().cpu().numpy()
    tensor = np.maximum(tensor, 0)
    tensor = cv2.resize(tensor, (224, 224))
    tensor = tensor - np.min(tensor)
    tensor = tensor / np.max(tensor)

    heatmap = cv2.applyColorMap(np.uint8(255 * tensor), cv2.COLORMAP_JET)
    return heatmap
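A hedged overlay sketch: blending the heatmap over the frame it was computed from (feature_tensor and img_bgr are hypothetical names; both images must be 224x224 uint8):

heatmap = activations_mask(feature_tensor)               # feature_tensor: an activation map from the network
overlay = cv2.addWeighted(img_bgr, 0.6, heatmap, 0.4, 0)  # weighted blend of frame and heatmap
cv2.imwrite('activation_overlay.png', overlay)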
Example #12
# Title: test of the DSB2018CellDataset dataset.
# URL:
# Date: 2020-12-23
#

import torch
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from unetzoo.dataset.dataset import DSB2018CellDataset


if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    x_transforms = transforms.Compose([
        transforms.ToTensor(),  # -> [0,1]
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  # -> [-1,1]
    ])

    # masks only need to be converted to tensors
    y_transforms = transforms.ToTensor()

    train_dataset = DSB2018CellDataset(r"train", transform=x_transforms, target_transform=y_transforms)
    train_dataloaders = DataLoader(train_dataset, batch_size=8)
    val_dataset = DSB2018CellDataset(r"val", transform=x_transforms, target_transform=y_transforms)
    val_dataloaders = DataLoader(val_dataset, batch_size=1)
    test_dataloaders = val_dataloaders

    print(len(train_dataloaders.dataset))

    for x, y, _, mask in train_dataloaders:
        inputs = x
Example #13
os.environ["CUDA_VISIBLE_DEVICES"] = '0'

init_seeds(0)
model = SVHN_Model1().cuda()
test_path = sorted(glob.glob('D:/Projects/wordec/input/test/*.png'))
# test_json = json.load(open('../input/test.json'))
test_label = [[1]] * len(test_path)
# print(len(test_path), len(test_label))

test_loader = torch.utils.data.DataLoader(
    SVHNDataset(test_path, test_label,
                transforms.Compose([
                    transforms.Resize((70, 140)),
                    # transforms.RandomCrop((60, 120)),
                    # transforms.ColorJitter(0.3, 0.3, 0.2),
                    # transforms.RandomRotation(5),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [
                                         0.229, 0.224, 0.225])
                ])),
    batch_size=40,
    shuffle=False,
    num_workers=0,
)

# load the saved best model
model.load_state_dict(torch.load('D:/Projects/wordec/model.pt'))

test_predict_label = predict(test_loader, model, 1)
print(test_predict_label.shape)
print('test_predict_label', test_predict_label)
def main():
    args = parse_args()
    FRAME_LIMIT = args.frame_limit
    VIDEO_FILE = args.video_file
    DRAW_BOXES = args.draw_boxes
    SCORES_FILE = args.scores_file
    FOLDER = args.output_folder
    OUTPUT_VIDEO = args.output_video
    FACE_DETECTOR = args.face_detector
    USE_WEBCAM = args.use_webcam
    TRACK_BOXES = args.track_boxes
    THRESHOLD = args.threshold

    # load configs and set random seed
    configs = json.load(open("./configs/fer2013_config.json"))
    image_size = (configs["image_size"], configs["image_size"])

    transform = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
    detector = load_face_detector(FACE_DETECTOR)

    model = load_model()

    vid = cv2.VideoCapture(0)

    with torch.no_grad():
        if USE_WEBCAM:
            while True:
                ret, img = vid.read()
                if not ret or img is None:
                    continue

                faces = detector.detect_faces(img, THRESHOLD)
                img, _ = detect_emotions(model, img, faces, image_size, transform, DRAW_BOXES, TRACK_BOXES)

                cv2.imshow("img", img)
                if cv2.waitKey(1) == ord("q"):
                    break

            cv2.destroyAllWindows()
        else:
            print(f">> Reading video from {VIDEO_FILE}")
            frames = collect_frames(VIDEO_FILE, FRAME_LIMIT)

            if TRACK_BOXES:
                df = pd.read_csv(SCORES_FILE, header=None, index_col=None, names=[
                "track_id", "frame_id", "box_id", "x", "y", "x2", "y2"] + list(range(80)))
                df["abs_frame_id"] = df.frame_id + df.track_id - 128
                df_faces = []

                print(">> Detecting emotions")
                for i, frame in tqdm(enumerate(frames), total=len(frames)):
                    faces = match_faces_bodies(
                            frame,
                            df[df.abs_frame_id == i],
                            detector=detector,
                            threshold=THRESHOLD
                    )
                    frames[i], new_df_faces = detect_emotions(model, frame, faces, image_size, transform, DRAW_BOXES, TRACK_BOXES)

                    df_faces += new_df_faces

                df_faces = pd.DataFrame(df_faces)
                print(f">> Saving box files at {FOLDER}")

                if os.path.exists(FOLDER):
                    shutil.rmtree(FOLDER)
                os.mkdir(FOLDER)
                box_ids = df_faces.box_id.unique()
                for box_id in tqdm(box_ids, total=len(box_ids)):
                    with open(os.path.join(FOLDER, f"person{box_id}.txt"), "w") as f:
                        for _, frame in df_faces[df_faces.box_id == box_id].iterrows():
                            bbox = str([frame.x, frame.y, frame.x2, frame.y2]).replace(" ", "").strip("[]")
                            emotion = list(frame.emotion.keys())[0]
                            f.write(f"{frame.frame_id},{bbox},{emotion},{frame.emotion[emotion]}\n")

            else:
                print(">> Detecting emotions")
                for i, frame in tqdm(enumerate(frames), total=len(frames)):
                    faces = detector.detect_faces(frame, THRESHOLD)
                    frames[i], _ = detect_emotions(model, frame, faces, image_size, transform, DRAW_BOXES, TRACK_BOXES)

            print(f">> Creating video at {OUTPUT_VIDEO}")
            height, width, _ = frames[0].shape
            out = cv2.VideoWriter(OUTPUT_VIDEO, cv2.VideoWriter_fourcc(*"mp4v"), 25, (width, height), True)
            for frame in tqdm(frames, total=len(frames)):
                out.write(frame)
            cv2.destroyAllWindows()
            out.release()
Example #15
    "xresnet18": xresnet18ssa,
    "xresnet34": xresnet34ssa
}
Model = model_dict[args.model]

train_df = pd.read_csv(args.train_df_path)
processed = pickle.load(open(args.train_processed_path, "rb"))
test_fns = sorted(os.listdir(args.test_dir))
test_df = pd.DataFrame()
test_df["File"] = test_fns
processed_test = pickle.load(open(args.test_processed_path, "rb"))

transforms_dict = {
    'train':
    transforms.Compose([
        transforms.RandomHorizontalFlip(0.5),
        transforms.ToTensor(),
    ]),
    'test':
    transforms.Compose([
        # a random flip at test time only makes sense as test-time augmentation
        transforms.RandomHorizontalFlip(0.5),
        transforms.ToTensor(),
    ]),
}

x_train = processed
y_train = train_df.Label.values.astype(np.int64)  # np.long was removed from NumPy
x_test = processed_test
splits = list(
    StratifiedKFold(n_splits=5, shuffle=True,
                    random_state=SEED).split(x_train, y_train))
Example #16
def get_transforms(config, image_size=None):
    config = config.get_dictionary()
    if image_size is None:
        # fall back to the estimator's preferred size, or 32 if unknown
        image_size = resize_size_dict.get(config['estimator'], 32)

    val_transforms = transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
    ])
    if parse_bool(config['aug']):
        if parse_bool(config['auto_aug']):
            # from .transforms import AutoAugment
            data_transforms = {
                'train':
                transforms.Compose([
                    # AutoAugment(),
                    transforms.Resize(image_size),
                    transforms.RandomCrop(image_size,
                                          padding=int(image_size / 8)),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                ]),
                'val':
                val_transforms,
            }
        else:
            transform_list = []
            if parse_bool(config['jitter']):
                transform_list.append(
                    transforms.ColorJitter(brightness=config['brightness'],
                                           saturation=config['saturation'],
                                           hue=config['hue']))
            if parse_bool(config['affine']):
                transform_list.append(
                    transforms.RandomAffine(degrees=config['degree'],
                                            shear=config['shear']))

            transform_list.append(transforms.RandomResizedCrop(image_size))
            transform_list.append(transforms.RandomCrop(image_size, padding=4))

            if parse_bool(config['random_flip']):
                transform_list.append(transforms.RandomHorizontalFlip())

            transform_list.append(transforms.ToTensor())

            data_transforms = {
                'train': transforms.Compose(transform_list),
                'val': val_transforms
            }
    else:
        data_transforms = {
            'train':
            transforms.Compose([
                transforms.Resize(image_size),
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
            ]),
            'val':
            val_transforms,
        }
    return data_transforms
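A hedged call sketch (config is assumed to be a ConfigSpace-style object exposing get_dictionary(), as the first line of get_transforms implies):

tfs = get_transforms(config, image_size=224)
train_tf, val_tf = tfs['train'], tfs['val']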
    def __init__(
        self,
        c,
        nof_joints,
        checkpoint_path,
        model_name='HRNet',
        resolution=(384, 288),
        interpolation=cv2.INTER_CUBIC,
        multiperson=True,
        return_heatmaps=False,
        return_bounding_boxes=False,
        max_batch_size=32,
        yolo_model_def="./models/detectors/yolo/config/yolov3.cfg",
        yolo_class_path="./models/detectors/yolo/data/coco.names",
        yolo_weights_path="./models/detectors/yolo/weights/yolov3.weights",
        device=torch.device("cpu")):
        """
        Initializes a new SimpleHRNet object.
        HRNet (and, when multiperson detection is enabled, YOLOv3) is initialized
        on the given torch.device and its pre-trained weights are loaded from disk.

        Args:
            c (int): number of channels (when using HRNet model) or resnet size (when using PoseResNet model).
            nof_joints (int): number of joints.
            checkpoint_path (str): path to an official hrnet checkpoint or a checkpoint obtained with `train_coco.py`.
            model_name (str): model name (HRNet or PoseResNet).
                Valid names for HRNet are: `HRNet`, `hrnet`
                Valid names for PoseResNet are: `PoseResNet`, `poseresnet`, `ResNet`, `resnet`
                Default: "HRNet"
            resolution (tuple): hrnet input resolution - format: (height, width).
                Default: (384, 288)
            interpolation (int): opencv interpolation algorithm.
                Default: cv2.INTER_CUBIC
            multiperson (bool): if True, multiperson detection will be enabled.
                This requires the use of a people detector (like YOLOv3).
                Default: True
            return_heatmaps (bool): if True, heatmaps will be returned along with poses by self.predict.
                Default: False
            return_bounding_boxes (bool): if True, bounding boxes will be returned along with poses by self.predict.
                Default: False
            max_batch_size (int): maximum batch size used in hrnet inference.
                Ignored unless multiperson=True.
                Default: 32
            yolo_model_def (str): path to yolo model definition file.
                Default: "./models/detectors/yolo/config/yolov3.cfg"
            yolo_class_path (str): path to yolo class definition file.
                Default: "./models/detectors/yolo/data/coco.names"
            yolo_weights_path (str): path to yolo pretrained weights file.
                Default: "./models/detectors/yolo/weights/yolov3.weights.cfg"
            device (:class:`torch.device`): the hrnet (and yolo) inference will be run on this device.
                Default: torch.device("cpu")
        """

        self.c = c
        self.nof_joints = nof_joints
        self.checkpoint_path = checkpoint_path
        self.model_name = model_name
        self.resolution = resolution  # in the form (height, width) as in the original implementation
        self.interpolation = interpolation
        self.multiperson = multiperson
        self.return_heatmaps = return_heatmaps
        self.return_bounding_boxes = return_bounding_boxes
        self.max_batch_size = max_batch_size
        self.yolo_model_def = yolo_model_def
        self.yolo_class_path = yolo_class_path
        self.yolo_weights_path = yolo_weights_path
        self.device = device

        if model_name in ('HRNet', 'hrnet'):
            self.model = HRNet(c=c, nof_joints=nof_joints)
        elif model_name in ('PoseResNet', 'poseresnet', 'ResNet', 'resnet'):
            self.model = PoseResNet(resnet_size=c, nof_joints=nof_joints)
        else:
            raise ValueError('Wrong model name.')

        checkpoint = torch.load(checkpoint_path, map_location=self.device)
        if 'model' in checkpoint:
            self.model.load_state_dict(checkpoint['model'])
        else:
            self.model.load_state_dict(checkpoint)

        if 'cuda' in str(self.device):
            print("device: 'cuda' - ", end="")

            if 'cuda' == str(self.device):
                # if device is set to 'cuda', all available GPUs will be used
                print("%d GPU(s) will be used" % torch.cuda.device_count())
                device_ids = None
            else:
                # if device is set to 'cuda:IDS', only that/those device(s) will be used
                print("GPU(s) '%s' will be used" % str(self.device))
                device_ids = [int(x) for x in str(self.device)[5:].split(',')]

            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=device_ids)
        elif 'cpu' == str(self.device):
            print("device: 'cpu'")
        else:
            raise ValueError('Wrong device name.')

        self.model = self.model.to(device)
        self.model.eval()

        if not self.multiperson:
            self.transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ])

        else:
            self.detector = YOLOv3(model_def=yolo_model_def,
                                   class_path=yolo_class_path,
                                   weights_path=yolo_weights_path,
                                   classes=('person', ),
                                   max_batch_size=self.max_batch_size,
                                   device=device)
            self.transform = transforms.Compose([
                transforms.ToPILImage(),
                transforms.Resize((self.resolution[0],
                                   self.resolution[1])),  # (height, width)
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ])
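A hedged usage sketch based on the docstring above (predict is documented but not shown here, so its output format is an assumption; the checkpoint and image paths are hypothetical):

import cv2
model = SimpleHRNet(c=48, nof_joints=17,
                    checkpoint_path='weights/pose_hrnet_w48_384x288.pth')  # hypothetical checkpoint
image = cv2.imread('person.jpg')              # hypothetical BGR frame
joints = model.predict(image)                 # assumed: per-person (nof_joints, 3) keypoint arrays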
        x = self.layerblock3(x)
        x = self.layerblock4(x)
        x = self.layerblock5(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x


if __name__ == "__main__":

    filepath = './CIFAR10_dataset'
    #data loading and augmentation
    #cifar 10 normalization parameter https://arxiv.org/pdf/1909.12205.pdf
    train_transformations = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, padding=4),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))
    ])
    test_transformations = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))
    ])
    train_dataset = CIFAR10(root=filepath,
                            train=True,
                            download=True,
                            transform=train_transformations)
    test_dataset = CIFAR10(root=filepath,
                           train=False,
                           download=True,
                           transform=test_transformations)
    batch_size = 100
        plt.subplot(sqrtn, sqrtn, index+1)
        plt.imshow(image.reshape(28, 28))


device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print('GPU State:', device)
G = generator().to(device)
D = discriminator().to(device)
print(G)
print(D)
epochs = 200
lr = 0.0002
batch_size = 64
g_optimizer = optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999))
d_optimizer = optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999))
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])
train_set = datasets.MNIST('mnist/', train=True, download=False, transform=transform)
test_set = datasets.MNIST('mnist/', train=False, download=False, transform=transform)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)


# Train
for epoch in range(epochs):
    epoch += 1

    for time, data in enumerate(train_loader):
        time += 1
        true_input = data[0].to(device)
        test = 255 * (0.5 * true_input[0] + 0.5)
Example #20
from pfrl import replay_buffers, explorers, experiments, q_functions
from pfrl.agents import DQN
from pfrl.q_functions import DiscreteActionValueHead

from torch import nn, optim
from torchvision.datasets import CIFAR10
from torchvision.transforms import transforms
import numpy as np
from tqdm import tqdm, trange

from rl.env_d import AttackEnv


if __name__ == '__main__':
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    test_dataset = CIFAR10('data', train=False, transform=transform, download=False)

    image_size = 32 * 32
    n_classes = 10
    max_episodes = 20
    max_episode_len = 1000

    env = AttackEnv()

    obs_space = env.observation_space
    action_space = env.action_space

    obs_size = obs_space.low.size

    n_actions = action_space.n
Example #21
    ('arch', 'lenet5'),
    ('n_classes', 10),
    ('input_shape', (1, 28, 28)),
])

data_config = OrderedDict([
    ('dataset', 'SplitFashionMNIST'),
    ('valid', 0.0),
    ('num_workers', 4),
    (
        'train_transform',
        transforms.Compose([
            lambda x: np.array(x).reshape((1, 28, 28)),
            lambda x: np.pad(x, (
                (0, 0), (2, 2),
                (2, 2)), mode='minimum'),  # Padding is only required by LeNet
            lambda x: torch.FloatTensor(x),
            lambda x: x / 255.0,
            transforms.Normalize(np.array([0.1307]), np.array([0.3081]))
        ])),
    (
        'test_transform',
        transforms.Compose([
            lambda x: np.array(x).reshape((1, 28, 28)),
            lambda x: np.pad(x, (
                (0, 0), (2, 2),
                (2, 2)), mode='minimum'),  # Padding is only required by LeNet
            lambda x: torch.FloatTensor(x),
            lambda x: x / 255.0,
            transforms.Normalize(np.array([0.1307]), np.array([0.3081]))
        ])),
Example #22
        format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
        format_string += ', xscale={0}'.format(
            tuple(round(s, 4) for s in self.xscale))
        format_string += ', yscale={0}'.format(
            tuple(round(r, 4) for r in self.yscale))
        format_string += ', interpolation={0})'.format(interpolate_str)
        return format_string


transform = {
    'train':
    transforms.Compose([
        transforms.RandomRotation(12, resample=Image.BILINEAR),
        CenterRandomCrop(size, xscale=(0.6, 1.0), aspect_ratio=(1, 1.4)),
        transforms.ColorJitter(0.2, 0.1, 0.1, 0.04),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ]),
    'val':
    transforms.Compose([
        transforms.CenterCrop((512, 640)),
        transforms.Resize(size, interpolation=Image.BILINEAR),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
}

if torch.cuda.is_available():
    device = torch.device("cuda:0")
Example #23
from data_loader.DataPath import DataPath
from framework.loss import Loss
from config import ParallelConfig
from framework.segmentation.Mask import Mask
from segmentation.loss.neighbour_diff import NeighbourDiffLoss
from framework.segmentation.unet import UNetSegmentation
from viz.visualization import show_segmentation, show_images

LABELS_URL = 'https://s3.amazonaws.com/mlpipes/pytorch-quick-start/labels.json'
labels = {int(key):value for (key, value)
          in requests.get(LABELS_URL).json().items()}

transform = transforms.Compose([
    transforms.Resize(128),
    transforms.CenterCrop(128),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

mask_transform = transforms.Compose([
    transforms.Resize(128),
    transforms.CenterCrop(128),
    transforms.ToTensor()
])


# trainset = CocoStuff10k(root="/home/nazar/PycharmProjects/coco", transform=transform)
trainset = Cityscapes(DataPath.HOME_STREET, transform=transform, target_transform=mask_transform)

trainloader = torch.utils.data.DataLoader(trainset, batch_size=20, shuffle=True, num_workers=12)
    F.cuda()

# load saved models
G.load_state_dict(torch.load('../save/G.pth'))
F.load_state_dict(torch.load('../save/F.pth'))

# set to eval states
G.eval()
F.eval()

Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor
input_X = Tensor(batch_size, input_nc, image_size, image_size)
input_Y = Tensor(batch_size, output_nc, image_size, image_size)

# bind to a new name so the torchvision `transforms` module is not shadowed
transform = transforms.Compose([
    transforms.Resize((image_size, image_size)),
    transforms.ToTensor(),
])

dataset = ImageDataset(dataroot=dataroot, transforms=transform, mode='test')
dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

output_dir_X = '../output/X'
output_dir_Y = '../output/Y'
output_dir_recover = '../output/recover'

if not os.path.exists(output_dir_X):
    os.makedirs(output_dir_X)
if not os.path.exists(output_dir_Y):
    os.makedirs(output_dir_Y)
if not os.path.exists(output_dir_recover):
    os.makedirs(output_dir_recover)
Example #25
def gen_transform_test_multi(crop_size=448):
    return transforms.Compose([
        transforms.RandomCrop(crop_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5], std=[0.5])
      ])
Example #26
#save = "/DB/VCDB/all_frames/resnet50-rmac"

if not os.path.exists(base):
    print("base '{}' is not exist.".format(base))
    exit()
if not os.path.exists(save):
    os.makedirs(os.path.join(save, 'v-features'))
    os.makedirs(os.path.join(save, 'f-features'))
    os.makedirs(os.path.join(save2, 'v-features'))
    os.makedirs(os.path.join(save2, 'f-features'))

videos = os.listdir(base)
videos.sort(key=int)
normalize = trn.Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225])
video_trn = trn.Compose([trn.Resize(224), trn.ToTensor(), normalize])

model2 = resnet50(pretrained=True)
model2 = torch.nn.Sequential(*list(model2.children())[:-1])  # drop the fc head: 2048-dimensional pooled features
model2.cuda()
model2 = torch.nn.DataParallel(model2)
summary(model2, (3, 224, 224))
model2.eval()

model = Resnet50_RMAC()
model.cuda()
model = torch.nn.DataParallel(model)
summary(model, (3, 224, 224))
model.eval()

with torch.no_grad():
    def __getitem__(self, index):
        '''
            Overrides __getitem__ to return the index
            along with img and target.
        '''

        img, target = super().__getitem__(index)
        # print(f"mnist index: {index}")
        return img, target, index


if __name__ == "__main__":

    transform = transforms.Compose([
        # you can add other transformations in this list
        transforms.ToTensor()
    ])
    temp_MNIST = CustomMNIST(root='./data',
                             train=True,
                             download=True,
                             transform=transform)

    train_split_percentage = 0.2
    initial_train_set_size = len(temp_MNIST)
    needed_train_set_size = int(train_split_percentage *
                                initial_train_set_size)
    needed_val_set_size = initial_train_set_size - needed_train_set_size
    print(needed_train_set_size, needed_val_set_size)

    train_set, val_set = torch.utils.data.random_split(
        temp_MNIST, (needed_train_set_size, needed_val_set_size))
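The splits can then feed loaders directly (a sketch; the batch size here is arbitrary):

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=64, shuffle=False)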
Example #28
def encode(img, bottleneck):

    transform = transforms.Compose([transforms.ToTensor()])
    inputs = transform(img).unsqueeze(0)

    encoder = Encoder()
    binarizer = Binarizer(int(bottleneck / 512))
    decoder = Decoder(int(bottleneck / 512))

    if torch.cuda.is_available():
        encoder = Encoder().cuda()
        binarizer = Binarizer(int(bottleneck / 512)).cuda()
        decoder = Decoder(int(bottleneck / 512)).cuda()
        encoder.load_state_dict(
            torch.load('./project/models/model_LSTM_' + str(bottleneck) +
                       '/encoder'))
        binarizer.load_state_dict(
            torch.load('./project/models/model_LSTM_' + str(bottleneck) +
                       '/binarizer'))
        decoder.load_state_dict(
            torch.load('./project/models/model_LSTM_' + str(bottleneck) +
                       '/decoder'))
    else:
        encoder.load_state_dict(
            torch.load('./project/models/model_LSTM_' + str(bottleneck) +
                       '/encoder',
                       map_location=torch.device('cpu')))
        binarizer.load_state_dict(
            torch.load('./project/models/model_LSTM_' + str(bottleneck) +
                       '/binarizer',
                       map_location=torch.device('cpu')))
        decoder.load_state_dict(
            torch.load('./project/models/model_LSTM_' + str(bottleneck) +
                       '/decoder',
                       map_location=torch.device('cpu')))

    encoder.eval()
    binarizer.eval()
    decoder.eval()

    if torch.cuda.is_available():
        e_1 = (torch.zeros(1, 256, 64, 64).cuda(), torch.zeros(1, 256, 64,
                                                               64).cuda())
        e_2 = (torch.zeros(1, 512, 32, 32).cuda(), torch.zeros(1, 512, 32,
                                                               32).cuda())
        e_3 = (torch.zeros(1, 512, 16, 16).cuda(), torch.zeros(1, 512, 16,
                                                               16).cuda())
        d_1 = (torch.zeros(1, 512, 16, 16).cuda(), torch.zeros(1, 512, 16,
                                                               16).cuda())
        d_2 = (torch.zeros(1, 512, 32, 32).cuda(), torch.zeros(1, 512, 32,
                                                               32).cuda())
        d_3 = (torch.zeros(1, 256, 64, 64).cuda(), torch.zeros(1, 256, 64,
                                                               64).cuda())
        d_4 = (torch.zeros(1, 128, 128,
                           128).cuda(), torch.zeros(1, 128, 128, 128).cuda())
        residual = inputs.cuda()
    else:
        e_1 = (torch.zeros(1, 256, 64, 64), torch.zeros(1, 256, 64, 64))
        e_2 = (torch.zeros(1, 512, 32, 32), torch.zeros(1, 512, 32, 32))
        e_3 = (torch.zeros(1, 512, 16, 16), torch.zeros(1, 512, 16, 16))
        d_1 = (torch.zeros(1, 512, 16, 16), torch.zeros(1, 512, 16, 16))
        d_2 = (torch.zeros(1, 512, 32, 32), torch.zeros(1, 512, 32, 32))
        d_3 = (torch.zeros(1, 256, 64, 64), torch.zeros(1, 256, 64, 64))
        d_4 = (torch.zeros(1, 128, 128, 128), torch.zeros(1, 128, 128, 128))
        residual = inputs

    binary = []
    for t in range(rnn_num):  # rnn_num: number of progressive refinement iterations (module-level constant)
        e_result, e_1, e_2, e_3 = encoder(residual, e_1, e_2, e_3)
        b_result = binarizer(e_result)
        outputs, d_1, d_2, d_3, d_4 = decoder(b_result, d_1, d_2, d_3, d_4)
        residual = residual - outputs
        binary.append(
            (torch.Tensor.cpu(b_result).detach().numpy().astype(np.int8) + 1)
            // 2)
    binary = np.stack(binary, axis=0)
    binary = np.packbits(binary, axis=-1)
    return binary
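A hedged round-trip sketch (the bottleneck value must match a saved model directory under ./project/models/, and the input size is inferred from the state shapes above; the image path is hypothetical):

from PIL import Image
img = Image.open('frame.png').convert('RGB')  # hypothetical input; the state shapes imply 512x512
bits = encode(img, bottleneck=2048)           # packed binary codes, one array per RNN iteration
print(bits.shape, bits.dtype)                 # uint8 after np.packbits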
Example #29
    print("Checking      ......")

    args = get_args()

    check_args(args)

    print("Initializaion ......")

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    #prepare data
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(args.img_size),
        #transforms.Resize((args.img_size,args.img_size)),
        transforms.RandomHorizontalFlip(),
        #transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
        transforms.ToTensor(),
        normalize,
    ])

    if args.crop:
        val_transform = transforms.Compose([
            transforms.Resize(args.img_size),
            transforms.CenterCrop(args.img_size),
            #transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        val_transform = transforms.Compose([
            transforms.Resize((args.img_size, args.img_size)),
    def __init__(
            self,
            dataset_path='./drive/MyDrive/datasets/car classification/train_data',
            batch_size=1,
            model_name='tf_efficientnet_b3_ns',
            test_csv='./train_labels.csv',
            unique_csv='./train_labels.csv',
            output_dir='../drive/MyDrive/ckpt/grapheme/submission.csv',
            ckpt='../drive/MyDrive/ckpt/grapheme/20.pth'):

        # initialize attributes
        self.dataset_path = dataset_path
        self.batch_size = batch_size
        self.model_name = model_name
        self.test_csv = test_csv
        self.unique_csv = unique_csv
        self.output_dir = output_dir
        self.ckpt = ckpt

        if model_name == 'tf_efficientnet_b0_ns':
            self.input_size = (224, 224)
        elif model_name == 'tf_efficientnet_b3_ns':
            self.input_size = (300, 300)
        elif model_name == 'tf_efficientnet_b4_ns':
            self.input_size = (380, 380)
        elif model_name == 'tf_efficientnet_b6_ns':
            self.input_size = (528, 528)
        else:
            raise Exception('non-valid model name')

        # Compose transforms
        transform = []
        transform += [transforms.Resize(self.input_size)]
        self.transform = transforms.Compose(transform)

        self.test_dataset = BengaliDataset(self.test_csv,
                                           self.unique_csv,
                                           self.dataset_path,
                                           self.transform,
                                           cache=True)
        self.names = self.test_dataset.names
        self.test_dataloader = DataLoader(self.test_dataset,
                                          batch_size=self.batch_size,
                                          num_workers=0,
                                          shuffle=False)
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.model_root = MyModel(self.input_size,
                                  self.model_name,
                                  168,
                                  pretrained=True,
                                  dropout=0).to(self.device)
        self.model_consonant = MyModel(self.input_size,
                                       self.model_name,
                                       11,
                                       pretrained=True,
                                       dropout=0).to(self.device)
        self.model_vowel = MyModel(self.input_size,
                                   self.model_name,
                                   18,
                                   pretrained=True,
                                   dropout=0).to(self.device)
        self.model_multihead = MultiHeadModel(self.input_size,
                                              self.model_name,
                                              pretrained=True,
                                              dropout=0).to(self.device)

        ckpt = torch.load(self.ckpt, map_location=self.device)
        self.model_root.load_state_dict(ckpt['model_root_state_dict'])
        self.model_consonant.load_state_dict(
            ckpt['model_consonant_state_dict'])
        self.model_vowel.load_state_dict(ckpt['model_vowel_state_dict'])
        self.model_multihead.load_state_dict(
            ckpt['model_multihead_state_dict'])