Example #1
args.cuda = not args.no_cuda and torch.cuda.is_available()

if args.cuda:
    print("===> Using GPU to train")
    device = torch.device("cuda:0")
    cudnn.benchmark = True
else:
    device = torch.device("cpu")
    print("===> Using CPU to train")

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

print("===> Loaing datasets")
images_A = get_image_paths("train/biden_face")
images_B = get_image_paths("train/sacha_face")
images_A = load_images(images_A) / 255.0
images_B = load_images(images_B) / 255.0
# images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

model = Autoencoder().to(device)

print("===> Try resume from checkpoint")
if os.path.isdir("checkpoint"):
    try:
        checkpoint = torch.load("./checkpoint/autoencoder.t7")
        model.load_state_dict(checkpoint["state"])
        start_epoch = checkpoint["epoch"]
        print("===> Load last checkpoint data")
    except FileNotFoundError:
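The resume block above expects a checkpoint dict with 'state' and 'epoch' keys. A minimal sketch of the matching save call, assuming model and epoch are in scope in the training loop (they are not shown in the snippet):

import os
import torch

# Save the model weights and the current epoch under the keys the resume code reads.
os.makedirs("checkpoint", exist_ok=True)
torch.save({"state": model.state_dict(), "epoch": epoch}, "./checkpoint/autoencoder.t7")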
Example #2
parser.add_argument('--logInterval', type=int, default=100, metavar='N',
                    help='how many batches to wait before logging training status')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
    print('===> Using GPU to train')
else:
    print('===> Using CPU to train')

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

print('===> Loading datasets')
images_A = get_image_paths("data/trump")
images_B = get_image_paths("data/cage")
images_A = load_images(images_A) / 255.0
images_B = load_images(images_B) / 255.0
images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

model = Autoencoder()

print('===> Try resume from checkpoint')
if os.path.isdir('checkpoint'):
    try:
        checkpoint = torch.load('./checkpoint/autoencoder.t7')
        model.load_state_dict(checkpoint['state'])
        start_epoch = checkpoint['epoch']
        print('===> Load last checkpoint data')
    except FileNotFoundError:
Example #3
params = OrderedDict(lr=[5e-5], batch_size=[64], betas=[(0.5, 0.999)])

if args_cuda:
    print('===> Using GPU to train')
    device = torch.device('cuda:0')
    cudnn.benchmark = True
else:
    device = torch.device('cpu')  # fall back to CPU so the .to(device) call below still works
    print('===> Using CPU to train')

torch.manual_seed(args_seed)
if args_cuda:
    torch.cuda.manual_seed(args_seed)

print('===> Loading datasets')
images_A = get_image_paths("train/obama_face")
images_B = get_image_paths("train/hart_face")
images_A = load_images(images_A) / 255.0
images_B = load_images(images_B) / 255.0
#images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

for run in RunBuilder.get_runs(params):
    model_name = f'-{run}'

    model = Autoencoder().to(device)

    print('===> Try resume from checkpoint')
    if os.path.isdir('checkpoint'):
        try:
            checkpoint = torch.load('./checkpoint/' + model_name)
            model.load_state_dict(checkpoint['state'])
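RunBuilder.get_runs(params) is not shown in the snippet; a common way to implement such a helper (an assumption, not the original class) is to take the Cartesian product of the hyperparameter lists and yield one named tuple per combination:

from collections import namedtuple
from itertools import product

class RunBuilder:
    @staticmethod
    def get_runs(params):
        # One namedtuple per combination of values in the OrderedDict,
        # e.g. Run(lr=5e-05, batch_size=64, betas=(0.5, 0.999)).
        Run = namedtuple('Run', params.keys())
        return [Run(*values) for values in product(*params.values())]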
Example #4
    def train(self, force_feature_extract=False):
        """
        Train the vehicle detector.

        Splits the dataset into training and testing portion and extracts image features for both of them. Trains
        the SVM classifier using HOG features combined with spatially binned color histogram and a color histogram.

        :param force_feature_extract: extract train features from the image if true else it uses the cached ones
        :return:
        """
        cars_base_path = 'data/vehicles/'
        notcars_base_path = 'data/non-vehicles/'

        cars_gti_paths = get_image_paths(cars_base_path, ['GTI_Far', 'GTI_Left', 'GTI_MiddleClose', 'GTI_Right'])
        cars_kitti_paths = shuffle(get_image_paths(cars_base_path, ['KITTI_extracted']))
        notcars_gti_paths = get_image_paths(notcars_base_path, ['GTI'])
        notcars_extras_paths = shuffle(get_image_paths(notcars_base_path, ['Extras']))

        n_test_cars = min(len(cars_kitti_paths), int((len(cars_gti_paths) + len(cars_kitti_paths)) * 0.2))
        n_test_notcars = min(len(notcars_extras_paths), int((len(notcars_gti_paths) + len(notcars_extras_paths)) * 0.2))

        cars_train_paths = cars_gti_paths
        cars_train_paths.extend(cars_kitti_paths[:-n_test_cars])
        cars_test_paths = cars_kitti_paths[-n_test_cars:]

        notcars_train_paths = notcars_gti_paths
        notcars_train_paths.extend(notcars_extras_paths[:-n_test_notcars])
        notcars_test_paths = notcars_extras_paths[-n_test_notcars:]

        print('computing features')

        t = time.time()
        car_train_features = self.extract_train_features(cars_train_paths, 'output/car_train_features.p', force_feature_extract)
        car_test_features = self.extract_train_features(cars_test_paths, 'output/car_test_features.p', force_feature_extract)
        notcar_train_features = self.extract_train_features(notcars_train_paths, 'output/notcar_train_features.p', force_feature_extract)
        notcar_test_features = self.extract_train_features(notcars_test_paths, 'output/notcar_test_features.p', force_feature_extract)
        t2 = time.time()
        print(round(t2 - t, 2), 'Seconds to extract HOG features...')

        print('got %d training samples: %d cars, %d non cars' %
              (len(car_train_features) + len(notcar_train_features), len(car_train_features), len(notcar_train_features)))

        # Create an array stack of feature vectors
        X_train = np.vstack((car_train_features, notcar_train_features)).astype(np.float64)
        X_test = np.vstack((car_test_features, notcar_test_features)).astype(np.float64)

        self.X_scaler = StandardScaler().fit(X_train)
        X_train = self.X_scaler.transform(X_train)
        X_test = self.X_scaler.transform(X_test)

        y_train = np.hstack((np.ones(len(car_train_features)), np.zeros(len(notcar_train_features))))
        y_test = np.hstack((np.ones(len(car_test_features)), np.zeros(len(notcar_test_features))))

        print('Using:', self.colorspace, 'color space, ',  self.orient, 'orientations', self.pix_per_cell,
              'pixels per cell and', self.cell_per_block, 'cells per block')
        print('Feature vector length:', len(X_train[0]))

        t = time.time()
        self.svc = LinearSVC()
        self.svc.fit(X_train, y_train)
        t2 = time.time()

        print(round(t2 - t, 2), 'Seconds to train SVC...')
        print('Test Accuracy of SVC = ', round(self.svc.score(X_test, y_test), 4))
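extract_train_features itself is not shown; the docstring above describes the features as HOG combined with spatially binned color and a color histogram. An illustrative per-image extractor along those lines, assuming skimage and OpenCV are available (parameter values are placeholders, not the class's actual settings):

import cv2
import numpy as np
from skimage.feature import hog

def extract_features(image, orient=9, pix_per_cell=8, cell_per_block=2,
                     spatial_size=(32, 32), hist_bins=32):
    # HOG over each channel, flattened into a single vector.
    hog_features = np.hstack([
        hog(image[:, :, ch], orientations=orient,
            pixels_per_cell=(pix_per_cell, pix_per_cell),
            cells_per_block=(cell_per_block, cell_per_block),
            feature_vector=True)
        for ch in range(image.shape[2])])
    # Spatially binned color: downsample the image and flatten it.
    spatial_features = cv2.resize(image, spatial_size).ravel()
    # Per-channel color histogram.
    hist_features = np.hstack([
        np.histogram(image[:, :, ch], bins=hist_bins, range=(0, 256))[0]
        for ch in range(image.shape[2])])
    return np.concatenate((hog_features, spatial_features, hist_features))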
Example #5
args.cuda = not args.no_cuda and torch.cuda.is_available()

if args.cuda:
    print('===> Using GPU to train')
    device = torch.device('cuda:0')
    cudnn.benchmark = True
else:
    device = torch.device('cpu')
    print('===> Using CPU to train')

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

print('===> Loading datasets')
images_A = get_image_paths("train/personA_face")
images_B = get_image_paths("train/personB_face")
images_A = load_images(images_A) / 255.0
images_B = load_images(images_B) / 255.0
#images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

model = Autoencoder().to(device)

print('===> Try resume from checkpoint')
if os.path.isdir('checkpoint'):
    try:
        checkpoint = torch.load('./checkpoint/autoencoder.t7')
        model.load_state_dict(checkpoint['state'])
        start_epoch = checkpoint['epoch']
        print('===> Load last checkpoint data')
    except FileNotFoundError:
Example #6
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.cuda = True

if args.cuda:
    print('===> Using GPU to train')
    device = torch.device('cuda:0')
    cudnn.benchmark = True
else:
    print('===> Using CPU to train')

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

print('===> Loading datasets')
images_A = get_image_paths("./face_A")  # get the image paths
images_B = get_image_paths("./face_B")
images_A = load_images(images_A) / 255.0  # load the images into a NumPy array and scale pixel values to [0, 1]
images_B = load_images(images_B) / 255.0
images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))  # shift set A by the difference between the two sets' per-channel (RGB) means;
                                                                           # axis=(0, 1, 2) averages over those three axes, bringing the two input distributions
                                                                           # closer so the loss curve drops faster (a small numerical check follows this example)
model = Autoencoder().to(device)  # build the model

print('===> Try resume from checkpoint')
if os.path.isdir('checkpoint'):
    try:
        checkpoint = torch.load('./checkpoint/autoencoder.t7')
        model.load_state_dict(checkpoint['state'])
        start_epoch = checkpoint['epoch']
        print('===> Load last checkpoint data')
    except FileNotFoundError:
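A quick numerical check of the mean-alignment step in the example above, using small random arrays in place of the real face data: after the shift, the per-channel means of the two sets coincide.

import numpy as np

rng = np.random.default_rng(0)
images_A = rng.random((4, 8, 8, 3)) * 0.5        # darker fake batch, shape (N, H, W, 3)
images_B = rng.random((4, 8, 8, 3)) * 0.5 + 0.5  # brighter fake batch

# Shift set A so its per-channel means match set B's.
images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))
print(np.allclose(images_A.mean(axis=(0, 1, 2)), images_B.mean(axis=(0, 1, 2))))  # True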
Example #7
args.cuda = not args.no_cuda and torch.cuda.is_available()

if args.cuda:
    print('===> Using GPU to train')
    device = torch.device('cuda:0')
    cudnn.benchmark = True
else:
    device = torch.device('cpu')
    print('===> Using CPU to train')

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

print('===> Loading datasets')
images_A = get_image_paths('data/trump')
images_B = get_image_paths('data/cage')
images_A = load_images(images_A) / 255.0
images_B = load_images(images_B) / 255.0
images_A_mean, images_B_mean = images_A.mean(axis=(0, 1, 2)), images_B.mean(axis=(0, 1, 2))
images_A += images_B_mean - images_A_mean

model = Autoencoder().to(device)

print('===> Try resume from checkpoint')
if os.path.isdir('ml/checkpoint'):
    try:
        checkpoint = torch.load('./ml/checkpoint/autoencoder.t7')
        model.load_state_dict(checkpoint['state'])
        start_epoch = checkpoint['epoch']
        print('===> Load last checkpoint data')
Example #8
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

if args.cuda:
    print('===> Using GPU to train')
    device = torch.device('cuda:0')
    cudnn.benchmark = True
else:
    device = torch.device('cpu')  # needed for the .to(device) call below
    print('===> Using CPU to train')

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

print('===> Loading datasets')
images_A = get_image_paths("train/trump_face")
images_B = get_image_paths("train/me_face")
images_A = load_images(images_A) / 255.0
images_B = load_images(images_B) / 255.0
#images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

model = Autoencoder().to(device)

print('===> Try resume from checkpoint')
if os.path.isdir('checkpoint'):
    try:
        checkpoint = torch.load('./checkpoint/autoencoder.t7')
        model.load_state_dict(checkpoint['state'])
        start_epoch = checkpoint['epoch']
        print('===> Load last checkpoint data')
    except FileNotFoundError:
Example #9

def BGR_to_RGB(image):
    # OpenCV loads images in BGR channel order; matplotlib expects RGB.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return image


def show_from_cv(img, title=None):
    plt.figure()
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)


if __name__ == '__main__':

    images = get_image_paths("data/trump")
    images = images[0:100]
    images = load_images(images, convert=BGR_to_RGB)

    show_from_cv(images[0])

    grid = vis_grid(images)

    plt.figure()
    plt.imshow(grid)
    plt.pause(0.001)

    pass