import os import random import time import numpy as np import torch import torchvision.transforms as transforms from PIL import Image from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence from torch.utils.data import DataLoader, Dataset import dataset import utils from cnn import resnet50 device = utils.selectDevice() # ----------------------------------------------------------------- # To save the numpy array into the file, there are several options # Machine readable: # - ndarray.dump(), ndarray.dumps(), pickle.dump(), pickle.dumps(): # Generate .pkl file. # - np.save(), np.savez(), np.savez_compressed() # Generate .npy file # - np.savetxt() # Generate .txt file. # ----------------------------------------------------------------- def video_to_features(data_path): """ Transfer the training set and validation set videos into features """
Synopsis [ Generate images from GAN / ACGAN. ] """ import argparse import os import numpy as np import torch import torch.nn as nn import torchvision from torchvision.utils import save_image import utils from GAN.model import DCGAN_Generator DEVICE = utils.selectDevice() class ACGAN_Generator(nn.Module): def __init__(self): super(ACGAN_Generator, self).__init__() self.linear = nn.Linear(102, 512 * 4 * 4) self.bn0 = nn.BatchNorm2d(512) self.relu0 = nn.ReLU(inplace=True) self.conv_blocks = nn.Sequential( nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=False), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),
def main():
    """Train a Yolov1 detector — the basic vgg16bn backbone or the improved variant.

    Configuration comes from the module-level ``args`` namespace
    (``command``, ``batchs``, ``worker``, ``lr``, ``epochs``, ``load``,
    ``augment``); the chosen model is trained via ``train(...)``.
    """
    transform = transforms.Compose([
        transforms.Resize((448, 448)),
        transforms.ToTensor(),
        # ImageNet statistics — the vgg16bn backbone is ImageNet-pretrained.
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # The basic model predicts on a 7x7 grid; the improved model on 14x14.
    grid_num = 7 if args.command == "basic" else 14

    trainset = dataset.MyDataset(root="hw2_train_val/train15000",
                                 grid_num=grid_num, train=args.augment,
                                 transform=transform)
    testset = dataset.MyDataset(root="hw2_train_val/val1500",
                                grid_num=grid_num, train=False,
                                transform=transform)
    trainLoader = DataLoader(trainset, batch_size=args.batchs, shuffle=True,
                             num_workers=args.worker)
    # Validation runs one image at a time, in dataset order.
    testLoader = DataLoader(testset, batch_size=1, shuffle=False,
                            num_workers=args.worker)

    device = utils.selectDevice(show=True)

    if args.command == "basic":
        model = models.Yolov1_vgg16bn(pretrained=True).to(device)
        criterion = models.YoloLoss(7., 2., 5., 0.5, device).to(device)
        optimizer = optim.SGD(model.parameters(), lr=args.lr,
                              weight_decay=1e-4)
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [20, 45, 55],
                                                   gamma=0.1)

        start_epoch = 0
        if args.load:
            model, optimizer, start_epoch, scheduler = utils.loadCheckpoint(
                args.load, model, optimizer, scheduler)

        model = train(model, criterion, optimizer, scheduler, trainLoader,
                      testLoader, start_epoch, args.epochs, device,
                      lr=args.lr, grid_num=7)

    elif args.command == "improve":
        model_improve = models.Yolov1_vgg16bn_Improve(
            pretrained=True).to(device)
        criterion = models.YoloLoss(14., 2., 5., 0.5, device).to(device)
        optimizer = optim.SGD(model_improve.parameters(), lr=args.lr,
                              weight_decay=1e-4)
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [20, 40, 70],
                                                   gamma=0.1)

        start_epoch = 0
        if args.load:
            # BUG FIX: previously passed the undefined name ``model`` here,
            # which raised NameError whenever --load was used with "improve".
            model_improve, optimizer, start_epoch, scheduler = \
                utils.loadCheckpoint(args.load, model_improve, optimizer,
                                     scheduler)

        # BUG FIX: the improved model uses a 14x14 grid (the dataset and the
        # YoloLoss above are both built with 14), but train() was previously
        # called with grid_num=7.
        model_improve = train(model_improve, criterion, optimizer, scheduler,
                              trainLoader, testLoader, start_epoch,
                              args.epochs, device, lr=args.lr, grid_num=14,
                              save_name="Yolov1-Improve")