Example #1
# Standard imports assumed by this script; project-local helpers
# (Sender, Receiver, get_data, get_images, show_batch) are expected
# to be importable from the surrounding project.
import numpy as np
import torch
from torch.optim import Adam


def main():
    train_loader, val_loader, input_size = get_data(100)

    show_batch(train_loader)

    # define agents
    hidden_size = 32
    n_actions = 2
    vocab_size = 11

    # train hyperparams
    num_episodes = 100
    lr = 0.1

    send = Sender(in_size=input_size, hidden_size=hidden_size,
                  vocab_len=vocab_size, lr=lr)

    recv = Receiver(in_size=input_size, vocabulary_size_sender=vocab_size,
                    hidden_size=hidden_size, n_actions=n_actions, lr=lr)


    def batch(send, recv, images_batch, labels_batch, send_opt=None, recv_opt=None):

        imgsa_s, imgsb_s, imgsa_r, imgsb_r, targets, _ = get_images(images_batch, labels_batch)

        probs_s, message, logprobs_s, entropy_s = send.model(imgsa_s, imgsb_s)
        probs_r, actions, logprobs_r, entropy_r = recv.model(imgsa_r, imgsb_r, message.detach())

        error = reward(actions, targets)  # torch.abs(actions - targets); the minus sign is already applied in the update, I think
        acc = accuracy(actions, targets)  # torch.mean(error.detach().double())

        send_loss = send.loss(error, logprobs_s, entropy_s)
        recv_loss = recv.loss(error, logprobs_r, entropy_r)

        if send_opt is not None:
            # sender update
            send_opt.zero_grad()
            send_loss.backward()
            send_opt.step()

        if recv_opt is not None:
            # receiver update
            recv_opt.zero_grad()
            recv_loss.backward()
            recv_opt.step()

        return error, send_loss, recv_loss, len(imgsa_s), acc

    # LOAD PRETRAINED MODELS (optional; uncomment to resume from checkpoints)

    #send.model.load_state_dict(torch.load('sender_model_mnist.pth'))
    #recv.model.load_state_dict(torch.load('receiver_model_mnist.pth'))

    send_opt = Adam(send.model.parameters(), lr=lr)
    recv_opt = Adam(recv.model.parameters(), lr=lr)
    print("lr=", lr)

    # TRAIN LOOP

    train_send_losses, train_recv_losses, val_send_losses, val_recv_losses, val_accuracy = [], [], [], [], []

    for ep in range(num_episodes):
        print("episode=", ep)

        # TRAIN STEP
        print("train")
        train_results = []
        for imgs, labs in train_loader:
            train_results.append(batch(send, recv, imgs, labs, send_opt, recv_opt))

        print("evaluation")
        # EVALUATION STEP
        with torch.no_grad():
            results = [batch(send, recv, imgs, labs) for imgs, labs in val_loader]

        val_error, val_send_loss, val_recv_loss, nums, val_acc = zip(*results)

        total = np.sum(nums)

        # Average the train losses over all train batches, weighted by batch
        # size (the original kept only the last batch's loss and weighted that
        # single scalar by the validation batch sizes, which is a no-op).
        _, ep_send_losses, ep_recv_losses, train_nums, _ = zip(*train_results)
        train_total = np.sum(train_nums)
        send_train_avg_loss = np.sum(np.multiply([l.item() for l in ep_send_losses], train_nums)) / train_total
        recv_train_avg_loss = np.sum(np.multiply([l.item() for l in ep_recv_losses], train_nums)) / train_total
        train_send_losses.append(send_train_avg_loss)
        train_recv_losses.append(recv_train_avg_loss)

        send_val_avg_loss = np.sum(np.multiply(val_send_loss, nums))/total
        recv_val_avg_loss = np.sum(np.multiply(val_recv_loss, nums))/total
        val_send_losses.append(send_val_avg_loss)
        val_recv_losses.append(recv_val_avg_loss)
            
        val_avg_accuracy = np.sum(np.multiply(val_acc, nums))/total
        val_accuracy.append(val_avg_accuracy)

        print("sender train loss", send_train_avg_loss)
        print("receiver train loss", recv_train_avg_loss)
        print("sender val loss", send_val_avg_loss)
        print("receiver val loss", recv_val_avg_loss)
        print("accuracy", val_avg_accuracy)
        print("\n")

    torch.save(send.model.state_dict(), 'sender_model_cifar_01.pth')
    torch.save(recv.model.state_dict(), 'receiver_model_cifar_01.pth')
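
# Minimal stand-ins for the helpers `batch` relies on, following the inline
# hint above that the error is |action - target|. These are assumptions, not
# the project's actual implementations; drop them if the project provides its
# own. The agents' `loss` methods are presumed REINFORCE-style, roughly
# mean(error * logprobs) - entropy_weight * mean(entropy).
def reward(actions, targets):
    # per-sample error signal: 0 when the receiver picks the target image
    return torch.abs(actions.float() - targets.float())


def accuracy(actions, targets):
    # fraction of correct picks in the batch
    return torch.mean((actions == targets).float()).item()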
Example #2
    # fall back to CPU when CUDA is requested but unavailable
    # (the original left `device` undefined in that case)
    if args.device == 'cuda' and torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    gen = Generator(128, 3, 64)  # assumed: latent size, output channels, base feature maps
    load_gen(gen, args.path_ckpt, device)
    gen.eval()

    if args.grid:
        noise = get_random_noise(args.num_samples, args.z_size, device)
        print("==> Generate IMAGE GRID...")
        output = gen(noise)
        show_batch(output, out_path, num_samples=args.num_samples, figsize=(args.img_size, args.img_size))
    elif args.gif:
        noise = get_random_noise(args.num_samples, args.z_size, device)
        print("==> Generate GIF...")
        images = latent_space_interpolation_sequence(noise, step_interpolation=args.steps)
        output = gen(images)
        if args.resize and isinstance(args.resize, int):
            print(f"==> Resize images to {args.resize}px")
            output = F.interpolate(output, size=args.resize)

        images = []
        for img in output:
            # move to CPU before NumPy conversion (required if the model ran on CUDA)
            img = img.detach().cpu().permute(1, 2, 0)
            images.append(img.numpy())
        save_img_name = 'result.gif'
        save_path = os.path.join(out_path, save_img_name)
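        # The snippet ends before the file is written; a hedged completion,
        # assuming `import imageio` at module level. Frames are min-max scaled
        # to uint8 because the generator's output range isn't specified here.
        frames = [((f - f.min()) / (f.max() - f.min() + 1e-8) * 255).astype('uint8')
                  for f in images]
        imageio.mimsave(save_path, frames)
        print(f"==> Saved GIF to {save_path}")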
# Separate script: video dataset preview. Standard imports added; the
# project-local classes (DynamicImage, DIPredefinedTransforms, VideoDataset,
# VideoImageDataset, show_batch) are assumed importable.
import os

import matplotlib.pyplot as plt
import torch
import torchvision

from transformations.networks_transforms import c3d_fe_transform

if __name__=='__main__':

    DN = DynamicImage(output_type="pil")
    di_t = DIPredefinedTransforms(size=224, tmp_transform=DN)
    ST = di_t.val_transform
    # alternative dataset paths kept from the original:
    #   "/Volumes/TOSHIBA EXT/DATASET/AnomalyCRIMEALL/Anomaly-Videos-All"
    #   "/Users/davidchoqueluqueroman/Documents/DATASETS_Local/hmdb51/hmdb51_org"
    #   "/Volumes/TOSHIBA EXT/DATASET/HockeyFight/videos"
    #   "/Users/davidchoqueluqueroman/Documents/CODIGOS/DATASETS/UCFCrime/Abuse"
    dataset = VideoDataset(clip_length=16,
                           frame_stride=1,
                           frame_rate=25,
                           dataset_path="/Users/davidchoqueluqueroman/Documents/DATASETS_Local/RWF-2000/videos/train",
                           temporal_transform=DN,
                           spatial_transform=ST)

    batch = []
    for i in range(15):
        video, label, (clip_idx, video_dir, file) = dataset[i]
        print("video:", type(video), video.dtype, video.size())
        print("dir:", video_dir)
        print("file:", file)
        print("clip_idx:", clip_idx)
        batch.append(video)
    batch = torch.stack(batch, dim=0)
    print("batch: ", batch.size())
    grid = torchvision.utils.make_grid(batch, nrow=6, padding=50)
    show_batch(grid)
    plt.show()

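    # The three names below are undefined in this snippet; hypothetical
    # placeholders so the block reads end to end. Substitute the project's
    # actual make_function and transforms before running:
    m = None                   # make_function expected by VideoImageDataset
    temporal_transform = DN    # reuse the dynamic-image transform from above
    spatial_transform = ST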
    d = VideoImageDataset(root="",
                          frames_per_clip=16,
                          number_of_clips=0,
                          make_function=m,
                          stride=1,
                          overlap=0,
                          position="",
                          padding=False,
                          return_metadata=True,
                          temporal_transform=temporal_transform,
                          spatial_transform=spatial_transform
                          )  #spatial_transform.train_transform
    # import random
    # v, l, path = d[random.randint(0,len(d)-1)]
    # print("video:", path, v.size(), "label: ", l)
    # grid = torchvision.utils.make_grid(v, nrow=10, padding=50)
    # show_batch(grid)
    # plt.show()

    data_iter = torch.utils.data.DataLoader(d,
                                            batch_size=1,
                                            shuffle=True,
                                            num_workers=1,
                                            pin_memory=True)
    for v, l, path in data_iter:
        v = torch.squeeze(v)
        print("video:", path, v.size(), "label: ", l)
        grid = torchvision.utils.make_grid(v, nrow=10, padding=50)
        show_batch(grid, title=os.path.split(path[0])[1])
        plt.show()
# Separate script: classifier training entry point (argument parsing and the
# imports occur earlier in the original file).
print(args)
logging.info(args)

dataloaders, dataset_sizes, class_names = get_data_loaders(
    args.train_data, args.batch_size)
logging.info("Train size {}, Val size {}, Test size {}".format(
    dataset_sizes['train'], dataset_sizes['val'], dataset_sizes['test']))
logging.info('Class names:{}'.format(class_names))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    total_gpus = torch.cuda.device_count()
    logging.info('Total number of GPUs:{}'.format(total_gpus))
    multi_gpu = total_gpus > 1
else:
    print("No GPUs available; cannot proceed. This training regime requires GPUs.")
    exit(1)
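
# Hedged note: `multi_gpu` is presumably consumed later when the model is
# built; a typical pattern (an assumption, not shown in this script) is:
#   model = torch.nn.DataParallel(model) if multi_gpu else model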

nb_classes = len(class_names)
# Get a batch of training data and show it
inputs, classes = next(iter(dataloaders['train']))
out = torchvision.utils.make_grid(inputs)
show_batch(out, title=[class_names[x] for x in classes])

model, train_losses, val_losses = configure_run_model()
display_losses(train_losses, val_losses, 'Train-Val Loss')
test_model(model, dataloaders, device)
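
# `display_losses` is project-local; a minimal sketch of what a helper with
# this signature typically does (an assumption, not the project's code):
def display_losses_sketch(train_losses, val_losses, title):
    import matplotlib.pyplot as plt

    plt.plot(train_losses, label='train')
    plt.plot(val_losses, label='val')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.title(title)
    plt.legend()
    plt.show()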
Example #6
from data.dataset import ImgFolderDataset
from torch.utils.data import DataLoader
from utils import show_batch

if __name__ == '__main__':
    path = ''  # path to the image folder dataset
    dataset = ImgFolderDataset(path)
    dataloader = DataLoader(dataset,
                            batch_size=16,
                            num_workers=2,
                            shuffle=True)

    batch = next(iter(dataloader))
    show_batch(batch, size=8, shape=(8, 8), save='/home/mirage/')
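
# `show_batch` comes from the project's utils and its signature varies across
# these examples. A minimal sketch matching the call above (assumptions: a
# float image batch, `shape` giving the grid rows/cols, `save` an output
# directory); the real helper may differ:
import os

import matplotlib.pyplot as plt
import torchvision


def show_batch_sketch(batch, size=8, shape=(8, 8), save=None):
    # lay the batch out as one image grid, `shape[1]` images per row
    grid = torchvision.utils.make_grid(batch, nrow=shape[1])
    plt.figure(figsize=(size, size))
    plt.imshow(grid.permute(1, 2, 0).clamp(0, 1).numpy())
    plt.axis('off')
    if save is not None:
        plt.savefig(os.path.join(save, 'batch.png'), bbox_inches='tight')
    plt.show()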