Example #1
from copy import deepcopy

def make_model(src_vocab,
               tgt_vocab,
               N=6,
               d_model=512,
               d_ff=2048,
               h=8,
               dropout=0.1):
    """Construct a full Transformer model from hyperparameters."""
    attn = MultiHeadedAttention(h, d_model, dropout)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    pe = PositionalEncoding(d_model, dropout)

    # Deep copies ensure the N stacked layers do not share parameters.
    encoder_layer = EncoderLayer(d_model, deepcopy(attn), deepcopy(ff), dropout)
    encoder = Encoder(encoder_layer, N)

    decoder_layer = DecoderLayer(d_model, deepcopy(attn), deepcopy(attn), deepcopy(ff), dropout)
    decoder = Decoder(decoder_layer, N)

    src_embed = nn.Sequential(Embedding(src_vocab, d_model), deepcopy(pe))
    tgt_embed = nn.Sequential(Embedding(tgt_vocab, d_model), deepcopy(pe))

    generator = Generator(d_model, tgt_vocab)

    model = EncoderDecoder(encoder, decoder, src_embed, tgt_embed, generator)

    # Initialize weights with Glorot/Xavier uniform (in-place variant;
    # the non-underscore nn.init.xavier_uniform is deprecated).
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
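
A quick smoke test of the builder; a minimal sketch assuming the Annotated-Transformer-style classes above are in scope, with placeholder vocabulary sizes:

tmp_model = make_model(src_vocab=1000, tgt_vocab=1000, N=2)
n_params = sum(p.numel() for p in tmp_model.parameters())
print("built a model with {} parameters".format(n_params))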
Example #2
    def __init__(self, opt):
        super(LMGan, self).__init__()
        self.opt = opt
        self.generator = Generator(opt.vocab_size, opt.embedding_size,
                                   opt.hidden_size, opt.device)

        self.discriminator = Discriminator(
            opt.hidden_size, opt.d_hidden_size, opt.d_linear_size,
            opt.d_dropout, opt.device) if opt.adversarial else None
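
A hypothetical opt object for this constructor; the attribute names come from the snippet itself, but every value below is a placeholder:

from types import SimpleNamespace

opt = SimpleNamespace(vocab_size=10000, embedding_size=256, hidden_size=512,
                      d_hidden_size=512, d_linear_size=256, d_dropout=0.2,
                      device='cpu', adversarial=True)
model = LMGan(opt)  # builds the generator, plus the discriminator in adversarial mode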
Example #3
def num_param(args):
    TT = args.fc_tensorized
    with open(CONFIG_DIR + args.config, 'r') as f:
        config = json.load(f)

    D = Discriminator(config, TT)
    G = Generator(config)
    G_params = sum(p.numel() for p in G.parameters() if p.requires_grad)
    D_params = sum(p.numel() for p in D.parameters() if p.requires_grad)
    params = G_params + D_params
    print("The model has:{} parameters".format(params))
Example #4
    def __init__(self, config):
        super(GANAEL, self).__init__()

        self.config = config

        embedding = nn.Embedding(config.vocab_size, config.embedding_size)

        self.generator = Generator(config, embedding)

        self.discriminator = Discriminator(config, embedding)

        # tied embedding
        self.generator.embedding.weight.data = self.discriminator.embedding.weight.data
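
Note that assigning .data shares the underlying tensor storage but keeps two distinct Parameter objects, each accumulating its own gradient. The more common tying idiom shares the Parameter itself; a one-line alternative sketch:

        self.generator.embedding.weight = self.discriminator.embedding.weight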
Example #5
def test(request):
    path = r'J:\G11_origin_data'
    dirList = []
    for parent, dirnames, filenames in os.walk(path):
        dirList.extend(dirnames)  # collect directory names at every depth
    dirList.sort()
    for dirname in dirList:
        if dirname > '2016-10-31':
            generator = Generator('test', dirname)
            generator.start()
            generator.join()
    return HttpResponse('done')
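
The dirname > '2016-10-31' filter relies on ISO-8601 date strings sorting lexicographically in chronological order; a quick check of the idiom and its main pitfall:

assert '2016-11-01' > '2016-10-31'  # zero-padded ISO dates sort like dates
assert '2016-9-01' > '2016-10-31'   # pitfall: an unpadded month sorts out of order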
Example #6
def load_checkpoints(config_path, checkpoint_path, cpu=False):
    with open(config_path) as f:
        config = yaml.safe_load(f)  # safe_load avoids arbitrary object construction

    generator = Generator(num_regions=config['model_params']['num_regions'],
                          num_channels=config['model_params']['num_channels'],
                          **config['model_params']['generator_params'])
    if not cpu:
        generator.cuda()

    region_predictor = RegionPredictor(num_regions=config['model_params']['num_regions'],
                                       num_channels=config['model_params']['num_channels'],
                                       estimate_affine=config['model_params']['estimate_affine'],
                                       **config['model_params']['region_predictor_params'])
    if not cpu:
        region_predictor.cuda()

    avd_network = AVDNetwork(num_regions=config['model_params']['num_regions'],
                             **config['model_params']['avd_network_params'])
    if not cpu:
        avd_network.cuda()

    if cpu:
        checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    else:
        checkpoint = torch.load(checkpoint_path)

    generator.load_state_dict(checkpoint['generator'])
    region_predictor.load_state_dict(checkpoint['region_predictor'])
    if 'avd_network' in checkpoint:
        avd_network.load_state_dict(checkpoint['avd_network'])

    if not cpu:
        generator = DataParallelWithCallback(generator)
        region_predictor = DataParallelWithCallback(region_predictor)
        avd_network = DataParallelWithCallback(avd_network)

    generator.eval()
    region_predictor.eval()
    avd_network.eval()

    return generator, region_predictor, avd_network
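
A hypothetical call; both paths are placeholders rather than files from the original project:

generator, region_predictor, avd_network = load_checkpoints(
    config_path='config/dataset.yaml',        # placeholder
    checkpoint_path='checkpoints/model.pth',  # placeholder
    cpu=not torch.cuda.is_available())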
Example #7
def train_data(configs):
    root_folder = configs['paths']['input']
    img_size = configs['hypers']["common"]['size']
    ext = configs['params']['ext']
    b_size = configs['hypers']["common"]['batch_size']

    dataset = DataClass(root_folder, img_size, ext)
    dataloader = DataLoader(dataset, batch_size=b_size, shuffle=False)

    in_features = configs["hypers"]["common"]["in_channel"]
    out_features = configs["hypers"]["common"]["out_channel"]
    max_features = configs["hypers"]["common"]["max_features"]
    num_block = configs["hypers"]["common"]["num_blocks"]
    block_expansion = configs["hypers"]["common"]["block_expansion"]
    kp_variance = configs["hypers"]["common"]["kp_variance"]
    mask_embedding_params = configs["hypers"]["generator_params"][
        "mask_embedding_params"]
    norm_const = configs["hypers"]["generator_params"]["norm_const"]
    num_group_blocks = configs["hypers"]["generator_params"][
        "num_group_blocks"]
    use_mask = configs["hypers"]["generator_params"]["use_mask"]
    use_correction = configs["hypers"]["generator_params"]["use_correction"]

    kp_detector = KeyPointDetector(in_features, out_features, max_features,
                                   num_block, block_expansion)
    generator = Generator(in_features, out_features, max_features, num_block,
                          block_expansion, kp_variance, norm_const,
                          num_group_blocks, use_mask, use_correction,
                          **mask_embedding_params)

    for b, img in enumerate(dataloader, 0):
        img_size = img.size()
        img = img.view(-1, img_size[2], img_size[3], img_size[4],
                       img_size[5]).permute(0, 2, 1, 3, 4)
        source_img = img[:, :, 0].unsqueeze(2)
        driving_img = img[:, :, 1].unsqueeze(2)

        kp = kp_detector(img)
        kp_split = split_kp(kp)
        predicted = generator(source_img, kp_split["kp_source"],
                              kp_split["kp_driving"])
Example #8
def main():    
    args = parse_args()        
    root = Path(args.save_path)
    load_root = Path(args.load_path) if args.load_path else None
    print(load_root)
    root.mkdir(parents=True, exist_ok=True)

    ####################################
    # Dump arguments and create logger #
    ####################################
    with open(root / "args.yml", "w") as f:
        yaml.dump(args, f)
    writer = SummaryWriter(str(root))

    #######################
    # Load PyTorch Models #
    #######################
    netG = Generator(args.n_mel_channels).cuda()
    fft = Audio2Mel(n_mel_channels=args.n_mel_channels, mel_fmin=40, mel_fmax=None, sampling_rate=22050).cuda()

    print(netG)

    #####################
    # Create optimizers #
    #####################
    optG = torch.optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.9))

    if load_root and load_root.exists():
        netG.load_state_dict(torch.load(load_root / "netG.pt"))
        optG.load_state_dict(torch.load(load_root / "optG.pt"))        
        print('checkpoints loaded')

    #######################
    # Create data loaders #
    #######################
    train_set = AudioDataset(
        Path(args.data_path) / "train_files.txt", args.seq_len, sampling_rate=22050
    )
    test_set = AudioDataset(
        Path(args.data_path) / "test_files.txt",
        ((22050 * 4 // 256) // 32) * 32 * 256,  # ~4 s of audio, truncated to a multiple of 32 mel frames (hop 256)
        sampling_rate=22050,
        augment=False,
    )

    train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=4, shuffle=True, pin_memory=True)
    test_loader = DataLoader(test_set, batch_size=1)

    mr_stft_loss = MultiResolutionSTFTLoss().cuda()
    ##########################
    # Dumping original audio #
    ##########################
    test_voc = []
    test_audio = []
    for i, x_t in enumerate(test_loader):
        x_t = x_t.cuda()
        s_t = fft(x_t).detach()

        test_voc.append(s_t.cuda())
        test_audio.append(x_t.cpu())

        audio = x_t.squeeze().cpu()
        save_sample(root / ("original_%d.wav" % i), 22050, audio)
        writer.add_audio("original/sample_%d.wav" % i, audio, 0, sample_rate=22050)

        if i == args.n_test_samples - 1:
            break

    costs = []
    start = time.time()

    # enable cudnn autotuner to speed up training
    torch.backends.cudnn.benchmark = True
    best_mel_reconst = 1000000
    steps = 0
    for epoch in range(1, args.epochs + 1):
        for iterno, x_t in enumerate(train_loader):            
            x_t = x_t.cuda()            
            s_t = fft(x_t).detach()
            n = torch.randn(x_t.shape[0], 128, 1).cuda()
            x_pred_t = netG(s_t.cuda(), n)            
            
            ###################
            # Train Generator #
            ###################            
            with torch.no_grad():
                s_pred_t = fft(x_pred_t.detach())
                s_error = F.l1_loss(s_t, s_pred_t).item()
                
            sc_loss, mag_loss = mr_stft_loss(x_pred_t, x_t)
            
            loss_G = sc_loss + mag_loss
            
            netG.zero_grad()
            loss_G.backward()
            optG.step()

            ######################
            # Update tensorboard #
            ######################
            costs.append([loss_G.item(), sc_loss.item(), mag_loss.item(), s_error])
            
            writer.add_scalar("loss/generator", costs[-1][0], steps)
            writer.add_scalar("loss/spectral_convergence", costs[-1][1], steps)
            writer.add_scalar("loss/log_spectrum", costs[-1][2], steps)
            writer.add_scalar("loss/mel_reconstruction", costs[-1][3], steps)
            steps += 1

            if steps % args.save_interval == 0:
                st = time.time()
                with torch.no_grad():
                    for i, (voc, _) in enumerate(zip(test_voc, test_audio)):
                        n = torch.randn(1, 128, 10).cuda()
                        pred_audio = netG(voc, n)
                        pred_audio = pred_audio.squeeze().cpu()
                        save_sample(root / ("generated_%d.wav" % i), 22050, pred_audio)
                        writer.add_audio(
                            "generated/sample_%d.wav" % i,
                            pred_audio,
                            epoch,
                            sample_rate=22050,
                        )

                torch.save(netG.state_dict(), root / "netG.pt")
                torch.save(optG.state_dict(), root / "optG.pt")
                                
                if np.asarray(costs).mean(0)[-1] < best_mel_reconst:
                    best_mel_reconst = np.asarray(costs).mean(0)[-1]                    
                    torch.save(netG.state_dict(), root / "best_netG.pt")

                print("Took %5.4fs to generate samples" % (time.time() - st))
                print("-" * 100)

            if steps % args.log_interval == 0:
                print(
                    "Epoch {} | Iters {} / {} | ms/batch {:5.2f} | loss {}".format(
                        epoch,
                        iterno,
                        len(train_loader),
                        1000 * (time.time() - start) / args.log_interval,
                        np.asarray(costs).mean(0),
                    )
                )
                costs = []
                start = time.time()
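
A hypothetical invocation; parse_args() is not shown, so the flag names below are only inferred from the attributes used above and may differ:

# python train.py --save_path runs/exp1 --data_path ./data \
#     --n_mel_channels 80 --batch_size 16 --seq_len 8192 \
#     --epochs 3000 --save_interval 1000 --log_interval 100 --n_test_samples 8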
Example #9
    parser.set_defaults(verbose=False)

    opt = parser.parse_args()
    with open(opt.config) as f:
        config = yaml.safe_load(f)

    if opt.checkpoint is not None:
        log_dir = os.path.join(*os.path.split(opt.checkpoint)[:-1])
    else:
        log_dir = os.path.join(opt.log_dir,
                               os.path.basename(opt.config).split('.')[0])
        log_dir += ' ' + strftime("%d_%m_%y_%H.%M.%S", gmtime())

    generator = Generator(
        num_regions=config['model_params']['num_regions'],
        num_channels=config['model_params']['num_channels'],
        revert_axis_swap=config['model_params']['revert_axis_swap'],
        **config['model_params']['generator_params'])

    if torch.cuda.is_available():
        generator.to(opt.device_ids[0])
    if opt.verbose:
        print(generator)

    region_predictor = RegionPredictor(
        num_regions=config['model_params']['num_regions'],
        num_channels=config['model_params']['num_channels'],
        estimate_affine=config['model_params']['estimate_affine'],
        **config['model_params']['region_predictor_params'])

    if torch.cuda.is_available():
        region_predictor.to(opt.device_ids[0])
Example #10
def train(args):
    pre_trained = args.pre_trained
    PATH = args.path_results
    lrD = args.lrD
    lrG = args.lrG
    epochs = args.epochs
    batch_size = args.batch
    device = args.device
    save_every = args.save_every
    data = args.data
    with open(CONFIG_DIR + args.config, 'r') as f:
        config = json.load(f)
    TT = args.fc_tensorized

    print(TT)

    # Create the directory tree for this run's results
    if TT:
        PATH = PATH + "/{}_ttfc".format(config["id"])
    else:
        PATH = PATH + "/{}".format(config["id"])
    os.makedirs(PATH + '/Random_results', exist_ok=True)
    os.makedirs(PATH + '/Fixed_results', exist_ok=True)

    print("### Loading data ###")
    train_loader = load_dataset(data, batch_size, is_train=True)
    print("### Loaded data ###")

    print("### Create models ###")
    D = Discriminator(config, TT).to(device)
    G = Generator(config).to(device)
    params = sum(p.numel() for p in D.parameters() if p.requires_grad)
    params += sum(p.numel() for p in G.parameters() if p.requires_grad)
    print("The model has {} parameters".format(params))
    if pre_trained:
        D.encoder.load()
        G.decoder.load()

    G_optimizer = optim.Adam(G.parameters(), lr=lrG, betas=(0.5, 0.999))
    D_optimizer = optim.Adam(D.parameters(), lr=lrD, betas=(0.5, 0.999))

    train_hist = {'D_losses': [], 'G_losses': [], 'G_fix_losses': []}

    BCE_loss = nn.BCELoss()
    fixed_z_ = torch.randn((5 * 5, 100)).to(device)  # fixed noise
    for epoch in range(epochs):
        if epoch == 1 or epoch % save_every == 0:
            D_test = copy.deepcopy(D)
        D_losses = []
        G_losses = []
        G_fix_losses = []
        for x, _ in train_loader:
            x = x.to(device)
            D_loss = D.train_step(x, G, D_optimizer, BCE_loss, device)
            G_loss = G.train_step(D, batch_size, G_optimizer, BCE_loss, device)
            # G_fix_loss = G.evaluate(
            #     D_test,
            #     batch_size,
            #     BCE_loss,
            #     device
            # )

            D_losses.append(D_loss)
            G_losses.append(G_loss)
            # G_fix_losses.append(G_fix_loss)

        meanDloss = torch.mean(torch.FloatTensor(D_losses))
        meanGloss = torch.mean(torch.FloatTensor(G_losses))
        # While G.evaluate above is commented out, G_fix_losses stays empty
        # and this mean evaluates to NaN.
        meanGFloss = torch.mean(torch.FloatTensor(G_fix_losses))
        train_hist['D_losses'].append(meanDloss)
        train_hist['G_losses'].append(meanGloss)
        train_hist['G_fix_losses'].append(meanGFloss)
        print(
            "[{:d}/{:d}]: loss_d: {:.3f}, loss_g: {:.3f}, loss_g_fix: {:.3f}".
            format(epoch + 1, epochs, meanDloss, meanGloss, meanGFloss))
        p = PATH + '/Random_results/MNIST_DCGAN_' + str(epoch + 1) + '.png'
        fixed_p = PATH + '/Fixed_results/MNIST_DCGAN_' + str(epoch + 1) + '.png'
        z_ = torch.randn((5 * 5, 100)).to(device)
        show_result(G,
                    100,
                    fixed_z_,
                    z_, (epoch + 1),
                    save=True,
                    path=p,
                    isFix=False)
        show_result(G,
                    100,
                    fixed_z_,
                    z_, (epoch + 1),
                    save=True,
                    path=fixed_p,
                    isFix=True)

    print("Training complete. Saving.")
    save_models(D, G, PATH, train_hist, epochs)
    show_train_hist(train_hist,
                    save=True,
                    path=PATH + '/MNIST_DCGAN_train_hist.png')
    save_gif(PATH, epochs)

    return D, G
Example #11
if DEBUG:
    test_id = '14'  # test case number [01-15]
    path_root = './'
    args = {}
    args['src'] = f'{path_root}{test_id}/src.pas'  # source Pascal file
    args['gen'] = f'{path_root}{test_id}/gen.c'  # generated C file
else:
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('src')  # source Pascal file
    arg_parser.add_argument('gen')  # generated C file
    args = vars(arg_parser.parse_args())

with open(args['src'], 'r') as source:
    text = source.read()
    lexer = Lexer(text)
    tokens = lexer.lex()
    parser = Parser(tokens)
    ast = parser.parse()
    # grapher = Grapher(ast)
    # grapher.graph()
    symbolizer = Symbolizer(ast)
    symbolizer.symbolize()
    generator = Generator(ast)
    generator.generate(args['gen'])
    runner = Runner(ast)
    runner.run()

# ACINONYX - END
Example #12
                        help='Names of the devices comma separated.')
    parser.add_argument('--verbose', dest='verbose', action='store_true', help='Print model architecture')
    parser.add_argument('--cpu', default=False, action='store_true', help='Run on cpu')
    parser.set_defaults(verbose=False)

    opt = parser.parse_args()
    with open(opt.config) as f:
        config = yaml.safe_load(f)

    if opt.checkpoint is not None:
        log_dir = os.path.join(*os.path.split(opt.checkpoint)[:-1])
    else:
        log_dir = os.path.join(opt.log_dir, os.path.basename(opt.config).split('.')[0])
        log_dir += ' ' + strftime('%d_%m_%y_%H.%M.%S', gmtime())

    generator = Generator(**config['model_params']['generator_params'],
                          **config['model_params']['common_params'])

    if torch.cuda.is_available():
        generator.to(opt.device_ids[0])
    if opt.verbose:
        print(generator)

    checkpoint_with_kp = torch.load(opt.checkpoint_with_kp, map_location='cpu' if opt.cpu else None)

    kp_detector = KPDetector(checkpoint_with_kp, **config['model_params']['kp_detector_params'],
                             **config['model_params']['common_params'])

    if torch.cuda.is_available():
        kp_detector.to(opt.device_ids[0])

    if opt.verbose:
        print(kp_detector)
Example #13
	def __init__(
		self,
		encoder_dict,
		encoder_padding_idx,
		encoder_emb_size,
		encoder_hid_size,
		encoder_bidirectional,
		encoder_rnn_cell_type,
		encoder_is_packed,
		encoder_batch_first,
		encoder_num_layers,
		encoder_dropout,
		decoder_dict,
		decoder_padding_idx,
		decoder_emb_size,
		decoder_hid_size,
		decoder_rnn_cell_type,
		decoder_num_layers,
		decoder_dropout,
		global_attention_type,
		generator_dim_lst,
		generator_num_layers
	):
		super(GlobalAttentionSeq2Seq, self).__init__()

		self.name = 'GlobalAttentionSeq2Seq'

		self.encoder = Encoder(
			encoder_dict,
			encoder_padding_idx,
			encoder_emb_size,
			encoder_hid_size,
			encoder_bidirectional,
			encoder_rnn_cell_type,
			encoder_is_packed,
			encoder_batch_first,
			encoder_num_layers,
			encoder_dropout
		)

		self.bridge = Bridge(
			encoder_bidirectional,
			encoder_num_layers,
			encoder_hid_size,
			encoder_rnn_cell_type,
			decoder_num_layers,
			decoder_hid_size,
			decoder_rnn_cell_type
		)

		self.decoder = GlobalAttentiveDecoder(
			decoder_dict,
			decoder_padding_idx,
			decoder_emb_size,
			decoder_hid_size,
			decoder_rnn_cell_type,
			decoder_num_layers,
			decoder_dropout,
			encoder_hid_size,
			global_attention_type,
			encoder_bidirectional
		)

		self.generator = Generator(
			decoder_dict.size(),
			decoder_hid_size,
			generator_dim_lst,
			generator_num_layers
		)
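
A hypothetical instantiation; src_dict and tgt_dict are assumed to be vocabulary objects exposing .size(), and every hyperparameter value below is a placeholder:

model = GlobalAttentionSeq2Seq(
    encoder_dict=src_dict, encoder_padding_idx=0,
    encoder_emb_size=256, encoder_hid_size=512,
    encoder_bidirectional=True, encoder_rnn_cell_type='LSTM',
    encoder_is_packed=True, encoder_batch_first=True,
    encoder_num_layers=2, encoder_dropout=0.3,
    decoder_dict=tgt_dict, decoder_padding_idx=0,
    decoder_emb_size=256, decoder_hid_size=512,
    decoder_rnn_cell_type='LSTM', decoder_num_layers=2,
    decoder_dropout=0.3, global_attention_type='general',
    generator_dim_lst=[512], generator_num_layers=1)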