Code example #1
File: helper.py Project: gridl/videoAd
    def get_play_list(self):
        """
        Base method, run generate playlist
        """
        videoal = list(self.day.video_ad.all()) + list(self.day.image_ad.all())
        textal = list(self.day.text_ad.all())

        video_ad_list = Generator(videoal)
        text_ad_list = Generator(textal)

        chunk_len = self.day.video_count
        chunk_len_txt = self.day.text_count

        block, playlist = {}, []

        ctime = self.time_to_seconds(self.day.start_time)
        while ctime < self.time_to_seconds(self.day.stop_time):
            next_chunk = videoal if chunk_len == 0 else video_ad_list[:chunk_len]
            next_chunk_text = textal if chunk_len_txt == 0 else text_ad_list[:chunk_len_txt]

            # next chunk duration
            nxd = self.get_chunk_duration(next_chunk)
            im = self.is_immediately(ctime, ctime + nxd)

            # ad
            playlist.append({
                'time':
                str(self.seconds_to_time(ctime)),
                'params':
                self.get_params_list(next_chunk + next_chunk_text)
            })

            if im:
                # immediately
                playlist.append({
                    'time': str(im['time']),
                    'params': im['params']
                })
                ctime = im['end']
            else:
                ctime += nxd

            # Guard: a zero-duration chunk would otherwise loop forever
            if not nxd:
                return playlist

        return playlist
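The chunks above are taken by slicing Generator objects, which implies a cyclic buffer that hands out ads in rotation. A minimal sketch of such a class, inferred only from the [:n] slicing used by get_play_list (the real helper.py may differ):

class Generator:
    """Cyclic view over a list: [:n] returns the next n items, wrapping around."""

    def __init__(self, items):
        self.items = list(items)
        self.pos = 0

    def __getitem__(self, key):
        n = key.stop if isinstance(key, slice) else 1
        if not self.items:
            return []
        chunk = [self.items[(self.pos + i) % len(self.items)] for i in range(n)]
        self.pos = (self.pos + n) % len(self.items)
        return chunk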
Code example #2
File: train.py Project: uidchet/tflite-issue
    def start_training(self):

        # Get images and labels.
        image_gen = Generator("train")

        # Distributed dataset
        dataset = tf.data.Dataset.from_generator(
            image_gen.generator,
            output_types=(tf.float32, tf.float32),
            output_shapes=(tf.TensorShape([256, 256, 6]),
                           tf.TensorShape([32, 32, 1])))
        dataset = dataset.batch(self.global_batch_size)
        dist_dataset = self.strategy.experimental_distribute_dataset(dataset)

        step_per_epoch = int(
            math.floor(
                len(image_gen.data_list_with_labels) / self.global_batch_size))

        # initialize loss plots in mlflow
        mlflow_plot = PlotLosses()

        # begin training
        with self.strategy.scope():
            for epoch in range(0, 1):
                start = time.time()
                ### train phase ###
                iterator = iter(dist_dataset)
                for step in range(step_per_epoch):
                    dep_map_loss = self.train(next(iterator))
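self.train itself is not shown. Under tf.distribute, a train step typically wraps a per-replica step in strategy.run and reduces the per-replica losses; a sketch under that assumption (self.model, self.optimizer, and the squared-error loss are placeholders, not from the source):

    @tf.function
    def train(self, dist_inputs):
        def step_fn(inputs):
            images, labels = inputs
            with tf.GradientTape() as tape:
                preds = self.model(images, training=True)
                # Per-example loss, then average over the *global* batch size
                per_example = tf.reduce_mean(tf.square(labels - preds), axis=[1, 2, 3])
                loss = tf.nn.compute_average_loss(
                    per_example, global_batch_size=self.global_batch_size)
            grads = tape.gradient(loss, self.model.trainable_variables)
            self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
            return loss

        per_replica_loss = self.strategy.run(step_fn, args=(dist_inputs,))
        return self.strategy.reduce(tf.distribute.ReduceOp.SUM,
                                    per_replica_loss, axis=None)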
Code example #3
def make_model(cnn3d,
               tgt_vocab,
               N=6,
               d_model=512,
               d_ff=2048,
               h=8,
               dropout=0.1):
    "Helper: Construct a model from hyperparameters."
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        nn.Sequential(c(position)),
        nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
        Generator(d_model, tgt_vocab), cnn3d)

    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    for name, p in model.named_parameters():
        if not name.startswith("cnn3d") and p.requires_grad and p.dim() > 1:
            nn.init.xavier_uniform_(p)

    return model
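Typical usage, assuming a 3D-CNN feature extractor and a target vocabulary size (both hypothetical; the snippet does not show them):

cnn3d_encoder = CNN3D()  # hypothetical video backbone
model = make_model(cnn3d_encoder, tgt_vocab=10000)  # defaults: N=6, d_model=512, h=8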
Code example #4
    def __init__(
            self,
            data_token_path="./indicTranslation/hi_en_t2t_v3/token_data/",
            model_path="./indicTranslation/hi_en_t2t_v3/hi2enTranslation.pt",
            tokenizer_path="./indicTranslation/sentencepiece.bpe.model"):
        # Download the pretrained checkpoint on first use
        if not os.path.exists(model_path):
            url = "https://drive.google.com/uc?id=1rrySQx5FJ-IxCPiLxJFYU5_IfwAyeEGI"
            gdown.download(url, model_path, quiet=False)
        self.gen = Generator(data_token_path, model_path)
        self.tokObj = minTokenizer(tokenizer_path)
Code example #5
    def __init__(
            self,
            data_token_path="./indicTranslation/en_hi_t2t_v3/token_data/",
            model_path="./indicTranslation/en_hi_t2t_v3/en2hiTranslation.pt",
            tokenizer_path="./indicTranslation/sentencepiece.bpe.model"):
        # Download the pretrained checkpoint on first use
        if not os.path.exists(model_path):
            url = "https://drive.google.com/uc?id=1uWmlwYxISz5CB33BQQim1OulKbKHqj-W"
            gdown.download(url, model_path, quiet=False)
        self.gen = Generator(data_token_path, model_path)
        self.tokObj = minTokenizer(tokenizer_path)
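Both wrappers pair a Generator with a SentencePiece tokenizer but omit the actual translation call. A plausible method, mirroring the standalone usage in code example #9 below (the method itself, and passing minTokenizer where example #9 passes the raw sentencepiece processor, are assumptions):

    def translate(self, text, srclang="en"):
        # same call shape as example #9: get_translation(gen, sp, text, srclang)
        return get_translation(self.gen, self.tokObj, text, srclang)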
Code example #6
File: UserIntrface.py Project: 4672513gg/3118005358
    def get_(self):
        self.order += 1
        # Check whether previously generated files exist and remove them
        if os.path.exists('./docs/Exercises.txt'):
            os.remove('./docs/Exercises.txt')
        if os.path.exists('./docs/Answer.txt'):
            os.remove('./docs/Answer.txt')

        try:
            n = int(self.entry_input_num.get())
            r = int(self.entry_input_range.get())
            if r < 2 or n < 1:
                tk.messagebox.showinfo("Info", "Invalid input")
                return  # bail out instead of falling through to "Success"
            elif n > 30000 or r < 50:
                tk.messagebox.showinfo("Info", "Generation will take a while, please be patient")
                Generator(n, r, self.order).multi_processor()
            else:
                Generator(n, r, self.order).multi_processor()
            tk.messagebox.showinfo("Info", "Success")
            self.open_explorer()
        except ValueError:
            tk.messagebox.showinfo("Info", "Invalid input")
Code example #7
def main(conf=Config):
    tools.backup(conf)

    X = tf.placeholder(tf.float32, shape=[256, 64, 64, 3], name="X")
    Y = tf.placeholder(tf.float32, shape=[256, conf.label_size], name="Y")

    # Generator feeding data
    G = Generator(conf=conf)
    (G_X, G_Y, G_name, G_offset_x, G_offset_y, G_noise) = G.read()

    # Network building graph
    N = Network(conf=conf)
    N.train(X, Y, G_X, G_Y)
Code example #8
}

train_image_var = tf.placeholder(
    tf.float32,
    [flags['batch_size'], flags['image_size'], flags['image_size'], 3])
train_label_var = tf.placeholder(tf.float32, [flags['batch_size'], 2])
is_training = tf.placeholder(tf.bool)

train_dataset_list = [
    generate_dataflow(train_dic['positive'] + train_dic['negative'],
                      dataflow_option) for _ in range(num_gpu)
]
train_generators = [
    Generator({
        'dataset': train_dataset_list[i],
        'placeholders': [train_image_var, train_label_var],
        'queue_size': 100
    }) for i in range(num_gpu)
]

log_print('[i] generate dataset and generators', log_txt_path)

#######################################################################################
# 3. Optimizer
#######################################################################################
global_step = tf.placeholder(dtype=tf.int32)

warmup_lr_op = (tf.to_float(global_step) / tf.to_float(flags['warmup_iteration'])
                * flags['init_learning_rate'])
decay_lr_op = tf.train.cosine_decay(
    flags['init_learning_rate'],
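The cosine_decay call is cut off above. The warmup and decay ops are usually stitched together by switching on the warmup boundary; a sketch of how such a schedule is typically combined (the tf.cond switch is an assumption; only warmup_lr_op and decay_lr_op come from the snippet):

learning_rate_op = tf.cond(
    global_step < flags['warmup_iteration'],
    lambda: warmup_lr_op,  # linear ramp up to init_learning_rate
    lambda: decay_lr_op)   # cosine decay afterwards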
Code example #9
from utils import get_translation, Generator, sp
import sentencepiece as spm

data_token_path = "./en_hi_t2t_v3/token_data/"
model_checkpoint = "./en_hi_t2t_v3/checkpoints/checkpoint9.pt"
sp_path = "./sentencepiece.bpe.model"
srclang = "en"  # use "hi" for hi2en

gen = Generator(data_token_path, model_checkpoint)
print("Model Checkpoint Load Complete")
sp.load(sp_path)
print("Tokenizer Load Complete\n")

while True:
    text = input("English Text : ")
    outtext = get_translation(gen, sp, text, srclang)
    print("Hindi Text : %s\n" % outtext)
Code example #10
plt.show()

batch_size = 64
num_channels = batch[0][0].shape[0]
img_size = batch[0][0].shape[1]
channel_noise = 256
lr = 0.0005
features_d = 16
features_g = 16
num_epoch = 10
device = 'cuda' if torch.cuda.is_available() else 'cpu'

print(batch_size, num_channels, img_size, img_size)

descr = Descriminator(num_channels, features_d).to(device)
gen = Generator(num_channels, features_g, channel_noise).to(device)
optim_d = torch.optim.Adam(descr.parameters(), lr=lr, betas=(0.5, 0.999))
optim_g = torch.optim.Adam(gen.parameters(), lr=lr, betas=(0.5, 0.999))
criterion = nn.BCELoss()

descr.train()
gen.train()

label_real = 1
label_fake = 0
initial_noise = torch.randn(64, channel_noise, 1, 1).to(device)
writer_for_real = SummaryWriter('logs/gan/real')
writer_for_fake = SummaryWriter('logs/gan/fake')

print('training...')
for epoch in range(1, num_epoch+1):
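The epoch body is cut off above. With the variables already defined, a standard DCGAN-style inner loop would look roughly like this (loader is a hypothetical DataLoader over the same data, and descr is assumed to end in a sigmoid since BCELoss is used):

    for real, _ in loader:
        real = real.to(device)
        noise = torch.randn(real.size(0), channel_noise, 1, 1).to(device)
        fake = gen(noise)

        # Discriminator: push reals toward label_real, detached fakes toward label_fake
        d_real = descr(real).reshape(-1)
        d_fake = descr(fake.detach()).reshape(-1)
        loss_d = (criterion(d_real, torch.full_like(d_real, float(label_real))) +
                  criterion(d_fake, torch.full_like(d_fake, float(label_fake)))) / 2
        optim_d.zero_grad()
        loss_d.backward()
        optim_d.step()

        # Generator: make the discriminator score fakes as real
        output = descr(fake).reshape(-1)
        loss_g = criterion(output, torch.full_like(output, float(label_real)))
        optim_g.zero_grad()
        loss_g.backward()
        optim_g.step()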
Code example #11
args = get_args()

n_samples = args.n_data
n_epochs = args.n_epochs
batch_size = args.batch_size
m = 5
gmm = GMM(n_gaussians=2, dim=2, random_seed=22)
sample_data = torch.Tensor(gmm.sample(n_samples))

dataloader_train = DataLoader(sample_data, batch_size=batch_size)

noise = torch.rand(n_samples, 2)
fixed_noise = torch.rand(n_samples, 2)

netG = Generator(sample_data.numpy())
netD = Discrimator()

if torch.cuda.is_available():
    netG = netG.cuda()
    netD = netD.cuda()
    fixed_noise = fixed_noise.cuda()

optim_netD = torch.optim.Adam(netD.parameters(), lr=args.lr)
optim_netG = torch.optim.Adam(netG.parameters(), lr=args.lr)

for epoch_i in range(n_epochs):
    epoch_discriminator_losses = []
    epoch_generator_losses = []

    for _, batch_real in enumerate(dataloader_train):
Code example #12
        
        'num_prefetch_for_dataset' : 10,
        'num_prefetch_for_batch' : 5,
        
        'number_of_cores' : 2,
    }

    train_image_var = tf.placeholder(tf.float32, [None, flags.image_size, flags.image_size, 3])
    train_label_var = tf.placeholder(tf.float32, [None, len(class_names)])
    train_image_paths_var = tf.placeholder(tf.string, [None])
    is_training = tf.placeholder(tf.bool)
    
    generator_func = lambda ds: Generator({
        'dataset' : ds, 
        'placeholders' : [train_image_var, train_label_var, train_image_paths_var], 

        'queue_size' : 10, 
        'batch_size' : flags.batch_size // num_gpu,
    })

    dataset = []
    for class_name in class_names:
        dataset += train_dic[class_name]
    
    if flags.OAA_update_iteration == -1:
        flags.OAA_update_iteration = len(dataset) // flags.batch_size
        log_print('[i] calculate 1 epoch = {} iteration'.format(flags.OAA_update_iteration), log_txt_path)    

    train_dataset_list = [generate_dataflow(dataset, dataflow_option) for _ in range(num_gpu)]
    train_generators = [generator_func(train_dataset_list[i]) for i in range(num_gpu)]
    
Code example #13
                # Weakly_Augment(),
            ],
            'shuffle': False,
            'remainder': False,
            'batch_size': 64,
            'image_size': (224, 224),
            'num_prefetch_for_dataset': 10,
            'num_prefetch_for_batch': 2,
        })

    image_var = tf.placeholder(tf.float32, [64, 224, 224, 3])
    label_var = tf.placeholder(tf.float32, [64, 2])

    generator = Generator({
        'dataset': train_dataset,
        'placeholders': [image_var, label_var],
        'queue_size': 10,
    })

    images_op, labels_op = generator.dequeue()

    ###############################################
    # Run
    ###############################################
    sess = tf.Session()
    coord = tf.train.Coordinator()

    generator.set_session(sess)
    generator.set_coordinator(coord)
    generator.start()
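Once start() is called, the Generator fills its queue on background threads and batches come out of the dequeue ops. A minimal consumption loop under these assumptions (num_steps and the training ops are hypothetical; the Coordinator calls are standard TF1 API):

    sess.run(tf.global_variables_initializer())
    for step in range(num_steps):  # num_steps: hypothetical
        images, labels = sess.run([images_op, labels_op])
        # ... run the training ops on the dequeued batch ...
    coord.request_stop()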
Code example #14
    np.random.seed(7)

    logger.info("\nStarting training process\n")

    config = parse_arguments()

    dataset = load_data_from_config(config['LOADING'])

    data_preprocessed = preprocess_data(
        dataset,
        rm_stopwords=config['PREPROCESS'].getboolean('rm_stopwords', True),
        stemming=config['PREPROCESS'].getboolean('stem', True))

    data_doc2vec = DocumentsTagged(data_preprocessed)

    gen_data_doc2vec = Generator(data_doc2vec)

    corpus = load_and_process(gen_data_doc2vec)

    if config['TRAIN'].getboolean('downsample'):
        corpus = downsample(corpus)

    models_to_train = ModelsLoader.load_models_from_config(
        config['LOADING'], config['PARAMETERS'])

    models = ModelsTrainer.init_models(models_to_train, corpus)

    ModelsTrainer.train_from_config(models, corpus, config['TRAIN'])

    q_checker = QualityChecker(models, corpus)
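Models such as Doc2Vec iterate the corpus once per epoch, so the Generator wrapper here is presumably a re-iterable view over the tagged documents. The usual pattern for that is (a sketch, not necessarily the project's actual class):

class Generator:
    """Re-iterable wrapper: each iter() restarts the underlying sequence."""

    def __init__(self, documents):
        self.documents = documents  # e.g. a DocumentsTagged instance

    def __iter__(self):
        return iter(self.documents)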
Code example #15
import os
import torch
import numpy as np
from utils import Generator
import matplotlib.pyplot as plt
from IPython.display import HTML
import torchvision.utils as vutils
import matplotlib.animation as animation

from IPython import embed

if __name__ == "__main__":

    model_dir = '../checkpoints'
    mids = list(range(1, 11))
    fixed_noise = torch.randn(64, 100, 1, 1).cuda(0)
    generator = Generator(100, 64).cuda(0)
    generator = torch.nn.DataParallel(generator, device_ids=[0, 1])
    imgs_list = []

    for mid in mids:

        checkpoints = torch.load(
            os.path.join(model_dir, 'epoch_%d.pth.tar' % mid))
        epoch = checkpoints['epoch']
        generator.load_state_dict(checkpoints['generator'])
        print('epoch : %d, mid : %d' % (epoch, mid))
        generator.eval()
        fake = generator(fixed_noise).detach().cpu()
        imgs_list.append(fake)

    fig = plt.figure(figsize=(8, 8))
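The snippet stops at the figure. Turning imgs_list into the usual checkpoint-evolution animation follows the standard matplotlib recipe, using only the modules already imported above:

    plt.axis('off')
    frames = [[plt.imshow(np.transpose(
        vutils.make_grid(imgs, padding=2, normalize=True), (1, 2, 0)),
        animated=True)] for imgs in imgs_list]
    ani = animation.ArtistAnimation(fig, frames, interval=1000,
                                    repeat_delay=1000, blit=True)
    HTML(ani.to_jshtml())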
Code example #16
import os

import torch
import torch.nn as nn
import soundfile
from scipy import signal
from scipy.io import wavfile

from utils import slice_signal, Generator

pre_emphasis = lambda batch: signal.lfilter([1, -0.95], [1], batch)
de_emphasis = lambda batch: signal.lfilter([1], [1, -0.95], batch)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
PATH = os.path.dirname(os.path.realpath(__file__))

if __name__ == '__main__':

    generator = nn.DataParallel(Generator(), device_ids=[1, 0])
    state = torch.load(f'{PATH}/checkpoints/state-13.pkl', map_location=device)
    generator.load_state_dict(state['generator'])
    generator.to(device)

    for file in os.listdir(f'{PATH}/input'):

        # Read and slice audio input
        noisy_slices = slice_signal(f'{PATH}/input/{file}', 2**13, 1, 8000)
        enhanced_speech = []

        for noisy_slice in noisy_slices:
            noisy_slice = noisy_slice.reshape(1, 1, 8192)
            generator.eval()
            z = nn.init.normal_(torch.Tensor(1, 1024, 8))  # normal_ replaces the deprecated nn.init.normal
            noisy_slice = torch.from_numpy(pre_emphasis(noisy_slice)).type(
Code example #17
parser.add_argument("--augment_images", default=False, type=bool, help="Augment images using transformations")
parser.add_argument("--model_path", default="models", help="Model Save Path")
parser.add_argument("--log_path", default="logs", help="Training Log Path")
parser.add_argument("--export_js", default=False, help="Export to TensorflowJS")

args = parser.parse_args()

load_path = args.load_path
input_size = args.input_size
batch_size = args.batch_size
n_epochs = args.epochs
augment = args.augment_images
model_path = args.model_path
log_path = args.log_path

generator = Generator(load_path)

n_train = len(generator.train_files)
n_test = len(generator.test_files)

print('Number of train images :', n_train)
print('Number of test images :', n_test)

# Test to check generator
# generator.test_keras_generator(batch_size=4)

def relu6(x):
    '''Custom activation using relu6'''

    return K.relu(x, max_value=6)
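Because relu6 is a plain Python function, any saved model using it must be reloaded with the activation registered. With Keras that is typically done like this (the import and the 'model.h5' path are placeholders, not from the snippet):

from keras.models import load_model

model = load_model('model.h5', custom_objects={'relu6': relu6})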
Code example #18
File: train.py Project: Kingsleyandher/Keras_BASNet
nb_valid = int(len(trainFile)*config.valid_percent)
val_lines = trainFile[ind_list[:nb_valid]] 
train_lines = trainFile[ind_list[nb_valid:]] 

# load model
model = ModelBASNet()

checkpoint_period = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                    monitor='val_loss', save_weights_only=True, save_best_only=True, period=1, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='min')
tensorboard = TensorBoard(log_dir=log_dir)


# train model
if True:

    model.compile(optimizer=config.optimizer,
                  loss=muti_bce_loss_fusion,
                  metrics=["binary_crossentropy", output_loss, dice_coef, compute_IOU, ssim])
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(len(train_lines), len(val_lines),
                                                                               config.batch_size))

    gen = Generator(config.batch_size, train_lines, config.image_shape, config.nb_class).generate()
    gen_val = Generator(config.batch_size, val_lines, config.image_shape, config.nb_class).generate(False)

    model.fit_generator(gen, steps_per_epoch=max(1, len(train_lines) // config.batch_size),
                        validation_data=gen_val, validation_steps=max(1, len(val_lines) // config.batch_size),
                        epochs=config.nb_epoch, initial_epoch=config.init_Epoch,
                        callbacks=[checkpoint_period, reduce_lr, early_stopping, tensorboard], shuffle=True)
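fit_generator requires generate() to yield (inputs, targets) batches endlessly. A minimal shape-compatible sketch of such a method (load_pair is hypothetical; the real class presumably also shuffles and augments):

    def generate(self, training=True):
        i = 0
        while True:  # fit_generator expects an endless stream
            batch_x, batch_y = [], []
            for _ in range(self.batch_size):
                x, y = self.load_pair(self.lines[i % len(self.lines)])  # load_pair: hypothetical
                batch_x.append(x)
                batch_y.append(y)
                i += 1
            yield np.array(batch_x), np.array(batch_y)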
Code example #19
def main(argv=None):
    # CLI
    parser = argparse.ArgumentParser()
    parser.add_argument("name", help="Name of the experiment")
    parser.add_argument(
        "-a",
        "--augment",
        action="store_true",
        help="If True, we apply augmentations",
    )
    parser.add_argument("-b",
                        "--batch-size",
                        type=int,
                        default=16,
                        help="Batch size")
    parser.add_argument(
        "--b1",
        type=float,
        default=0.5,
        help="Adam optimizer hyperparamter",
    )
    parser.add_argument(
        "--b2",
        type=float,
        default=0.999,
        help="Adam optimizer hyperparamter",
    )
    parser.add_argument(
        "-d",
        "--device",
        type=str,
        default="cpu",
        choices=["cpu", "cuda"],
        help="Device to use",
    )
    parser.add_argument(
        "--eval-frequency",
        type=int,
        default=400,
        help="Generate generator images every `eval_frequency` epochs",
    )
    parser.add_argument(
        "--latent-dim",
        type=int,
        default=100,
        help="Dimensionality of the random noise",
    )
    parser.add_argument("--lr",
                        type=float,
                        default=0.0002,
                        help="Learning rate")
    parser.add_argument(
        "--ndf",
        type=int,
        default=32,
        help="Number of discriminator feature maps (after first convolution)",
    )
    parser.add_argument(
        "--ngf",
        type=int,
        default=32,
        help="Number of generator feature maps (before last transposed convolution)",
    )
    parser.add_argument(
        "-n",
        "--n-epochs",
        type=int,
        default=200,
        help="Number of training epochs",
    )
    parser.add_argument(
        "--mosaic-size",
        type=int,
        default=10,
        help="Size of the side of the rectangular mosaic",
    )
    parser.add_argument(
        "-p",
        "--prob",
        type=float,
        default=0.9,
        help="Probability of applying an augmentation",
    )

    args = parser.parse_args(argv)
    args_d = vars(args)
    print(args)

    img_size = 128

    # Additional parameters
    device = torch.device(args.device)
    mosaic_kwargs = {"nrow": args.mosaic_size, "normalize": True}
    n_mosaic_cells = args.mosaic_size * args.mosaic_size
    sample_showcase_ix = 0  # this sample will be used to demonstrate the augmentations

    augment_module = torch.nn.Sequential(
        K.RandomAffine(degrees=0, translate=(1 / 8, 1 / 8), p=args.prob),
        K.RandomErasing((0.0, 0.5), p=args.prob),
    )

    # Loss function
    adversarial_loss = torch.nn.BCELoss()

    # Initialize generator and discriminator
    generator = Generator(latent_dim=args.latent_dim, ngf=args.ngf)
    discriminator = Discriminator(
        ndf=args.ndf, augment_module=augment_module if args.augment else None)

    generator.to(device)
    discriminator.to(device)

    # Initialize weights
    generator.apply(init_weights_)
    discriminator.apply(init_weights_)

    # Configure data loader
    data_path = pathlib.Path("data")
    tform = transforms.Compose([
        transforms.Resize(img_size),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])
    dataset = DatasetImages(
        data_path,
        transform=tform,
    )
    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=True,
    )

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=args.lr,
                                   betas=(args.b1, args.b2))
    optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                   lr=args.lr,
                                   betas=(args.b1, args.b2))

    # Output path and metadata
    output_path = pathlib.Path("outputs") / args.name
    output_path.mkdir(exist_ok=True, parents=True)

    # Add other parameters (not included in CLI)
    args_d["time"] = datetime.now()
    args_d["kornia"] = str(augment_module)

    # Prepare tensorboard writer
    writer = SummaryWriter(output_path)

    # Log hyperparameters as text
    writer.add_text(
        "hyperparameter",
        pprint.pformat(args_d).replace(
            "\n", "  \n"),  # markdown needs 2 spaces before newline
        0,
    )
    # Log true data
    writer.add_image(
        "true_data",
        make_grid(torch.stack([dataset[i] for i in range(n_mosaic_cells)]),
                  **mosaic_kwargs),
        0,
    )
    # Log augmented data (only possible when an augment_module was attached)
    if discriminator.augment_module is not None:
        batch_showcase = dataset[sample_showcase_ix][None, ...].repeat(
            n_mosaic_cells, 1, 1, 1)
        batch_showcase_aug = discriminator.augment_module(batch_showcase)
        writer.add_image("augmentations",
                         make_grid(batch_showcase_aug, **mosaic_kwargs), 0)

    # Prepare evaluation noise
    z_eval = torch.randn(n_mosaic_cells, args.latent_dim).to(device)

    for epoch in tqdm(range(args.n_epochs)):
        for i, imgs in enumerate(dataloader):
            n_samples, *_ = imgs.shape
            batches_done = epoch * len(dataloader) + i

            # Adversarial ground truths (0.9 on reals = one-sided label smoothing)
            valid = 0.9 * torch.ones(
                n_samples, 1, device=device, dtype=torch.float32)
            fake = torch.zeros(n_samples,
                               1,
                               device=device,
                               dtype=torch.float32)

            # D preparation
            optimizer_D.zero_grad()

            # D loss on reals
            real_imgs = imgs.to(device)
            d_x = discriminator(real_imgs)
            real_loss = adversarial_loss(d_x, valid)
            real_loss.backward()

            # D loss on fakes
            z = torch.randn(n_samples, args.latent_dim).to(device)
            gen_imgs = generator(z)
            d_g_z1 = discriminator(gen_imgs.detach())

            fake_loss = adversarial_loss(d_g_z1, fake)
            fake_loss.backward()

            optimizer_D.step()  # we called backward twice, the result is a sum

            # G preparation
            optimizer_G.zero_grad()

            # G loss
            d_g_z2 = discriminator(gen_imgs)
            g_loss = adversarial_loss(d_g_z2, valid)

            g_loss.backward()
            optimizer_G.step()

            # Logging
            if batches_done % 50 == 0:
                writer.add_scalar("d_x", d_x.mean().item(), batches_done)
                writer.add_scalar("d_g_z1", d_g_z1.mean().item(), batches_done)
                writer.add_scalar("d_g_z2", d_g_z2.mean().item(), batches_done)
                writer.add_scalar("D_loss", (real_loss + fake_loss).item(),
                                  batches_done)
                writer.add_scalar("G_loss", g_loss.item(), batches_done)

            if epoch % args.eval_frequency == 0 and i == 0:
                generator.eval()
                discriminator.eval()

                # Generate fake images
                gen_imgs_eval = generator(z_eval)

                # Generate nice mosaic
                writer.add_image(
                    "fake",
                    make_grid(gen_imgs_eval.data, **mosaic_kwargs),
                    batches_done,
                )

                # Save checkpoint (and potentially overwrite an existing one)
                torch.save(generator, output_path / "model.pt")

                # Make sure generator and discriminator in the training mode
                generator.train()
                discriminator.train()
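A typical invocation of this script, given the CLI it defines (the file name is a placeholder):

python train.py my_experiment --device cuda --augment --batch-size 16 --n-epochs 400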