示例#1
0
def main():
    """Render the Script Buddy UI and generate film-script text on demand."""
    st.title(" Script Buddy *v2*")
    st.write("""
    Film Script Language Generation with GPT2
    ***
    """)
    model, tokenizer = loader()
    script_length = st.sidebar.slider(
        """ Max Script Length 
        (Longer length, slower generation)""", 50, 1000)
    seed_text = st.sidebar.text_area("Context")

    if not st.sidebar.button("Generate"):
        # nothing requested yet: show an empty text area
        sample = ['']
    else:
        started = time.time()
        kwargs = {'max_length': script_length}
        if seed_text:
            kwargs['input_text'] = seed_text
        sample = generate(model, tokenizer, **kwargs)
        # log generation wall time to the console
        print(time.time() - started)

    st.text(sample[0])
def main():
    """Denoise every .npy under noised_tgt/ into tmp/ and zip the results."""
    args = parser.parse_args()

    src_dir = os.path.join(DATA_DIR, 'noised_tgt')
    dst_dir = os.path.join(DATA_DIR, 'tmp')

    # drop stale outputs left over from a previous run
    for leftover in os.listdir(dst_dir):
        if leftover.endswith('.npy'):
            os.remove(os.path.join(dst_dir, leftover))

    # restore the generator from the snapshot file (CPU mapping by default)
    snapshot = torch.load(args.file, map_location=lambda s, _: s)
    model = Generator(snapshot['channels'])
    model.load_state_dict(snapshot['model'])

    if args.gpu is not None:
        model.cuda(args.gpu)

    # run generation for every noised input; name[7:] strips the
    # "noised_" prefix so outputs keep the original file name
    for name in sorted(os.listdir(src_dir)):
        if name.endswith('.npy'):
            generate(model,
                     os.path.join(src_dir, name),
                     os.path.join(dst_dir, name[7:]),
                     args.gpu)

    # bundle all outputs into the submission archive
    with zipfile.ZipFile('submission.zip', 'w') as zip_writer:
        for name in sorted(os.listdir(dst_dir)):
            zip_writer.write(os.path.join(dst_dir, name), name)
示例#3
0
 def build(self):
     """Create the node pool and pre-generate arrival/service times.

     The first ``self.num_fast_nodes`` nodes are created fast (rate 2);
     the remainder are slow (rate 1). Arrival times are sorted ascending.
     """
     num_fast_nodes = 0
     for i in range(self.n_nodes):
         # BUG FIX: comparison was inverted (`self.num_fast_nodes <
         # num_fast_nodes`), so the counter never advanced and no fast
         # node was ever created.
         if num_fast_nodes < self.num_fast_nodes:
             self.nodes.append(Node(self.queue_size, 2))
             num_fast_nodes += 1
         else:
             self.nodes.append(Node(self.queue_size, 1))
     if self.verbose:
         print("Generating Arrival and Service Times...")
     for i in tqdm(range(self.num_arrivals)):
         self.arrival_times.append(utils.generate(self.arrival_distn))
         self.service_times.append(utils.generate(self.service_distn))
     self.arrival_times.sort()
def train():
    """Train the next model generation from gathered game data.

    Loads the latest model name, trains on the most recent window of
    tf_records, and exports the resulting checkpoint as the new model.
    Errors during training are logged rather than propagated.
    """
    model_version, model_name = get_latest_model()
    logger.info("Training on gathered game data, initializing from {}".format(
        model_name))
    new_model_name = generate(model_version + 1)
    logger.info("New model will be {}".format(new_model_name))
    save_file = os.path.join(PATHS.MODELS_DIR, new_model_name)

    try:
        logger.info("Getting tf_records")
        tf_records = sorted(
            gfile.Glob(os.path.join(PATHS.TRAINING_CHUNK_DIR,
                                    '*.tfrecord.zz')))
        # keep only the most recent training window of records
        tf_records = tf_records[-1 *
                                (GLOBAL_PARAMETER_STORE.WINDOW_SIZE //
                                 GLOBAL_PARAMETER_STORE.EXAMPLES_PER_RECORD):]

        print("Training from:", tf_records[0], "to", tf_records[-1])

        with timer("Training"):
            network.train(PATHS.ESTIMATOR_WORKING_DIR, tf_records,
                          model_version + 1)
            network.export_latest_checkpoint_model(PATHS.ESTIMATOR_WORKING_DIR,
                                                   save_file)

    # BUG FIX: a bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception so interrupts still propagate.
    except Exception:
        logger.info("Got an error training")
        logging.exception("Train error")
示例#5
0
    def loop_outer(self):
        """Run the outer meta-training loop with periodic validation,
        early-stop tracking, and a checkpoint save every step."""
        total_steps = self.hp.steps_outer
        log_every = total_steps // 10
        train_loader = self.loaders[Splits.train]
        tracker = MetricsTracker(prefix=Splits.train)
        stop_saver = EarlyStopSaver(self.net)

        batches = generate(train_loader, limit=total_steps, show_progress=True)
        for step, batch in enumerate(batches):
            self.opt_outer.zero_grad()
            for task in MetaBatch(batch, self.device).get_tasks():
                tracker.store(self.loop_inner(task))
                self.opt_outer.store_grad()
            self.opt_outer.step(step)

            if step % log_every == 0:
                # report averaged train metrics plus a validation pass
                metrics = tracker.get_average()
                tracker.reset()
                metrics.update(self.loop_eval(Splits.val))
                print({k: round(v, 3) for k, v in metrics.items()})
                stop_saver.check_stop(metrics["val_loss"])
            self.save()
        stop_saver.load_best()
        self.save()
示例#6
0
def home():
    """Serve the meme form on GET; build and show the meme image on POST."""
    if request.method != "POST":
        return render_template("home.html")
    line = request.form["line"]
    meme = request.form["meme"]
    image = utils.generate(line, int(meme))
    return render_template("response.html", image=image)
示例#7
0
def make_association(sender, instance, **kwargs):
    """Fill the association instance from a freshly generated response.

    Requires ``server_url`` and ``assoc_type`` to already be set on the
    instance; raises otherwise.
    """
    if not (instance.server_url and instance.assoc_type):
        raise Exception('association.server_url or association.assoc_type is None')

    response = utils.generate(instance.server_url, assoc_type=instance.assoc_type)
    for field in ('handle', 'secret', 'lifetime'):
        setattr(instance, field, response[field])
    instance.issued = int(time.time())
示例#8
0
async def stream_files(request):
    """Stream the requested file back to the client as a binary attachment."""
    file = request.path_params.get("file")
    response_headers = {
        "content-disposition": f"attachment; filename={file}",
        "content-type": "application/octet-stream",
    }
    return StreamingResponse(generate(file), headers=response_headers)
示例#9
0
def parallel_generate(i):
    """Build grids pairing protein ``i`` with every ligand, plus match labels.

    Label is 1 for the true protein/ligand pair (i == j) and 0 otherwise.
    """
    data = []
    labels = []
    for j in range(1, RANGE):
        grids = generate(ligands[j][0], ligands[j][1], proteins[i][0],
                         proteins[i][1], RADIUS, DISTANCE_THRESHOLD)
        data += grids
        labels += [1 if i == j else 0] * len(grids)
    return data, labels
def initialize_random_model():
    """Bootstrap model 0 with random weights and export its checkpoint."""
    bootstrap_name = generate(0)
    bootstrap_model_path = os.path.join(PATHS.MODELS_DIR, bootstrap_name)
    logger.info(
        "Bootstrapping with working dir {}\n Model 0 exported to {}".format(
            PATHS.ESTIMATOR_WORKING_DIR, bootstrap_model_path))
    # make sure both target directories exist before exporting
    for directory in (PATHS.ESTIMATOR_WORKING_DIR,
                      os.path.dirname(bootstrap_model_path)):
        os.makedirs(directory, exist_ok=True)
    network.initialize_random_model(PATHS.ESTIMATOR_WORKING_DIR)
    network.export_latest_checkpoint_model(PATHS.ESTIMATOR_WORKING_DIR,
                                           bootstrap_model_path)
示例#11
0
    def loop_inner(
        self,
        task: List[torch.Tensor],
        bs: int,
        steps: int,
    ) -> Dict[str, float]:
        """Fine-tune on the task's train split, then score its test split."""
        x_train, y_train, x_test, y_test = task
        train_loader = DataLoader(TensorDataset(x_train, y_train), bs,
                                  shuffle=True)

        for batch in generate(train_loader, limit=steps):
            self.run_batch(batch, is_train=True)
        return self.run_batch((x_test, y_test), is_train=False)
    def loop_inner(self, task: MetaTask) -> Dict[str, float]:
        """Adapt on task.train (optionally early-stopping on task.val),
        then score task.test."""
        ds = RepeatTensorDataset(*task.train, num_repeat=self.hp.bs_inner)
        loader = DataLoader(ds, self.hp.bs_inner, shuffle=True)
        steps_per_epoch = self.hp.num_shots * self.hp.num_ways // self.hp.bs_inner
        stopper = EarlyStopCallback(self.net)

        for step, batch in enumerate(generate(loader, limit=self.hp.steps_inner)):
            self.run_batch(batch, is_train=True)
            # check validation loss once per (logical) epoch
            if self.hp.early_stop and step % steps_per_epoch == 0:
                val_metrics = self.run_batch(task.val, is_train=False)
                if stopper.check_stop(val_metrics["loss"]):
                    stopper.load_best()
                    break
        return self.run_batch(task.test, is_train=False)
    def loop_inner(self, task: MetaTask) -> Dict[str, float]:
        """Adapt on task.train using class prototypes, then score task.test.

        BUG FIX: the original referenced undefined names (``steps``,
        ``prototypes``, ``x_test``/``y_test``) and raised NameError on first
        call; they now resolve to ``self.hp.steps_inner``, the computed
        ``proto``, and ``task.test`` (mirroring the sibling loop_inner
        variants). Unused ``stopper``/``steps_per_epoch`` locals removed.
        """
        ds = RepeatTensorDataset(*task.train, num_repeat=self.hp.bs_inner)
        loader = DataLoader(ds, self.hp.bs_inner, shuffle=True)

        # calculate mean support embedding (prototype) for each class
        proto = self.get_prototypes(*task.train)

        for batch in generate(loader, limit=self.hp.steps_inner):
            self.run_batch(batch, proto, is_train=True)

        # evaluate with the same prototypes on the held-out test split
        return self.run_batch(task.test, proto, is_train=False)
    def initialize(self):
        """Randomly initialize weights and biases for both hidden layers
        and the output layer via utils.generate(rows, cols)."""
        hidden = self.hidden_layer_size

        self.first_layer_wts = utils.generate(hidden, self.input_size)
        self.first_layer_bias = utils.generate(hidden, 1)

        self.second_layer_wts = utils.generate(hidden, hidden)
        self.second_layer_bias = utils.generate(hidden, 1)

        self.result_wts = utils.generate(self.output_size, hidden)
        self.result_bias = utils.generate(self.output_size, 1)
示例#15
0
def generate():
    """HTTP endpoint: generate key material of size ``sz`` (optional exponent ``e``).

    Query params:
        sz: required key size (int); missing, zero, or non-numeric -> "Invalid".
        e:  optional public exponent (int).

    Returns a JSON document with the key components.
    """
    # BUG FIX: the original did int(str(request.args.get('sz'))) up front,
    # which raised ValueError (int('None')) whenever 'sz' was absent —
    # validate before converting instead of crashing.
    sz_arg = request.args.get('sz')
    e_arg = request.args.get('e')
    try:
        sz = int(sz_arg) if sz_arg else 0
    except ValueError:
        return "Invalid"
    if not sz:
        return "Invalid"

    if e_arg:
        ret = utils.generate(sz, int(e_arg))
    else:
        ret = utils.generate(sz)

    # indices 2 and 8 of `ret` are intentionally unused by this response
    final_ret = {
        'n': ret[0],
        'e': hex(ret[1]),
        'p': ret[3],
        'q': ret[4],
        'dp_1': ret[5],
        'dq_1': ret[6],
        'coef': ret[7],
        'd': hex(ret[9].d)
    }

    return json.dumps(final_ret)
示例#16
0
def main(_):
  """Dispatch on --mode: training, debug smoke test, data prep, or self-tests."""
  config = flags.FLAGS
  if config.mode == "train":
    assert config.dataset in ("mnist", "cifar10")
    config.in_shape = (config.batch_size, 32, 32, 3)
    # NOTE(review): eval() on flag values — acceptable only for trusted
    # CLI input; never feed untrusted strings through these flags.
    config.block_list = [eval(x) for x in config.block_list]
    config.stride_list = [eval(x) for x in config.stride_list]
    config.channel_list = [eval(x) for x in config.channel_list]
    train(config)
  elif config.mode == "debug":
    # tiny fixed configuration for a one-step smoke test
    config.train_steps = 1
    config.viz_steps = 1
    config.block_list = [2, 2, 2]
    config.channel_list = [3, 4, 5]
    config.stride_list = [1, 1, 2]
    config.in_shape = (config.batch_size, 28, 28, 1)
    train(config, debug=True)
  else:
    # remaining modes map directly onto a single callable
    dispatch = {
        "prepare": lambda: download_dataset(config),
        "sn": test_spectral_norm,
        "iresnet": test_iresnet,
        "trace": test_trace_approximation,
        "inverse": test_block_inversion,
        "squeeze": test_squeeze,
        "trace_sn": test_trace_sn,
        "generate": lambda: generate(config),
        "reconstruct": lambda: reconstruct(config),
    }
    if config.mode in dispatch:
      dispatch[config.mode]()
示例#17
0
def main():
    """Continuously render the L-system, rewriting the axiom each frame."""
    axiom = axiom_init()
    x, y = 0, length

    while True:
        display.fill(BG)
        angle = 0
        position = (0, 0)
        for symbol in axiom:
            # pump the event queue so the window stays responsive and
            # can be closed mid-draw
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()

            position, angle, x, y = turtle(
                draw, symbol, position[0], position[1], angle, x, y)

        # expand the axiom for the next generation, one frame per second
        axiom = generate(axiom)
        clock.tick(1)
示例#18
0
def generate_training_data():
    """Load every protein/ligand pair, then build grids with match labels.

    Label is 1 when protein i is paired with its own ligand (i == j),
    otherwise 0.
    """
    data = []
    labels = []
    # index 0 is a placeholder so list indices line up with the
    # 1-based file numbering
    proteins = [[None, None]]
    ligands = [[None, None]]
    for idx in range(1, RANGE):
        stem = '%04d' % idx
        p_coordinates, p_atom_types = read_pdb(
            "training_data/{0}_pro_cg.pdb".format(stem))
        l_coordinates, l_atom_types = read_pdb(
            "training_data/{0}_lig_cg.pdb".format(stem))
        proteins.append([p_coordinates, p_atom_types])
        ligands.append([l_coordinates, l_atom_types])
    for i in range(1, RANGE):
        for j in range(1, RANGE):
            grids = generate(ligands[j][0], ligands[j][1], proteins[i][0],
                             proteins[i][1], RADIUS, DISTANCE_THRESHOLD)
            data.extend(grids)
            labels.extend([1 if i == j else 0] * len(grids))
    return data, labels
示例#19
0
    def loop_outer(self):
        """Run the outer meta-training loop, reporting averaged train
        metrics plus a validation pass at regular intervals."""
        total_steps = self.hparams.steps_outer
        log_every = total_steps // 10
        train_loader = self.loaders[Splits.train]
        tracker = MetricsTracker(prefix=Splits.train)

        batches = generate(train_loader, limit=total_steps, show_progress=True)
        for step, batch in enumerate(batches):
            self.opt_outer.zero_grad()
            for task in MetaBatch(batch, self.device).get_tasks():
                tracker.store(self.loop_inner(task, self.hparams.bs_inner,
                                              self.hparams.steps_inner))
                self.opt_outer.store_grad()
            self.opt_outer.step(step)

            if step % log_every == 0:
                metrics = tracker.get_average()
                tracker.reset()
                metrics.update(self.loop_val())
                print({k: round(v, 3) for k, v in metrics.items()})
示例#20
0
def submit():
    """Submit a sequence to the prediction service, poll until the result
    page exists, and return parsed structures as JSON."""
    sequence = request.args.get("sequence", "MQVWPIEGIKKFETLSYLPP")
    simple_url = generate(sequence)
    simple_result = requests.get(simple_url)
    print(f" Simple STATUS CODE: {simple_result.status_code}")
    status = simple_result.status_code

    # poll once per second (up to ~5 minutes) while the result is still 404
    attempt = 0
    while status == 404 and attempt <= 300:
        sleep(1)
        print(attempt)
        print(simple_url)
        print(status)
        simple_result = requests.get(simple_url)
        status = simple_result.status_code
        attempt += 1

    if status != 200:
        return "Sequence too long or JPRED API is down, try again later."

    primary, secondary = parse_html(simple_result.content.decode("utf-8"))
    return jsonify(primary_structure=primary,
                   secondary_structure=secondary,
                   secondary_list=structure_to_list(secondary))
示例#21
0
    def on_epoch_end(self, epoch, logs=None):
        """Save model weights and write sample generations at epoch end.

        BUG FIX: ``logs={}`` was a mutable default argument (one dict
        shared across every call); replaced with ``None``. ``logs`` is
        currently unused, so the change is fully backward-compatible.
        """
        epoch += self.offset
        # NOTE(review): writes the module-level `model`, not self.model —
        # confirm this is intentional.
        model.save_weights('checkpoints/model_weights_%d.h5' % epoch, overwrite=True)

        # write some sample generated text at several temperatures
        seed_text = '\n'
        diversities = [0.2, 0.5, 0.7, 1.0]
        generateds = [utils.generate(model, seed_text, diversity, batch_size)
                for diversity in diversities]

        print(generateds[1])

        filename = 'checkpoints/epoch_%d_gen.txt' % epoch
        with open(filename, 'w') as f:
            f.write('----- Epoch number %d\n' % epoch)
            for generated, diversity in zip(generateds, diversities):
                f.write('----- diversity: %0.1f\n' % diversity)
                f.write(generated + '\n\n\n\n')
示例#22
0
def main():
    """Generate MIDI files from a trained model.

    Validates the CLI arguments, loads the model (via ``train.get_model``
    or from an explicit checkpoint), then either samples one
    multi-instrument track seeded from ``--prime_file``, or generates
    ``--num_files`` single-instrument tracks from random seed windows.
    """
    args = parse_args()
    args.verbose = True

    # prime file validation
    if args.prime_file and not os.path.exists(args.prime_file):
        utils.log(
            'Error: prime file {} does not exist. Exiting.'.format(
                args.prime_file), True)
        exit(1)
    else:
        if not os.path.isdir(args.data_dir):
            # BUG FIX: this message previously interpolated args.prime_file
            # instead of the directory that actually failed the check.
            utils.log(
                'Error: data dir {} does not exist. Exiting.'.format(
                    args.data_dir), True)
            exit(1)

    midi_files = [args.prime_file] if args.prime_file else \
     [os.path.join(args.data_dir, f) for f in os.listdir(args.data_dir) \
      if '.mid' in f or '.midi' in f]

    experiment_dir = get_experiment_dir(args.experiment_dir)
    utils.log('Using {} as --experiment_dir'.format(experiment_dir),
              args.verbose)

    if not args.save_dir:
        args.save_dir = os.path.join(experiment_dir, 'generated')

    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)
        utils.log('Created directory {}'.format(args.save_dir), args.verbose)

    if not args.from_checkpoint:
        model, epoch = train.get_model(args, experiment_dir=experiment_dir)
        utils.log(
            'Model loaded from {}'.format(
                os.path.join(experiment_dir, 'model.json')), args.verbose)
    else:
        # Load the architecture from JSON, then the weights from the
        # requested checkpoint epoch.
        with open(os.path.join(experiment_dir, 'model.json'), 'r') as f:
            model = utils.model_from_json(f.read())
        epoch = int(args.from_checkpoint)
        newest_checkpoint = os.path.join(
            experiment_dir,
            f"checkpoints/checkpoint-epoch_{args.from_checkpoint}.hdf5")
        utils.load_checkpoint(model, newest_checkpoint)
        utils.log('Model loaded from checkpoint {}'.format(newest_checkpoint),
                  args.verbose)

    # The model's input window length fixes how many events seed a sample.
    window_size = model.layers[0].get_input_shape_at(0)[1]
    seed_generator = utils.get_data_generator(
        midi_files,
        window_size=window_size,
        batch_size=32,
        num_threads=1,
        use_instrument=args.use_instrument,
        ignore_empty=args.ignore_empty,
        encode_section=args.encode_section,
        max_files_in_ram=10)

    # validate midi instrument name
    try:
        # try and parse the instrument name as an int
        instrument_num = int(args.midi_instrument)
        if not 0 <= instrument_num <= 127:
            utils.log('Error: {} is not a supported instrument. Number values must be ' \
                'be 0-127. Exiting'.format(args.midi_instrument), True)
            exit(1)
        args.midi_instrument = pretty_midi.program_to_instrument_name(
            instrument_num)
    except ValueError:
        # if the instrument name is a string
        try:
            # validate that it can be converted to a program number
            _ = pretty_midi.instrument_name_to_program(args.midi_instrument)
        except ValueError:
            utils.log('Error: {} is not a valid General MIDI instrument. Exiting.' \
                .format(args.midi_instrument), True)
            exit(1)

    if args.multi_instruments:

        if not args.prime_file:
            utils.log(
                'Error: You need to specify a prime file when generating a multi instrument track. Exiting.',
                True)
            exit(1)

        utils.log(f"Sampling from single seed file: {args.prime_file}",
                  args.verbose)

        generated_midi = pretty_midi.PrettyMIDI(initial_tempo=80)

        source_midi = utils.parse_midi(args.prime_file)

        melody_instruments = source_midi.instruments
        # melody_instruments = utils.filter_monophonic(source_midi.instruments, 1.0)

        for instrument in melody_instruments:
            instrument_group = utils.get_family_id_by_instrument_normalized(
                instrument.program)

            # Collect seed windows for this source track, skipping windows
            # that contain nothing but pauses.
            X = []
            windows = utils._encode_sliding_windows(instrument, window_size)
            for w in windows:
                if np.min(w[0][:, 0]) == 1:
                    # Window only contains pauses.. ignore!
                    continue
                X.append(w[0])
            if len(X) <= 5:
                # Too little usable material to seed a generation.
                continue
            seed = X[random.randint(0, len(X) - 1)]

            # Generate track for this instrument
            generated = []
            buf = np.copy(seed).tolist()
            while len(generated) < args.file_length:
                buf_expanded = [x for x in buf]

                # Add instrument class to input
                if args.use_instrument:
                    buf_expanded = [[instrument_group] + x
                                    for x in buf_expanded]

                # Add section encoding to input
                if args.encode_section:
                    sections = [0] * 4
                    active_section = int(
                        (len(generated) / args.file_length) * 4)
                    sections[active_section] = 1
                    buf_expanded = [sections + x for x in buf_expanded]

                # Get prediction
                arr = np.expand_dims(np.asarray(buf_expanded), 0)
                pred = model.predict(arr)

                # Sample the next event from the predicted distribution,
                # then one-hot encode it and slide the window forward.
                index = np.random.choice(range(0, seed.shape[1]), p=pred[0])
                pred = np.zeros(seed.shape[1])

                pred[index] = 1
                generated.append(pred)
                buf.pop(0)
                buf.append(pred.tolist())

            # Create instrument
            instrument = utils._network_output_to_instrument(
                generated, instrument.program)

            # Add to target midi
            generated_midi.instruments.append(instrument)

        if len(generated_midi.instruments) == 0:
            raise Exception(
                f"Found no monophonic instruments in {args.prime_file}")

        # Save midi (renamed from `time` to avoid shadowing the time module)
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        sample_name = f"{args.save_dir}/sampled_{timestamp}.mid"
        print(f"Writing generated sample to {sample_name}")
        generated_midi.write(sample_name)

    else:
        # generate 10 tracks using random seeds
        utils.log('Loading seed files...', args.verbose)
        X, y = next(seed_generator)
        generated = utils.generate(model,
                                   X,
                                   window_size,
                                   args.file_length,
                                   args.num_files,
                                   args.midi_instrument,
                                   use_instrument=args.use_instrument,
                                   encode_section=args.encode_section)
        for i, midi in enumerate(generated):
            file = os.path.join(
                args.save_dir,
                f"{i+1}_instrument{midi.instruments[0].program}.mid")
            # `file` already embeds the index; the old .format(i + 1) was a no-op.
            midi.write(file)
            utils.log('wrote midi file to {}'.format(file), True)
    # NOTE(review): fragment of a merge-and-package ("binary packing")
    # coin-selection loop; the enclosing loop/definition is not visible in
    # this chunk, so the surrounding invariants are inferred from the
    # original comments below.
    #  iteration 
    # curl from -L to log_2(2n) 
    # 2^L > n
    # L > log_2(n)
    # curl increase by one
    # Maximum 'O(L)' iteration
    if curl==diadic[0]:
        # If there is an element for diadic expansion, take the cheapest
        # packed element into the answer set.
        coin = coin_list_packed.pop(0)
        print("Select {}".format(str(coin)))
        coin_ans.append(coin)
        diadic.pop(0)
        # O(1)

    if curl<0:
        coin_list_cur = generate(freq_list, curl)
        # generate 'n' elements : O(n) 
        coin_list_packed = merge(coin_list_packed, coin_list_cur)
        # len(coin_list_packed) < 2n 
        # len(coin_list_cur) = n
        # Time complexity of merge operation : O(n)
        coin_list_packed = package(coin_list_packed)
        # packing: pack => O(1); maximum len(coin_list_packed)/2 < n => O(n)
    else:
        coin_list_packed = package(coin_list_packed)
        # packing: pack => O(1); maximum len(coin_list_packed)/2 < n => O(n)
    curl+=1
示例#24
0
from random import randint

from faker import Factory

from utils import generate

fake = Factory.create('pl_PL')

if __name__ == '__main__':
    # XML fragment rendered once per generated record.
    template = """
    <samochod>
        <marka>{marka}</marka>
        <model>{model}</model>
        <rocznik>{rocznik}</rocznik>
        <num_kat>{num_kat}</num_kat>
        <cena>{cena}</cena>
    </samochod>
    """

    def gen_fun():
        """Render one random <samochod> record from fake/random values."""
        record = {
            'marka': fake.text()[:10],
            'model': fake.text()[:10],
            'rocznik': randint(1970, 2000),
            'num_kat': randint(1000, 2000),
            'cena': randint(10000, 50000),
        }
        return template.format(**record)

    generate('cennik', gen_fun, 'ceny.xml')
示例#25
0
# NOTE(review): top-of-script context (the definitions of `model`, `history`,
# `steps`, `batchsize`, `T`, `sigma` and `generate`) is outside this chunk.
model.summary()

callbacks = [
    # Drop the learning rate 10x after 4 stagnant validation epochs.
    ReduceLROnPlateau(monitor='val_loss',
                      factor=0.1,
                      patience=4,
                      verbose=1,
                      min_lr=1e-9),
    # save_best_only=False: every epoch overwrites the same checkpoint file,
    # so monitor/mode have no effect here — presumably intentional; confirm.
    ModelCheckpoint(filepath=f'./Models/{steps}_new_model.h5',
                    monitor='val_acc',
                    save_best_only=False,
                    mode='max',
                    save_weights_only=False), history
]

# Train from one data generator and validate on a second, independent one.
gen = generate(batchsize=batchsize, steps=steps, T=T, sigma=sigma)
model.fit_generator(generator=gen,
                    steps_per_epoch=50,
                    epochs=25,
                    verbose=1,
                    callbacks=callbacks,
                    validation_data=generate(batchsize=batchsize,
                                             steps=steps,
                                             T=T,
                                             sigma=sigma),
                    validation_steps=25)

# Collect the learning curves for later plotting/inspection.
train_loss = history.history['loss']
val_loss = history.history['val_loss']
train_acc = history.history['acc']
val_acc = history.history['val_acc']
示例#26
0
        # NOTE(review): tail of a quick-select partition loop; the enclosing
        # def and loop header are not visible in this chunk. `list` appears
        # to shadow the builtin — flagged for the original author.
        if p < k:
            # Pivot landed left of the target rank: search the right part.
            s = p + 1
        elif p > k:
            # Pivot landed right of the target rank: search the left part.
            e = p - 1
        else:
            return list[k]

    return list[k]
    
if __name__ == "__main__":
    # Make the parent package importable when this file runs as a script.
    import sys
    from os import path
    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))

    # Random-array generator from the shared utilities.
    from utils import generate

    data = generate(10, 100)
    print(data)

    # Selection by rank: 0 -> minimum, middle -> median, last -> maximum.
    min_el = quick_select(data, 0)
    median = quick_select(data, len(data) // 2)
    max_el = quick_select(data, len(data) - 1)

    print("The min is = ", min_el)
    print("The median is = ", median)
    print("The max is = ", max_el)
    
    
示例#27
0
            # NOTE(review): interior of a GMM evaluation routine; `cov`,
            # `means`, `covs`, `k`, `N`, `cluster_probs`, `X_test` and
            # `results` are all defined outside this chunk.

            # Regroup the covariance into (N/3, 3, N/3, 3) blocks and flatten
            # back column-major ('F') — reorders it into a different layout;
            # presumably coordinate-major vs point-major — confirm upstream.
            cov_prime = cov.reshape(int(N / 3), 3, int(N / 3), 3)
            cov = cov_prime.reshape(N, N, order='F')

            results['cov'] = cov
            results['means'] = means

            # SVD covs: keep only a rank-5 factorization of each component.
            for i in range(k):
                u, s, vt = np.linalg.svd(covs[i])
                covs[i] = (u[:, :5], s[:5], vt[:5, :])

            # Draw 25 samples from the most probable cluster for plotting.
            j = np.argmax(cluster_probs)
            X_samples = []
            for _ in range(25):
                X_samples.append(generate(means[j], covs[j]))
            X_samples_25 = np.array(X_samples)

            results['X_samples'] = X_samples_25
            results['most_probable'] = j

            # Pre-allocate the result lists that are filled in later.
            results['log_probs'] = []
            results['X_test'] = X_test

            results['position_kls'] = []
            results['position_sample'] = []

            results['velocity_longitudinal_kls'] = []
            results['velocity_longitudinal_test'] = []
            results['velocity_longitudinal_sample'] = []
def main(argv):
    """Train the WikiText-2 LSTM language model.

    Parses CLI flags, loads the dataset, builds (and optionally restores)
    the model, prints a sample generation, then runs the training loop and
    reports per-epoch accuracy and loss.

    Parameters:
        argv: list of command-line argument strings (e.g. sys.argv[1:]).
    """
    parser = argparse.ArgumentParser(
        description='WikiText-2 language modeling')
    parser.add_argument('--batch-size',
                        type=int,
                        default=70,
                        metavar='N',
                        help='input batch size for training (default: 70)')
    parser.add_argument('--eval-batch-size',
                        type=int,
                        default=50,
                        metavar='N',
                        help='input batch size for training (default: 50)')
    parser.add_argument('--save-directory',
                        type=str,
                        default='output/wikitext-2',
                        help='output directory')
    parser.add_argument('--model-save-directory',
                        type=str,
                        default='../models/',
                        help='output directory')
    parser.add_argument('--epochs',
                        type=int,
                        default=5,
                        metavar='N',
                        help='number of epochs to train')
    parser.add_argument('--base-seq-len',
                        type=int,
                        default=70,
                        metavar='N',
                        help='Batch length')
    parser.add_argument('--min-seq-len',
                        type=int,
                        default=50,
                        metavar='N',
                        help='minimum batch length')
    # BUG FIX: --seq-prob, --lr and --weight-decay hold fractional values;
    # they previously declared type=int, which rejects inputs like
    # "--lr 0.001" at the command line.
    parser.add_argument('--seq-prob',
                        type=float,
                        default=0.95,
                        metavar='N',
                        help='prob of being divided by 2')
    parser.add_argument('--seq-std',
                        type=int,
                        default=6,
                        metavar='N',
                        help='sequence length std')
    parser.add_argument('--hidden-dim',
                        type=int,
                        default=1150,
                        metavar='N',
                        help='Hidden dim')
    parser.add_argument('--embedding-dim',
                        type=int,
                        default=400,
                        metavar='N',
                        help='Embedding dim')
    parser.add_argument('--lr',
                        type=float,
                        default=1e-4,
                        metavar='N',
                        help='learning rate')
    parser.add_argument('--weight-decay',
                        type=float,
                        default=2e-6,
                        metavar='N',
                        help='weight decay')
    parser.add_argument('--tag',
                        type=str,
                        default='testing.pt',
                        metavar='N',
                        help='checkpoint file name')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')

    args = parser.parse_args(argv)
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    # load dataset (NOTE(review): only 2 train / 1 valid articles are
    # sliced here — looks like a debugging subset; confirm before release)
    train_data, val_data, vocabulary = (
        np.load('../dataset/wiki.train.npy')[:2],
        np.load('../dataset/wiki.valid.npy')[:1],
        np.load('../dataset/vocab.npy'))

    word_count = len(vocabulary)

    # BUG FIX: the model dims previously referenced bare `embedding_dim` /
    # `hidden_dim`, which are undefined here (NameError) — they live on
    # args. The model was also constructed up to three times; once suffices.
    model = models.LSTMModelV2(word_count=word_count,
                               embedding_dim=args.embedding_dim,
                               hidden_dim=args.hidden_dim)
    loss_fn = models.CrossEntropyLoss3D()

    checkpoint_path = os.path.join(args.model_save_directory, args.tag)

    if os.path.exists(checkpoint_path):
        print("Using pre-trained model")
        print("*" * 90)
        model.load_state_dict(torch.load(checkpoint_path))

    if args.cuda:
        model = model.cuda()
        loss_fn = loss_fn.cuda()

    # Print a short stochastic sample so the operator can eyeball the model.
    generated = utils.generate(model,
                               sequence_length=10,
                               batch_size=2,
                               stochastic=True,
                               args=args).data.cpu().numpy()
    utils.print_generated(utils.to_text(preds=generated,
                                        vocabulary=vocabulary))
    print('Model: ', model)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    logging = dict()
    logging['loss'] = []
    logging['train_acc'] = []
    logging['val_acc'] = []

    model.train()

    for epoch in range(args.epochs):

        epoch_time = time.time()
        np.random.shuffle(train_data)
        train_data_ = utils.batchify(
            utils.to_tensor(np.concatenate(train_data)), args.batch_size)
        val_data_ = utils.batchify(utils.to_tensor(np.concatenate(val_data)),
                                   args.eval_batch_size)
        train_data_loader = utils.custom_data_loader(train_data_, args)
        val_data_loader = utils.custom_data_loader(val_data_,
                                                   args,
                                                   evaluation=True)
        # number of words
        train_size = train_data_.size(0) * train_data_.size(1)
        val_size = val_data_.size(0) * val_data_.size(1)

        n_batchs = len(train_data_)
        n_batchs_val = len(val_data_)
        correct = 0
        epoch_loss = 0
        batch_index = 0
        seq_len = 0
        counter = 0
        hidden = model.init_hidden(args.batch_size)

        while (batch_index < n_batchs - 1):

            optimizer.zero_grad()

            X, y, seq_len = next(train_data_loader)

            # Detach the hidden state so gradients don't flow across batches.
            hidden = utils.repackage_hidden(hidden)
            model.zero_grad()
            out, hidden = model(X, hidden)
            loss = loss_fn(out, y)

            pred = out.data.max(2)[1].int().view(1, -1)
            predicted = pred.eq(y.data.view_as(pred).int())
            correct += predicted.sum()

            loss.backward()
            # scale lr with respect to the size of the seq_len
            utils.adjust_learning_rate(optimizer, args, seq_len)
            # NOTE(review): clip_grad_norm is deprecated in newer torch
            # (clip_grad_norm_); kept as-is for the pinned version.
            torch.nn.utils.clip_grad_norm(model.parameters(), 0.25)
            optimizer.step()
            utils.adjust_learning_rate(optimizer, args, args.base_seq_len)

            epoch_loss += loss.data.sum()
            batch_index += seq_len
            counter += 1
            # NOTE(review): this break stops after a single batch — it looks
            # like a debugging leftover; kept to preserve current behavior.
            break
        train_acc = correct / train_size
        train_loss = epoch_loss / counter
        val_acc = validate(model, val_data_loader, n_batchs_val) / val_size

        logging['loss'].append(train_loss)
        logging['train_acc'].append(train_acc)
        logging['val_acc'].append(val_acc)
        utils.save_model(model, checkpoint_path)

        print('=' * 83)
        print(
            '| epoch {:3d} | time: {:5.2f}s | valid acc {:5.2f} | train acc {:5.2f} |'
            'train loss {:8.2f}'.format(epoch + 1, (time.time() - epoch_time),
                                        val_acc, train_acc, train_loss))
        # NOTE(review): likewise, training currently stops after one epoch.
        break
示例#29
0
    # NOTE(review): body of a recursive MSD radix sort; the def header (with
    # parameters l, p, s) is not visible in this chunk.
    # s = total digit width; p = digits still to process (left to right).
    if s == None:  # NOTE(review): prefer `is None`; left unchanged here
        s = len(str(max(l)))
    if p == None:
        p = s

    # Index of the digit examined at this recursion depth.
    i = s - p

    if i >= s:
        # All digit positions processed: this bucket is sorted.
        return l

    bins = [[] for _ in range(10)]

    # Distribute by the i-th digit of the zero-padded decimal string.
    for e in l:
        bins[int(str(e).zfill(s)[i])] += [e]

    # Recurse into each bucket and concatenate the results in digit order.
    return flatten([radix_sort(b, p - 1, s) for b in bins])


if __name__ == "__main__":
    # Benchmark radix_sort over a range of input sizes; times are in ms.
    results = {}
    print("Radix Sort.")
    sizes = [500, 1000, 5000, 10000, 100000, 500000, 1000000]
    for n in sizes:
        start = time()
        items = generate(n)
        radix_sort(items)
        stop = time()
        elapsed = (stop - start) * 1000.0
        results[n] = elapsed
        print(" with {} elements took {}".format(n, elapsed))
    print(results)
示例#30
0
    # NOTE(review): body of a poetry-generation main(); `opt`, load_dataset,
    # PoetryModel, train, draw_fig, gen_acrostic and generate are defined
    # outside this chunk.
    data, ix2word, word2ix = load_dataset(opt.dataset_file)

    # Initialize the model (embedding dim 128, hidden dim 256).
    model = PoetryModel(len(word2ix), 128, 256, num_layers=opt.num_layers)
    # Either train from scratch or load a previously saved state dict.
    if opt.train_or_load:
        model, loss_list=train(model=model, data=data, ix2word=ix2word, word2ix=word2ix, \
                lr=opt.lr, batch_size=opt.batch_size, num_epochs=opt.num_epochs, \
                device=opt.device)

        # Save trained weights (NOTE(review): '\\model.pt' is a Windows-only
        # path separator — os.path.join would be portable).
        torch.save(model.state_dict(), opt.save_path + '\\model.pt')
        # Plot the loss curve next to the saved model.
        draw_fig(loss_list, opt.save_path)
    else:
        # Load previously trained weights.
        model.load_state_dict(torch.load(opt.save_path + '\\model.pt'))

    # Generate an acrostic poem from the first set of start words.
    acrostic=gen_acrostic(model=model, start_words=opt.start_words_1, \
                        ix2word=ix2word, word2ix=word2ix, \
                        max_gen_len=opt.max_gen_len, device=opt.device)
    # Generate a regular poem from the second set of start words.
    Poetry=generate(model=model, start_words=opt.start_words_2, \
                    ix2word=ix2word, word2ix=word2ix, \
                    max_gen_len=opt.max_gen_len, device=opt.device)

    print(Poetry)
    print()
    print(acrostic)
示例#31
0
from time import time
from utils import (
    generate,
)

def insertion_sort(array):
    """Sort *array* in place (ascending) and return it.

    BUG FIX: the original read ``key = i`` — inserting the loop *index*
    instead of the element ``array[i]`` — which corrupted the data for most
    inputs (e.g. [5, 4, 3] sorted to [1, 2, 5]).
    """
    for i in range(1, len(array)):
        key = array[i]
        j = i - 1
        # Shift larger elements right until key's slot is found.
        while j >= 0 and key < array[j]:
            array[j + 1] = array[j]
            j -= 1
        array[j + 1] = key
    return array

if __name__ == "__main__":
    # Benchmark insertion_sort over a range of input sizes; times are in ms.
    results = {}
    print("Insertion Sort.")
    for size in [500, 1000, 5000, 10000, 100000, 500000, 1000000]:
        start = time()
        insertion_sort(generate(size))
        elapsed = (time() - start) * 1000.0
        results[size] = elapsed
        print(" with {} elements took {}".format(size, elapsed))
    print(results)
示例#32
0
def main():
    """Generate MIDI tracks from a trained model using random seed windows.

    Validates the CLI arguments, loads the model via ``train.get_model``,
    then samples ``--num_files`` tracks and writes them to ``--save_dir``.
    """
    args = parse_args()
    args.verbose = True

    # prime file validation
    if args.prime_file and not os.path.exists(args.prime_file):
        utils.log(
            'Error: prime file {} does not exist. Exiting.'.format(
                args.prime_file), True)
        exit(1)
    else:
        if not os.path.isdir(args.data_dir):
            # BUG FIX: this message previously interpolated args.prime_file
            # instead of the directory that actually failed the check.
            utils.log(
                'Error: data dir {} does not exist. Exiting.'.format(
                    args.data_dir), True)
            exit(1)

    midi_files = [ args.prime_file ] if args.prime_file else \
                 [ os.path.join(args.data_dir, f) for f in os.listdir(args.data_dir) \
                 if '.mid' in f or '.midi' in f ]

    experiment_dir = get_experiment_dir(args.experiment_dir)
    utils.log('Using {} as --experiment_dir'.format(experiment_dir),
              args.verbose)

    if not args.save_dir:
        args.save_dir = os.path.join(experiment_dir, 'generated')

    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)
        utils.log('Created directory {}'.format(args.save_dir), args.verbose)

    model, epoch = train.get_model(args, experiment_dir=experiment_dir)
    utils.log(
        'Model loaded from {}'.format(
            os.path.join(experiment_dir, 'model.json')), args.verbose)

    # The model's input window length fixes how many events seed a sample.
    window_size = model.layers[0].get_input_shape_at(0)[1]
    seed_generator = utils.get_data_generator(midi_files,
                                              window_size=window_size,
                                              batch_size=32,
                                              num_threads=1,
                                              max_files_in_ram=10)

    # validate midi instrument name
    try:
        # try and parse the instrument name as an int
        instrument_num = int(args.midi_instrument)
        if not 0 <= instrument_num <= 127:
            utils.log('Error: {} is not a supported instrument. Number values must be ' \
                   'be 0-127. Exiting'.format(args.midi_instrument), True)
            exit(1)
        args.midi_instrument = pretty_midi.program_to_instrument_name(
            instrument_num)
    except ValueError:
        # if the instrument name is a string
        try:
            # validate that it can be converted to a program number
            _ = pretty_midi.instrument_name_to_program(args.midi_instrument)
        except ValueError:
            utils.log('Error: {} is not a valid General MIDI instrument. Exiting.'\
                   .format(args.midi_instrument), True)
            exit(1)

    # generate 10 tracks using random seeds
    utils.log('Loading seed files...', args.verbose)
    X, y = next(seed_generator)
    generated = utils.generate(model, X, window_size, args.file_length,
                               args.num_files, args.midi_instrument)
    for i, midi in enumerate(generated):
        file = os.path.join(args.save_dir, '{}.mid'.format(i + 1))
        # `file` already embeds the index; the old .format(i + 1) was a no-op.
        midi.write(file)
        utils.log('wrote midi file to {}'.format(file), True)
示例#33
0
from random import randint

from faker import Factory

from utils import generate

fake = Factory.create('pl_PL')

if __name__ == '__main__':
    # XML fragment rendered with one set of fake appartement values.
    template = """
<appartement id="{id}">
    <address>{address}</address>
    <city>{city}</city>
    <area>{area}</area>
    <rooms>{rooms}</rooms>
    <year>{year}</year>
    <price>{price}</price>
</appartement>
"""

    root_node = 'appartement'
    values = {
        'id': 1,
        'address': fake.address(),
        'city': fake.city(),
        'year': randint(1970, 2000),
        'rooms': randint(2, 10),
        'area': randint(40, 150),
        'price': randint(100000, 1000000),
    }
    generate(root_node, template, values, 'test.xml')
示例#34
0
# Interactive sampling script: asks for generation parameters, loads a trained
# model, samples MIDI tracks and renders each to WAV via FluidSynth.
window = 50  # length of window
length = 100  # number of events
number = 10  # number of samples
instrument = 'Acoustic Grand Piano'  # full list is here https://www.midi.org/specifications/item/gm-level-1-sound-set

print('enter window size')
window = int(input())
print('enter length of sample')  # BUG FIX: prompt previously said 'lenght'
length = int(input())
print('enter number of samples')
number = int(input())
print('enter instrument (for example Acoustic Grand Piano)')
instrument = input()

model = load_model('v5.hdf5')  # here should be path to model

# NOTE(review): `seed_generator` is never defined in this script — it must be
# created beforehand (e.g. via utils.get_data_generator) or this line raises
# NameError.
X, y = next(seed_generator)

generated = utils.generate(model, X, window, length, number, instrument)

if not os.path.isdir('output'):
    os.makedirs('output')

# The synth is loop-invariant: create it once instead of per track.
fs = FluidSynth(
    'FluidR3Mono_GM.sf3')  # here should be full path to Sound Font file

for i, midi in enumerate(generated):
    file = os.path.join('output', '{}.mid'.format(i + 1))
    # `file` already embeds the index; the old .format(i + 1) was a no-op.
    midi.write(file)
    fs.midi_to_audio(file, os.path.join('output', '{}.wav'.format(i + 1)))