示例#1
0
def studentCourses(student, course):
	"""Render the subject page for *student*'s *course*.

	Supports the "Math" and "English" courses; any other course value
	redirects back to the student overview page.
	"""
	generate(10)  # NOTE(review): return value unused — confirm this call's side effect is required here

	if course == "Math":
		subject_grades = {"Adding": 23, "Subtracting": 37, "Multiplication": 10, "Division": 30}
		subjects = ["Adding", "Subtracting", "Division", "Multiplication"]
	elif course == "English":
		subject_grades = {"Grammar": 23, "Comprehension": 37}
		subjects = ["Grammar", "Comprehension"]
	else:
		return redirect(url_for('student'))

	# Build one nav entry per subject.  Fixed: the English branch previously
	# omitted the "/course/" path segment used by the Math branch, producing
	# inconsistent URLs ("/students/<s>/English/Grammar").
	links = [
		{"name": name, "link": "/students/" + student + "/course/" + course + "/" + name}
		for name in subjects
	]
	return render_template("subject_page.html", student=student, items=links,
		progress=subject_grades, title=student, course=course)
示例#2
0
""" This module prepares midi file data and feeds it to the neural
    network for training """
import torch

import generation
from training import train
from data_preprocess import load_training_data

random_seed = 0
torch.manual_seed(random_seed)  # fixed seed so sampling/generation is reproducible

if __name__ == '__main__':
    # Preprocessing + training pipeline, kept for reference (generation only):
    # data_path = "data/chopin/*.mid"
    # seq_length = 512
    # network_input, network_output, num_unique_tokens = load_training_data(
    #     data_dir_path=data_path, sequence_length=seq_length,
    #     save_data=True, load_data=False)
    # train(network_input, network_output, num_unique_tokens, seq_length)
    training_data_path = "training_data.pickle"
    checkpoint_path = ("/home/okleinfeld/PianoGenerator/"
                       "training_checkpoint_26.09.2019_15:15:00/checkpoint.pth")
    generation.generate(training_data_path, checkpoint_path, 2)
示例#3
0
def generateBtnClicked(nodeCount, edgeCount):
    """UI callback for the Generate button: delegates to ``generation.generate``
    with the requested node and edge counts.  Returns nothing."""
    generation.generate(nodeCount, edgeCount)
示例#4
0
def best_clock(population1):
	"""Return the member of *population1* with the highest fitness score.

	Preserves the original sentinel behaviour: returns an empty list when
	the population is empty or no member scores above 0.
	"""
	best_score = 0
	best = []
	for clock in population1:
		score = fitness(clock)
		if score > best_score:
			best_score = score
			# Fixed: previously this read the *global* ``population`` list
			# (``population[i]``) instead of the ``population1`` argument, so
			# the wrong individual was returned whenever a different
			# population was passed in.
			best = clock
	return best

def display_clock(clock):
	"""Print each connection of *clock*, one per line: the class names of
	the two connected components followed by the connection value."""
	for conn in gen.list_connections(clock):
		print(conn[0].__class__.__name__, conn[1].__class__.__name__, conn[2])

"""
	Test
"""
# Smoke-test driver: load the parts database, evolve a population of clocks,
# then dump the final connection list and a per-hand speed-accuracy score.
input_database()
population = gen.generate(100,1,25)
natural_selection(population)
#best = best_clock(population)
#print("BEST CLOCK FITNESS SCORE:",fitness(best))

for i in range(len(ConnectionData)) :
	print(i, " : ", ConnectionData[i].name, " -> ", ConnectionData[i].point)
# Accuracy = 1 - relative error of the hand speed vs. its target constant.
# NOTE(review): the minute-hand line uses ``1/.60`` (= 1/0.60 ≈ 1.667) while
# the other two use ``1./60`` — this looks like a typo; confirm the intended
# target speed before changing it.
print( "   ",1 - abs((abs(1./60) - VITESSE_SECONDHAND) / VITESSE_SECONDHAND))
print( "   ",1 - abs((abs(1/.60) - VITESSE_MINUTEHAND) / VITESSE_MINUTEHAND))
print( "   ",1 - abs((abs(1./60) - VITESSE_HOURHAND) / VITESSE_HOURHAND))
示例#5
0
}

# Pipeline: load a mel spectrogram, fit a linear predictor on extracted
# features, generate a new spectrogram from it, and resynthesise audio.
# mls = load_mel_spectrogram_db(original_audio_list[config['audio']], config)
spectrogram, melfilters = load_mel_spectrogram_db(
    original_audio_list[config['audio']], config)
print('Finished loading audio and creating spectrogram, shape: %d %d\n' %
      spectrogram.shape)
#

config['start_offset'] += config['use_prev_frames']
spectrogram = spectrogram.T  # rows should be different training examples, so different points in time

# Features are windows of past frames; target is the next frame.
X = fe.extract_features(spectrogram, config)
y = fe.extract_target(spectrogram, config)

print('Training data shape: {}, training labels shape: {}'.format(
    X.shape, y.shape))
start_time = perf_counter()
# NOTE(review): scikit-learn removed the ``normalize=`` parameter from
# LinearRegression in 1.2 — on modern sklearn this raises; confirm the pinned
# version or switch to a StandardScaler pipeline.
predictor = LinearRegression(normalize=True)
predictor.fit(X, y)
print(perf_counter() - start_time)  # wall-clock fit time, seconds
#
output_spectrogram = generate(spectrogram, predictor, config, X.shape[1])
output_spectrogram = invert_mel_db(output_spectrogram.T, melfilters).T

# 80 Griffin-Lim iterations to recover phase from the magnitude spectrogram.
output = reconstruct_signal_griffin_lim(output_spectrogram,
                                        config['framelength'],
                                        config['hop_length'], 80)

# NOTE(review): librosa.output.write_wav was removed in librosa 0.8
# (soundfile.write is the replacement) — confirm the pinned librosa version.
lr.output.write_wav('test.wav', output, config['sr'])
示例#6
0

def display_clock(clock):
    """Print each connection of *clock*, one per line: the class names of
    the two connected components followed by the connection value."""
    for conn in gen.list_connections(clock):
        print(conn[0].__class__.__name__,
              conn[1].__class__.__name__,
              conn[2])


"""
	Test
"""

# Evolve a clock population; Ctrl-C stops early but still reports the best
# individual found so far via the ``finally`` block.
# NOTE(review): if input_database() or gen.generate() raises before
# ``population`` is assigned, the ``finally`` block hits a NameError —
# consider pre-initialising ``population``; left unchanged here.
try:
    input_database()
    population = gen.generate(100, 3, 100)
    natural_selection(population)
except KeyboardInterrupt:
    pass
finally:
    best = best_clock(population)
    print("Best fitness : ", fitness(best))
    # Count how many of each known part class the best clock uses
    # (index 0 is skipped — presumably a non-part header element; confirm).
    for piece in gen.classes:
        counter = 0
        for part in best[1:]:
            if isinstance(part, piece):
                counter += 1
        print("Number of ", piece, " : ", counter)
    print("------ connections ------")
    print(gen.list_connections(best))
''' for i in range(len(valid_connections)) :
示例#7
0
def main():
    '''
    Main function that coordinates the entire process. Parses arguments that specify the exercise and the
    experiment that should be run. Initializes the model and the checkpoint managers.

    Modes may be combined (e.g. ``--mode train evaluate``); "evaluate" and
    "generate" require a restorable checkpoint unless "train" ran first.
    '''

    parser = argparse.ArgumentParser(
        description='Define configuration of experiments')
    # nargs='+' lets several stages run in one invocation.
    parser.add_argument('--mode',
                        type=str,
                        nargs='+',
                        choices=['train', 'evaluate', 'generate'],
                        required=True)
    parser.add_argument('--experiment',
                        type=str,
                        choices=['a', 'b', 'c'],
                        required=True)
    parser.add_argument('--id', type=str, required=False)
    parser.add_argument('--epochs', type=int, default=EPOCHS, required=False)

    args = parser.parse_args()

    # Setting Experiment Id — timestamp-based when none is supplied, so each
    # run gets its own output directory tree.
    if args.id is None:
        exp_id = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        print(f"No Experiment Id Set, Creating New: {exp_id}")
    else:
        exp_id = args.id
        print(f"Using Experiment Id: {exp_id}")

    # Setting Directories: logs, submissions and checkpoints all live under
    # one per-experiment base directory.
    base_dir = f"{OUTPUT_DIR}/exp_{args.experiment}/{exp_id}"
    log_dir = f"{base_dir}/logs"
    submission_dir = f"{base_dir}/submissions"
    if not os.path.exists(submission_dir):
        os.makedirs(submission_dir)
    ckpt_dir = f"{base_dir}/ckpts"

    print(f"Experiment Directory: {base_dir}")

    print(f"Using Tensorflow Version: {tf.__version__}")
    print("Building Vocabulary...")
    build_vocab(input_file=PATH_TRAIN,
                output_file=PATH_VOCAB,
                top_k=VOCAB_SIZE,
                special=SPECIAL)
    word2id, id2word = build_vocab_lookup(PATH_VOCAB, "<unk>")

    # Setting Experiment Specific Configurations:
    #   a — 512 hidden units, embeddings trained from scratch
    #   b — 512 hidden units, pretrained embeddings
    #   c — 1024 hidden units, pretrained embeddings
    if args.experiment == 'a':
        lstm_hidden_state_size = 512
        word_embeddings = None

    elif args.experiment == 'b':
        lstm_hidden_state_size = 512
        word_embeddings = load_embedding(dim_embedding=EMBEDDING_SIZE,
                                         vocab_size=VOCAB_SIZE)

    elif args.experiment == 'c':
        lstm_hidden_state_size = 1024
        word_embeddings = load_embedding(dim_embedding=EMBEDDING_SIZE,
                                         vocab_size=VOCAB_SIZE)
    else:
        # Unreachable given argparse choices, kept as a defensive guard.
        raise ValueError(f"Unknown Experiment {args.experiment}")

    print(f'Initializing Model...')
    model = LanguageModel(vocab_size=VOCAB_SIZE,
                          sentence_length=SENTENCE_LENGTH,
                          embedding_size=EMBEDDING_SIZE,
                          hidden_state_size=lstm_hidden_state_size,
                          output_size=LSTM_OUTPUT_SIZE,
                          batch_size=BATCH_SIZE,
                          word_embeddings=word_embeddings,
                          index_to_word_table=id2word)

    print(f'Initializing Optimizer...')
    optimizer = tf.keras.optimizers.Adam()

    # Checkpoint tracks the step counter, optimizer state and model weights;
    # the manager keeps at most the 5 most recent checkpoints.
    ckpt = tf.train.Checkpoint(step=tf.Variable(1),
                               optimizer=optimizer,
                               net=model)
    manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=5)

    if manager.latest_checkpoint:
        print(f"Restoring Model from {manager.latest_checkpoint}...")
        ckpt.restore(manager.latest_checkpoint)
        model_loaded = True
    else:
        print("Initializing Model from Scratch")
        model_loaded = False

    if "train" in args.mode:
        print(f"Starting Training...")
        train_summary_writer = tf.summary.create_file_writer(
            f"{log_dir}/train")
        with train_summary_writer.as_default():
            train(ckpt=ckpt,
                  manager=manager,
                  model=model,
                  optimizer=optimizer,
                  word2id=word2id,
                  id2word=id2word,
                  epochs=args.epochs)
        # Training produced usable weights even without a prior checkpoint.
        model_loaded = True

    if "evaluate" in args.mode:
        print(f"Starting Evaluation...")
        assert model_loaded, 'model must be loaded from checkpoint in order to be evaluated'

        test_summary_writer = tf.summary.create_file_writer(
            f"{log_dir}/evaluate")
        with test_summary_writer.as_default():
            evaluate(
                model=model,
                word2id=word2id,
                id2word=id2word,
                step=optimizer.iterations,
                path_submission=
                f"{submission_dir}/group35.perplexity{args.experiment.upper()}"
            )

    if "generate" in args.mode:
        print(f"Starting Generation...")
        assert model_loaded, 'model must be loaded from checkpoint in order to start generation'

        generate_summary_writer = tf.summary.create_file_writer(
            f"{log_dir}/generate")
        with generate_summary_writer.as_default():
            generate(word2id,
                     id2word,
                     model=model,
                     path_submission=f"{submission_dir}/group35.continuation")
示例#8
0
def train(D,
          G,
          Dis_optimizer,
          Enc_optimizer,
          Dec_optimizer,
          data_loader,
          writer,
          opt,
          epoch=0):
    """Run one epoch of VAE-GAN training.

    For every batch the three sub-networks are updated in sequence:
    discriminator (real vs. fake), encoder (KL prior + reconstruction)
    and decoder (reconstruction + adversarial with swapped labels).

    Args:
        D: discriminator; must expose ``feature`` for feature matching.
        G: generator/autoencoder returning ``(x_fake, mu, logvar)``; must
            expose ``decoder`` for sampling from the prior.
        Dis_optimizer, Enc_optimizer, Dec_optimizer: optimizers for the
            discriminator, encoder and decoder parameters respectively.
        data_loader: yields ``(data, label)`` batches; labels are ignored.
        writer: TensorBoard-style summary writer.
        opt: options namespace (batch_size, nz, alpha, gamma, use_gpu).
        epoch: current epoch index, used only for the logging global step.

    Returns:
        Tuple of mean per-iteration losses
        ``(Dis_running_loss, Enc_running_loss, Dec_running_loss)``.
    """
    D.train()
    G.train()

    # Fixed: reduction='elementwise_mean' was deprecated in torch 1.0 and
    # later removed (valid values: 'none' | 'mean' | 'sum'); 'mean' is the
    # exact equivalent.
    bce_loss = nn.BCEWithLogitsLoss(reduction='mean')

    def l1_loss(input, target):
        return torch.mean(torch.abs(input - target))

    def l2_loss(input, target):  # unused; kept for experimentation parity
        return torch.mean((input - target).pow(2))

    # Adversarial target labels: ones = "real", zeros = "fake".
    t_real = Variable(torch.ones(opt.batch_size, 1))
    t_fake = Variable(torch.zeros(opt.batch_size, 1))
    if opt.use_gpu:
        t_real = t_real.cuda()
        t_fake = t_fake.cuda()

    Dis_running_loss = 0
    Enc_running_loss = 0
    Dec_running_loss = 0
    num_itrs = len(data_loader.dataset) // opt.batch_size

    for itr, (data, _) in enumerate(data_loader):
        # Drop the trailing partial batch: the label tensors above are sized
        # for exactly opt.batch_size.
        if data.size()[0] != opt.batch_size:
            break

        # train Discriminator ===
        Dis_optimizer.zero_grad()

        x_real = Variable(data)
        z_fake_p = Variable(torch.randn(data.shape[0], opt.nz))
        if opt.use_gpu:
            x_real = x_real.cuda()
            z_fake_p = z_fake_p.cuda()
        x_fake, mu, logvar = G(x_real)

        # L_gan --- real images labelled real; reconstructions and samples
        # drawn from the prior labelled fake.
        y_real_loss = bce_loss(D(x_real), t_real)
        y_fake_loss = bce_loss(D(x_fake), t_fake)
        y_fake_p_loss = bce_loss(D(G.decoder(z_fake_p)), t_fake)
        L_gan_real = (y_real_loss + y_fake_loss + y_fake_p_loss) / 3.0

        # Dis_loss ---
        Dis_loss = L_gan_real
        Dis_loss.backward()
        Dis_optimizer.step()
        Dis_running_loss += Dis_loss.item()

        # train Encoder ===
        Enc_optimizer.zero_grad()

        # Fresh forward pass — the previous graph was freed by backward().
        x_real = Variable(data)
        if opt.use_gpu:
            x_real = x_real.cuda()
        x_fake, mu, logvar = G(x_real)

        # L_prior --- KL divergence of q(z|x) from the unit Gaussian.
        L_prior = opt.alpha * (
            -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp()))

        # L_llike --- pixel reconstruction plus discriminator feature matching.
        L_recon = opt.gamma * l1_loss(x_fake, x_real)
        L_llike = l1_loss(D.feature(x_fake), D.feature(x_real))

        # Enc_loss ---
        Enc_loss = L_prior + L_recon + L_llike
        Enc_loss.backward()
        Enc_optimizer.step()
        Enc_running_loss += Enc_loss.item()

        # train Decoder ===
        Dec_optimizer.zero_grad()

        x_real = Variable(data)
        z_fake_p = Variable(torch.randn(opt.batch_size, opt.nz))
        if opt.use_gpu:
            x_real = x_real.cuda()
            z_fake_p = z_fake_p.cuda()
        x_fake, mu, logvar = G(x_real)

        # L_gan --- labels swapped relative to the discriminator step: the
        # decoder is rewarded when D mistakes its outputs for real.
        y_real_loss = bce_loss(D(x_real), t_fake)
        y_fake_loss = bce_loss(D(x_fake), t_real)
        y_fake_p_loss = bce_loss(D(G.decoder(z_fake_p)), t_real)
        L_gan_fake = (y_real_loss + y_fake_loss + y_fake_p_loss) / 3.0

        # L_llike ---
        L_recon = opt.gamma * l1_loss(x_fake, x_real)
        L_llike = l1_loss(D.feature(x_fake), D.feature(x_real))

        # Dec_loss ---
        Dec_loss = L_recon + L_llike + L_gan_fake
        Dec_loss.backward()
        Dec_optimizer.step()
        Dec_running_loss += Dec_loss.item()

        # Single-line progress indicator (\r rewrites, \033[K clears).
        sys.stdout.write(
            '\r\033[Kitr [{}/{}], Dis_loss: {:.6f}, Enc_loss: {:.6f}, Dec_loss: {:.6f}'
            .format(itr + 1, num_itrs, Dis_loss.item(), Enc_loss.item(),
                    Dec_loss.item()))
        sys.stdout.flush()

        # Every 10 iterations, log sample images and scalar losses.
        if (itr + 1) % 10 == 0:
            G.eval()

            # generation
            z = Variable(torch.randn((8, opt.nz)))
            if opt.use_gpu:
                z = z.cuda()

            generated = generate(G, z)
            writer.add_image('Generated Image', generated)

            # reconstruction
            x = Variable(data)
            x = x[:8]
            if opt.use_gpu:
                x = x.cuda()

            x, xhat = reconstruct(G, x)
            reconstructed = torch.cat((x, xhat), dim=0)
            writer.add_image('Reconstructed Image', reconstructed)

            # loss
            writer.add_scalars('Loss', {
                'Discriminator': Dis_loss.item(),
                'Encoder': Enc_loss.item(),
                'Decoder': Dec_loss.item(),
                'L_gan_real': L_gan_real.item(),
                'L_gan_fake': L_gan_fake.item(),
                'L_prior': L_prior.item(),
                'L_recon': L_recon.item(),
                'L_llike': L_llike.item()
            },
                               global_step=epoch * num_itrs + itr + 1)

            G.train()

    Dis_running_loss /= num_itrs
    Enc_running_loss /= num_itrs
    Dec_running_loss /= num_itrs

    return Dis_running_loss, Enc_running_loss, Dec_running_loss
示例#9
0
def main():
    """CLI entry point: train the VAE-GAN on an image-folder dataset,
    periodically checkpointing and dumping generated/reconstructed image
    grids, then save the final models and plot the loss history."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_dir', default='logs', help='log directory')
    parser.add_argument('--data_dir',
                        default='data/celeba',
                        help='data directory')
    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='batch size')
    # NOTE(review): typo "laoding" in the help text below (runtime string,
    # left unchanged in this documentation pass).
    parser.add_argument('--num_workers',
                        type=int,
                        default=4,
                        help='number of workers for data laoding')
    parser.add_argument('--num_epochs',
                        type=int,
                        default=100,
                        help='number of epochs')
    parser.add_argument('--checkpoint',
                        type=int,
                        default=10,
                        help='checkpoint epoch')
    parser.add_argument('--lr', type=float, default=2e-4, help='learning rate')
    parser.add_argument('--weight_decay',
                        type=float,
                        default=1e-5,
                        help='weight decay')
    parser.add_argument('--nz',
                        type=int,
                        default=100,
                        help='dimension of latent variable')
    parser.add_argument('--alpha',
                        type=float,
                        default=1e-2,
                        help='coefficient of L_prior')
    parser.add_argument('--gamma',
                        type=float,
                        default=5,
                        help='coefficient of L_recon')
    parser.add_argument('--G_model',
                        default=None,
                        help='pretrained Generator model path')
    parser.add_argument('--D_model',
                        default=None,
                        help='pretrained Discriminator model path')
    parser.add_argument('--use_gpu', action='store_true', help='GPU mode')
    opt = parser.parse_args()
    # Silently fall back to CPU when CUDA is unavailable.
    if opt.use_gpu:
        opt.use_gpu = torch.cuda.is_available()

    if not os.path.exists(opt.data_dir):
        os.makedirs(opt.data_dir)

    if not os.path.exists(opt.log_dir):
        os.makedirs(opt.log_dir)

    writer = SummaryWriter(os.path.join(opt.log_dir, 'runs'))

    # =============== data preparation ================ #
    # Normalize to [-1, 1] per channel (matches a tanh decoder output range).
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    dataset = datasets.ImageFolder(opt.data_dir, transform=transform)
    data_loader = DataLoader(dataset,
                             batch_size=opt.batch_size,
                             shuffle=True,
                             num_workers=opt.num_workers)

    # ==================== model ====================== #
    G = Generator(opt.nz)
    D = Discriminator()
    # Warm-start only when BOTH pretrained paths are provided.
    if (opt.G_model is not None) and (opt.D_model is not None):
        G.load_state_dict(torch.load(opt.G_model))
        D.load_state_dict(torch.load(opt.D_model))
    if opt.use_gpu:
        G = G.cuda()
        D = D.cuda()

    # ================== optimizer ==================== #
    # Separate optimizers so train() can update encoder, decoder and
    # discriminator independently; beta1=0.5 is the usual GAN setting.
    Enc_optimizer = optim.Adam(G.encoder.parameters(),
                               lr=opt.lr,
                               weight_decay=opt.weight_decay,
                               betas=(0.5, 0.999))
    Dec_optimizer = optim.Adam(G.decoder.parameters(),
                               lr=opt.lr,
                               weight_decay=opt.weight_decay,
                               betas=(0.5, 0.999))
    Dis_optimizer = optim.Adam(D.parameters(),
                               lr=opt.lr,
                               weight_decay=opt.weight_decay,
                               betas=(0.5, 0.999))

    # ==================== train ====================== #
    history = {}
    history['Dis_loss'] = []
    history['Enc_loss'] = []
    history['Dec_loss'] = []

    for epoch in range(opt.num_epochs):
        Dis_loss, Enc_loss, Dec_loss = train(D, G, Dis_optimizer,
                                             Enc_optimizer, Dec_optimizer,
                                             data_loader, writer, opt, epoch)
        sys.stdout.write(
            '\r\033[Kepoch [{}/{}], Dis_loss: {:.6f}, Enc_loss: {:.6f}, Dec_loss: {:.6f}\n'
            .format(epoch + 1, opt.num_epochs, Dis_loss, Enc_loss, Dec_loss))
        sys.stdout.flush()

        history['Dis_loss'].append(Dis_loss)
        history['Enc_loss'].append(Enc_loss)
        history['Dec_loss'].append(Dec_loss)

        # Every --checkpoint epochs: save weights and dump sample images.
        if (epoch + 1) % opt.checkpoint == 0:
            # save model
            torch.save(
                G.state_dict(),
                os.path.join(opt.log_dir,
                             'G_epoch{:04d}.pth'.format(epoch + 1)))
            torch.save(
                D.state_dict(),
                os.path.join(opt.log_dir,
                             'D_epoch{:04d}.pth'.format(epoch + 1)))

            G.eval()

            # generation — 10 samples from the prior.
            z = Variable(torch.randn((10, opt.nz)))
            if opt.use_gpu:
                z = z.cuda()

            generated = generate(G, z)
            save_image(generated,
                       os.path.join(
                           opt.log_dir,
                           'generated_epoch{:04d}.png'.format(epoch + 1)),
                       nrow=10)

            # reconstruction — only the first batch is used (note the break).
            for data, _ in data_loader:
                x = Variable(data)
                x = x[:10]
                if opt.use_gpu:
                    x = x.cuda()

                x, xhat = reconstruct(G, x)
                reconstructed = torch.cat((x, xhat), dim=0)
                save_image(reconstructed,
                           os.path.join(
                               opt.log_dir,
                               'reconstructed_epoch{:04d}.png'.format(epoch +
                                                                      1)),
                           nrow=10)
                break

    # ================== save model ==================== #
    torch.save(
        G.state_dict(),
        os.path.join(opt.log_dir, 'G_epoch{:04d}.pth'.format(opt.num_epochs)))
    torch.save(
        D.state_dict(),
        os.path.join(opt.log_dir, 'D_epoch{:04d}.pth'.format(opt.num_epochs)))

    G.eval()

    # generation — final samples after the last epoch.
    z = Variable(torch.randn((10, opt.nz)))
    if opt.use_gpu:
        z = z.cuda()

    generated = generate(G, z)
    save_image(generated,
               os.path.join(opt.log_dir, 'generated_epoch{:04d}.png'.format(
                   opt.num_epochs)),
               nrow=10)

    # reconstruction — again only the first batch (note the break).
    for data, _ in data_loader:
        x = Variable(data)
        x = x[:10]
        if opt.use_gpu:
            x = x.cuda()

        x, xhat = reconstruct(G, x)
        reconstructed = torch.cat((x, xhat), dim=0)
        save_image(reconstructed,
                   os.path.join(
                       opt.log_dir,
                       'reconstructed_epoch{:04d}.png'.format(opt.num_epochs)),
                   nrow=10)
        break

    # ================== show loss ===================== #
    fig = plt.figure()
    plt.subplot(3, 1, 1)
    plt.plot(history['Dis_loss'])
    plt.ylabel('Discriminator Loss')
    plt.xlabel('Epoch')
    plt.grid()

    plt.subplot(3, 1, 2)
    plt.plot(history['Enc_loss'])
    plt.ylabel('Encoder Loss')
    plt.xlabel('Epoch')
    plt.grid()

    plt.subplot(3, 1, 3)
    plt.plot(history['Dec_loss'])
    plt.ylabel('Decoder Loss')
    plt.xlabel('Epoch')
    plt.grid()
    plt.savefig(os.path.join(opt.log_dir, 'loss.png'))

    # Persist raw loss history for later analysis.
    with open(os.path.join(opt.log_dir, 'history.pkl'), 'wb') as f:
        pickle.dump(history, f)
示例#10
0
def main():
    """Interactive viewer for the procedural city generator: runs the pygame
    event loop, handling regeneration, pathfinding, lot generation, pan/zoom,
    road selection and several debug overlays."""
    pygame.init()
    drawing.init()

    screen_data = drawing.ScreenData(
        pygame.display.set_mode(config.SCREEN_RES, pygame.RESIZABLE), (0, 0))
    input_data = InputData()
    path_data = pathing.PathData()
    selection = None

    lots = []

    city = generation.generate()
    # One (id, midpoint) label per road, for the road-order debug overlay.
    city_labels = []
    for road in city.roads:
        city_labels.append((str(road.global_id), road.point_at(0.5)))

    prev_time = pygame.time.get_ticks()

    running = True
    while running:
        # Frame cap at ~16 ms (~60 FPS).
        # NOTE(review): this is a busy-wait spin; pygame.time.Clock().tick(60)
        # would yield the CPU instead — confirm before changing.
        if pygame.time.get_ticks() - prev_time < 16:
            continue

        input_data.pos = pygame.mouse.get_pos()
        input_data.pressed = pygame.mouse.get_pressed()
        prev_time = pygame.time.get_ticks()

        # Key bindings: G regenerate, B build lots, Z/X set path endpoints,
        # C A*, V Dijkstra, anything else toggles debug views.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.VIDEORESIZE:
                screen_data.screen = pygame.display.set_mode(
                    event.dict["size"], pygame.RESIZABLE)
                config.SCREEN_RES = event.dict["size"]
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_g:
                    # Regenerate the city and reset all per-city state.
                    debug.SHOW_ROAD_ORDER = False
                    city_labels = []
                    selection = None
                    path_data = pathing.PathData()
                    city = generation.generate()
                    for road in city.roads:
                        city_labels.append(
                            (str(road.global_id), road.point_at(0.5)))
                if event.key == pygame.K_b:
                    lots = build_gen.gen_lots(city)
                # Pathing
                elif event.key == pygame.K_z:
                    path_data.start = road_near_point(input_data.pos,
                                                      screen_data, city)
                elif event.key == pygame.K_x:
                    path_data.end = road_near_point(input_data.pos,
                                                    screen_data, city)
                elif event.key == pygame.K_c:
                    pathing.astar(path_data, city.roads)
                elif event.key == pygame.K_v:
                    pathing.dijkstra(path_data, city.roads)
                # Debug Views
                else:
                    handle_keys_debug(event.key)
            elif event.type == pygame.MOUSEBUTTONDOWN:
                # Zooming (mouse wheel up/down)
                if event.button == 4:
                    screen_data.zoom_in(input_data.pos)
                elif event.button == 5:
                    screen_data.zoom_out(input_data.pos)

        # Dragging & Selection: a press-release without movement selects the
        # nearest road; movement while pressed pans the view.
        if input_data.prev_pressed[0]:
            if input_data.pressed[0]:  # Continue drag
                screen_data.pan = vectors.add(
                    screen_data.pan,
                    vectors.sub(input_data.pos, input_data.drag_prev_pos))
                input_data.drag_prev_pos = input_data.pos
            else:
                if input_data.pos == input_data.drag_start:  # Select road
                    selection = selection_from_road(
                        road_near_point(input_data.drag_start, screen_data,
                                        city))
                # Clear out drag information
                input_data.drag_start = None
                input_data.drag_prev_pos = (0, 0)
        else:
            if input_data.pressed[0]:  # Drag started
                input_data.drag_start = input_data.pos
                input_data.drag_prev_pos = input_data.pos

        # Drawing
        screen_data.screen.fill((0, 0, 0))
        if debug.SHOW_HEATMAP:
            drawing.draw_heatmap(50, city, screen_data)
        if debug.SHOW_SECTORS:
            drawing.draw_sectors(screen_data)

        # Draw building lots, cycling the polygon color per lot.
        color = (125, 255, 50)
        for poly in lots:
            temp = []
            for point in poly:
                temp.append(
                    drawing.world_to_screen(point, screen_data.pan,
                                            screen_data.zoom))
            pygame.draw.polygon(screen_data.screen, color, temp)
            color = (color[0], color[1] - 11, color[2] + 7)
            # Wrap channel values back into the 0-255 range.
            if color[1] < 0:
                color = (color[0], 255, color[2])
            if color[2] > 255:
                color = (color[0], color[1], 0)

        # Draw roads — either an isolated sector, the sector under the mouse,
        # or every sector intersecting the visible screen rectangle.
        if debug.SHOW_ISOLATE_SECTOR and selection is not None:
            for sector in sectors.from_seg(selection.road):
                drawing.draw_all_roads(city.sectors[sector], screen_data)
        elif debug.SHOW_MOUSE_SECTOR:
            mouse_sec = sectors.containing_sector(
                drawing.screen_to_world(input_data.pos, screen_data.pan,
                                        screen_data.zoom))
            if mouse_sec in city.sectors:
                drawing.draw_all_roads(city.sectors[mouse_sec], screen_data)
        else:
            tl_sect = sectors.containing_sector(
                drawing.screen_to_world((0, 0), screen_data.pan,
                                        screen_data.zoom))
            br_sect = sectors.containing_sector(
                drawing.screen_to_world(config.SCREEN_RES, screen_data.pan,
                                        screen_data.zoom))
            for x in range(tl_sect[0], br_sect[0] + 1):
                for y in range(tl_sect[1], br_sect[1] + 1):
                    if (x, y) in city.sectors:
                        drawing.draw_all_roads(city.sectors[(x, y)],
                                               screen_data)

        drawing.draw_roads_selected(selection, screen_data)
        drawing.draw_roads_path(path_data, screen_data)

        # Debug labels: column 0 left-aligned, column 1 right-aligned.
        if debug.SHOW_INFO:
            debug_labels = debug.labels(screen_data, input_data, path_data,
                                        selection, city)

            for x in range(len(debug_labels[0])):
                label_pos = (10, 10 + x * 15)
                drawing.draw_label_screen((debug_labels[0][x], label_pos),
                                          screen_data, 1)

            for x in range(len(debug_labels[1])):
                label_pos = (config.SCREEN_RES[0] - 10, 10 + x * 15)
                drawing.draw_label_screen((debug_labels[1][x], label_pos),
                                          screen_data, -1)

        if debug.SHOW_ROAD_ORDER:
            for label in city_labels:
                drawing.draw_label_world(label, screen_data, 1)

        pygame.display.flip()
示例#11
0
from generation import generate
import numpy as np
import pandas as pd
import logging

logging.basicConfig(level=logging.DEBUG)
logging.info("Starting Generation")

# Pull the generated series and split it into epoch timestamps and values.
data = generate()
dates = data.index
# Nanosecond epoch -> seconds (assumes a DatetimeIndex — TODO confirm).
times = dates.astype(int) // 10**9
values = data.value.values

logging.info("Start transformation")
def definir_accion(d):
    """Map a sensor value to its action label.

    1 maps to 'entrar', -1 to 'salir'; every other value yields the
    two-character literal ``''`` used as an "empty" marker downstream.
    """
    acciones = {1: 'entrar', -1: 'salir'}
    return acciones.get(d, '\'\'')

# Assemble the output frame: raw value + epoch timestamp, then constant
# metadata columns and two derived columns (action label, weekday name).
df = pd.DataFrame({"value":values, "timestamp":times})
df["Measure"] = "Personas"
df['device'] = 'd1'
df['sensor'] = 'reflectivo'
df['accion'] = df.value.apply(definir_accion)
df['dia'] = dates.day_name()

# Line template for serializing rows (presumably consumed further below —
# not used within this visible span; verify against the rest of the file).
format_to="{}, device={}, sensor={}, accion={} value={}, {}"
def main():
    """CLI entry point: read options (CLI flags layered over config.ini via
    configargparse), derive a random id when none is given, and render a
    procedural solar-system SVG through ``generation.generate``."""
    parser = configargparse.ArgParser(default_config_files=['config.ini'])
    # --- canvas / global appearance ---
    parser.add('--width', required=False, type=int, help='SVG width')
    parser.add('--height', required=False, type=int, help='SVG height')
    parser.add('--color_palette',
               required=False,
               action='append',
               help='list of colors for stars')
    parser.add('--font_size',
               required=False,
               type=int,
               help='font size for planet name')

    # --- background stars ---
    parser.add('--nb_stars', required=False, type=int, help='number of stars')
    parser.add('--min_size_stars',
               required=False,
               type=int,
               help='minimal star size')
    parser.add('--max_size_stars',
               required=False,
               type=int,
               help='maximal star size')
    parser.add('--color_proba',
               required=False,
               type=float,
               help='probability of coloured star')

    # --- planets ---
    parser.add('--nb_planets',
               required=False,
               type=int,
               help='number of planets')
    parser.add('--distance_planet',
               required=False,
               type=int,
               help='distance in pixel between each planet')
    parser.add('--min_size_planet',
               required=False,
               type=int,
               help='minimal planet size')
    parser.add('--max_size_planet',
               required=False,
               type=int,
               help='maximal planet size')

    # --- rings ---
    parser.add('--ring_proba',
               required=False,
               type=float,
               help='probability of a ringed planet')
    parser.add('--min_ring',
               required=False,
               type=int,
               help='minimal number of rings')
    parser.add('--max_ring',
               required=False,
               type=int,
               help='maximal number of rings')

    # --- moons ---
    parser.add('--moon_proba',
               required=False,
               type=float,
               help='probability for a planet to have moons')
    parser.add('--distance_moon',
               required=False,
               type=int,
               help='distance between each moon')
    parser.add('--min_nb_moons',
               required=False,
               type=int,
               help='minimal number of moon')
    parser.add('--max_nb_moons',
               required=False,
               type=int,
               help='maximal number of moon')
    parser.add('--min_size_moon',
               required=False,
               type=int,
               help='minimal size of moon')
    parser.add('--max_size_moon',
               required=False,
               type=int,
               help='maximal size of moon')

    # --- misc ---
    parser.add('--id', required=False, help='random seed for generation')
    parser.add('-f',
               '--filename',
               default='solar_system.svg',
               required=False,
               help='file name to save')
    parser.add('-v', '--version', action='version', version=__VERSION__)

    options = parser.parse_args()
    #print(options)
    #print(parser.format_values())

    # No seed supplied: derive one from the planet count.
    if options.id is None:
        options.id = utils.generate_id(5, options.nb_planets)
        #print(options.id)

    print('Generating planetary system "{}"'.format(options.id))
    generation.generate(options)