def load_images():
    """Scrape cover images from the Baker & Taylor API.

    Book metadata itself is not written; an existing ``books.json``
    must already be present for this task to work.
    """
    # Make sure the destination directory exists; a non-zero exit from
    # mkdir is tolerated (warn_only).
    with settings(warn_only=True):
        local('mkdir -p www/img/cover')
    data.load_images()
def training(epochs=1, batch_size=32):
    """Adversarially train a GAN on the loaded image set.

    Args:
        epochs: number of passes over the data.
        batch_size: minibatch size for both networks.

    Fix: the batch count used to be computed with true division
    (``shape[0] / batch_size``), so the per-epoch losses were averaged by a
    float that did not match the ``int(...)`` number of iterations actually
    run.  Floor division makes the denominator the real iteration count.
    """
    # Loading data
    x_train = load_images()
    # Number of full minibatches per epoch (ragged remainder dropped).
    batches = x_train.shape[0] // batch_size

    # Creating GAN
    generator = create_generator()
    discriminator = create_discriminator()
    gan = create_gan(generator, discriminator)

    # Adversarial labels; 0.9 is one-sided label smoothing for real samples.
    y_valid = np.ones(batch_size) * 0.9
    y_fake = np.zeros(batch_size)

    discriminator_loss, generator_loss = [], []
    for epoch in range(1, epochs + 1):
        print('-' * 15, 'Epoch', epoch, '-' * 15)
        g_loss = 0
        d_loss = 0
        for _ in tqdm(range(batches)):
            # Random noise and a random batch of real images.
            noise = generate_noise(batch_size)
            image_batch = x_train[np.random.randint(0, x_train.shape[0],
                                                    size=batch_size)]

            # Generate fake images
            generated_images = generator.predict(noise)

            # Train discriminator on real and fake batches separately.
            discriminator.trainable = True
            d_valid_loss = discriminator.train_on_batch(image_batch, y_valid)
            d_fake_loss = discriminator.train_on_batch(generated_images, y_fake)
            d_loss += (d_fake_loss + d_valid_loss) / 2

            # Train generator through the frozen discriminator.
            noise = generate_noise(batch_size)
            discriminator.trainable = False
            g_loss += gan.train_on_batch(noise, y_valid)

        # Average over the exact number of batches run this epoch.
        discriminator_loss.append(d_loss / batches)
        generator_loss.append(g_loss / batches)

        if epoch % PLOT_FRECUENCY == 0:
            plot_images(epoch, generator)
            plot_loss(epoch, generator_loss, discriminator_loss)

    save_images(generator)
def test():
    """Visualise model predictions on the test split.

    Loads checkpoint 408408, runs inference without gradients and, for each
    sample, prints the decoded real/generated predictions and displays the
    input image.

    Fix: the function previously ended with ``loss /= n; return loss`` even
    though neither ``loss`` nor ``n`` was ever assigned, so it always raised
    NameError after the display loop; the dead code (and the unreachable
    trailing ``pass``) has been removed.
    """
    model = Model().cuda()
    test_data = load_images(['test'])[0]
    load_checkpoint(408408, model)
    with torch.no_grad():
        for key, batch in test_data.items():
            for i in range(0, len(batch[0]), N):
                x = batch[0][i:i + N].cuda()
                # NOTE(review): the extra [0] keeps only the first target of
                # the slice — confirm this is intentional.
                y = batch[1][i:i + N][0].cuda()
                y_pred = model(x)
                for j in range(0, N):
                    img = x[j].reshape(x[j].shape[1:]).cpu().numpy()
                    real = get_prediction(y[j])
                    gen = get_prediction(y_pred[j].argmax(dim=0))
                    print('real:', real)
                    print('gen:', gen)
                    plt.imshow(img)
                    plt.show()
# Network layers in forward order.
layers = [l1, l2, l3, l4, l5, l6, l7, l8, output_layer]

images = []        # flattened image data
y = []             # integer class label per image
file_names = []    # source filename per image
dimensions = []    # original dimensions per image

# Each subdirectory of ./train is one class; sort for a stable label order.
train_labels = [x for x in os.listdir("train")
                if os.path.isdir("{0}{1}{2}".format("train", os.sep, x))]
train_directories = ["{0}{1}{2}".format("train", os.sep, x)
                     for x in train_labels]
train_labels, train_directories = zip(*sorted(
    zip(train_labels, train_directories), key=lambda x: x[0]))

for idx, folder in enumerate(train_directories):
    for f_name_dir in os.walk(folder):
        dir_images, fnames, dims = load_images(f_name_dir, img_dim=img_dim)
        images = images + dir_images
        y = y + [idx for x in dir_images]
        dimensions = dimensions + dims
        file_names = file_names + fnames


def to_one_hot(l):
    """Return a one-hot matrix (len(l) x num_classes) for integer labels.

    Fix: the width is now ``max(l) + 1`` rather than ``len(set(l))`` — with
    the old sizing, a class index that contributed no samples (e.g. an empty
    training directory) shrank the matrix and made ``out[idx, label]`` raise
    IndexError for every later class.  Behavior is identical when the labels
    are contiguous 0..k-1.
    """
    out = np.zeros((len(l), max(l) + 1))
    for idx, label in enumerate(l):
        out[idx, label] = 1
    return out


y = to_one_hot(y)


def predict(model, X_test):
    model.set_batch_size(batch_size)
# One class per subdirectory of ./train, sorted so the label order is
# deterministic across runs.
train_labels = [
    d for d in os.listdir("train") if os.path.isdir(os.path.join("train", d))
]
train_directories = [os.path.join("train", d) for d in train_labels]
train_labels, train_directories = list(
    zip(*sorted(zip(train_labels, train_directories),
                key=lambda pair: pair[0])))

# Walk every class directory and accumulate images plus their metadata.
for idx, folder in enumerate(train_directories):
    for walk_entry in os.walk(folder):
        dir_images, fnames, dims = load_images(walk_entry,
                                               img_dim=img_dim,
                                               as_grey=as_grey)
        images = images + dir_images
        y = y + [idx] * len(dir_images)
        dimensions = dimensions + dims
        file_names = file_names + fnames


def to_one_hot(l):
    """One-hot encode the integer label sequence ``l``."""
    out = np.zeros((len(l), len(set(l))))
    for row, label in enumerate(l):
        out[row, label] = 1
    return out


y = to_one_hot(y)
#!/usr/bin/env python3
"""Measure feature redundancy of a trained network.

Feeds one image per class from every selector through the net and reports
the mean absolute cosine similarity between feature channels (1.0 = fully
redundant, 0.0 = orthogonal).
"""
import sys

import torch
import numpy as np

from neural_net import load_model
from data import train_selector, test_selector1, test_selector2, load_images

# Deterministic sampling for reproducible measurements.
np.random.seed(0)
torch.manual_seed(0)

net = load_model(sys.argv[1])

images = []
for selector in (train_selector, test_selector1, test_selector2):
    images += load_images(selector, max_per_class=1)[0]

batch = net(torch.cat(images, dim=0))
transpose = batch.t()
# Per-channel L2 norms, clamped away from zero to avoid division blow-ups.
norms = transpose.pow(2).sum(dim=1).clamp(min=0.001).sqrt()
norm_mat = norms.unsqueeze(0) * norms.unsqueeze(1)
# Mean absolute cosine similarity over all channel pairs.
redundancy = ((transpose @ batch) / norm_mat).abs().mean()
# Fix: the printed label was misspelled "redudancy".
print("redundancy", redundancy)
def load_level(f):
    """Parse level file ``f`` into game objects.

    The file is scanned in three passes so that later lines can reference
    objects by name: pass 1 builds planets/pickups/settings, pass 2 builds
    platforms (possibly relative to a named planet), pass 3 builds boosters,
    monsters, checkpoints and signs (possibly relative to named planets or
    platforms).

    Returns a tuple: (player, planets, platforms, checkpoints, monsters,
    boosters, stars, bullets, ports, stables, parts, settings).
    """
    planets = []
    planet_dict = {}    # name -> planet (optional 8th token on a line)
    platforms = []
    platform_dict = {}  # name -> platform (optional 8th token on a line)
    monsters = []
    boosters = []
    bullets = []
    player_start = [300, 300]
    settings = {'update_all': False, 'song': '4'}
    checkpoints = []
    stars = []
    ports = []
    stables = []
    parts = []
    txt = text.Text()
    img_dict = data.load_images()
    level = data.load_level_file(f)
    level = level.readlines()
    # Pass 1: player spawn, planets, stars, ports, parts and level settings.
    for line in level:
        l = line.split()
        if len(l) == 0:
            continue
        if l[0] == "player":
            player_start = [int(l[1]), int(l[2])]
            # The spawn point doubles as checkpoint index 0.
            checkpoints.insert(
                0, enviroment.Checkpoint(int(l[1]), int(l[2]), img_dict))
        elif l[0] == "planet":
            planets.append(
                enviroment.Planet(int(l[1]), int(l[2]), int(l[3]), int(l[4]),
                                  int(l[5]), l[6]))
            if len(l) > 7:
                planet_dict[l[7]] = planets[-1]
        elif l[0] == "star":
            stars.append(pickup.Star((int(l[1]), int(l[2])), img_dict))
        elif l[0] == "port":
            ports.append(
                enviroment.Port(int(l[1]), int(l[2]), l[3] + '.txt',
                                img_dict['port']))
        elif l[0] == "part":
            parts.append(
                pickup.Part(int(l[1]), int(l[2]),
                            img_dict['parts'][int(l[3])]))
        elif l[0] == "update_all":
            settings['update_all'] = True
        elif l[0] == "song":
            settings['song'] = l[1]
    # Pass 2: platforms ('a' = absolute coordinates, otherwise relative to a
    # previously named planet).
    for line in level:
        l = line.split()
        if len(l) == 0:
            continue
        elif l[0] == "platform":
            if l[1] == 'a':
                platforms.append(
                    enviroment.Platform((int(l[2]), int(l[3])),
                                        (int(l[4]), int(l[5])), img_dict,
                                        l[6]))
                if len(l) > 7:
                    platform_dict[l[7]] = platforms[-1]
            else:
                platforms.append(
                    enviroment.RelativePlatform(planet_dict[l[2]], int(l[3]),
                                                int(l[4]), int(l[5]),
                                                int(l[6]), img_dict))
                if len(l) > 7:
                    platform_dict[l[7]] = platforms[-1]
    # Pass 3: objects that may attach to named planets or platforms.
    for line in level:
        l = line.split()
        if len(l) == 0:
            continue
        elif l[0] == "booster":
            if l[1] == 'a':
                boosters.append(
                    enviroment.Booster(img_dict, l[1], planet_dict[l[2]],
                                       int(l[3]), int(l[4])))
            else:
                boosters.append(
                    enviroment.Booster(img_dict, l[1], platform_dict[l[2]],
                                       int(l[3]), int(l[4])))
        elif l[0] == "O":
            if l[1] == 'a':
                monsters.append(
                    monster.O(l[1], img_dict, planet_dict[l[2]], int(l[3]),
                              int(l[4])))
            else:
                monsters.append(
                    monster.O(l[1], img_dict, platform_dict[l[2]], int(l[3]),
                              int(l[4])))
        elif l[0] == "C":
            if l[1] == 'a':
                monsters.append(
                    monster.C(l[1], img_dict, bullets, planet_dict[l[2]],
                              int(l[3]), int(l[4])))
            else:
                monsters.append(
                    monster.C(l[1], img_dict, bullets, platform_dict[l[2]],
                              int(l[3]), int(l[4])))
        elif l[0] == "I":
            if l[1] == 'a':
                monsters.append(
                    monster.I(l[1], img_dict, planet_dict[l[2]], int(l[3])))
            else:
                monsters.append(
                    monster.I(l[1], img_dict, platform_dict[l[2]],
                              int(l[3])))
        elif l[0] == "eye":
            if l[1] == 'a':
                monsters.append(
                    monster.Eye(l[1], img_dict, planet_dict[l[2]],
                                int(l[3])))
            else:
                monsters.append(
                    monster.Eye(l[1], img_dict, platform_dict[l[2]],
                                int(l[3])))
        elif l[0] == "Q":
            monsters.append(
                monster.Q(img_dict, bullets, planet_dict[l[1]], int(l[2]),
                          int(l[3]), int(l[4])))
        elif l[0] == 'checkpoint':
            # 'a' = planet-relative, 'p' = platform-relative, otherwise
            # absolute; the optional extra token is a second offset.
            if l[1] == 'a':
                if len(l) == 4:
                    checkpoints.append(
                        enviroment.RelativeCheckpoint('a', img_dict,
                                                      planet_dict[l[2]],
                                                      int(l[3])))
                elif len(l) > 4:
                    checkpoints.append(
                        enviroment.RelativeCheckpoint('a', img_dict,
                                                      planet_dict[l[2]],
                                                      int(l[3]), int(l[4])))
            elif l[1] == 'p':
                if len(l) == 4:
                    checkpoints.append(
                        enviroment.RelativeCheckpoint('p', img_dict,
                                                      platform_dict[l[2]],
                                                      int(l[3])))
                elif len(l) > 4:
                    checkpoints.append(
                        enviroment.RelativeCheckpoint('p', img_dict,
                                                      platform_dict[l[2]],
                                                      int(l[3]), int(l[4])))
            else:
                checkpoints.append(
                    enviroment.Checkpoint(int(l[1]), int(l[2]), img_dict))
        elif l[0] == 'sign':
            # Sign text is every token after the position fields, joined
            # with spaces.
            if l[1] == 'a':
                string = ''
                for i in range(len(l)):
                    if i > 4:
                        string += l[i] + ' '
                stables.append(
                    enviroment.RelativeStableObject(
                        'a', txt.render(string, (200, 200, 200)),
                        planet_dict[l[2]], int(l[3]), int(l[4])))
            elif l[1] == 'p':
                string = ''
                for i in range(len(l)):
                    if i > 4:
                        string += l[i] + ' '
                stables.append(
                    enviroment.RelativeStableObject(
                        'p', txt.render(string, (200, 200, 200)),
                        platform_dict[l[2]], int(l[3]), int(l[4])))
            else:
                string = ''
                for i in range(len(l)):
                    if i > 2:
                        string += l[i] + ' '
                stables.append(
                    enviroment.StableObject((int(l[1]), int(l[2])),
                                            txt.render(string,
                                                       (200, 200, 200))))
    # make gravity circles: ring of dash sprites at each planet's gravity
    # radius, spaced roughly PLANET.POINT_DISTANCE apart along the circle.
    for p in planets:
        c = 2 * p.gravity_radius * math.pi
        points = int(round(c / PLANET.POINT_DISTANCE))
        angle_dist = 360 / float(points)
        for x in range(points):
            stables.append(
                enviroment.RelativeStableObject(
                    'a', img_dict['dash'], p, x * angle_dist,
                    p.gravity_radius - p.circle.radius - 8))
    return player.Player(
        player_start, planets, platforms, img_dict
    ), planets, platforms, checkpoints, monsters, boosters, stars, bullets, ports, stables, parts, settings
def start(jukebox, level, first_time=False):
    """Run the main game loop for one level.

    Loads the level, then loops: handle input, advance physics by the frame
    delta, resolve collisions, and redraw.  Returns the filename of the next
    level to load (set when the player reaches a port with all parts
    collected) or None.
    """
    menu = False
    if level == "menu.txt":
        menu = True  # the menu is a level too; skip HUD and save-on-exit
    next_level = None
    pygame.display.set_caption("Moon's moons")
    screen = pygame.display.set_mode(
        (SCREEN.WIDTH, SCREEN.HEIGHT))  #, pygame.FULLSCREEN)
    # Show the story screen only on the first visit to a story level.
    if first_time and level in STORY:
        story(screen, STORY[level])
    # Semi-transparent overlay used for the debug/"shown" view.
    dim = screen.copy().convert_alpha()
    dim.fill((0, 0, 0, 200))
    screen_rect = screen.get_rect()
    background = data.load_image('background.png')
    img_dict = data.load_images()
    overlay = hud.Hud(SCREEN.WIDTH, SCREEN.HEIGHT, img_dict)
    pl, planets, platforms, checkpoints, monsters, boosters, stars, bullets, ports, stables, parts, settings = load_level(
        level)
    # Checkpoint 0 is the spawn point inserted by load_level.
    last_checkpoint = checkpoints.pop(0).circle.center
    cam = camera.Camera(pl)
    pl.set_camera(cam)
    decaying = []       # pickups currently playing their fade-out
    star_counter = 0
    tries = 1
    buttons = []
    buttons.append(
        button.Button(750, 50, 'esc', data.load_image('cross.png', True),
                      data.load_image('cross_over.png', True)))
    buttons.append(
        button.Button(680, 50, 'sound', data.load_image('sound.png', True),
                      data.load_image('sound_over.png', True)))
    for c in checkpoints:
        c.update_angle(planets)
    for p in ports:
        p.update_angle(planets)
    jukebox.play_song(settings['song'])
    running = True
    start_time = last_time = pygame.time.get_ticks()
    clock = pygame.time.Clock()
    shown = False   # debug view: draw collision shapes
    slowed = False  # slow-motion toggle
    won = False
    while running:
        clock.tick(100)
        # --- input ---
        for e in pygame.event.get():
            if e.type == pygame.QUIT:
                running = False
            if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
                running = False
            if e.type == pygame.KEYDOWN and e.key == pygame.K_r:
                pl.respawn(last_checkpoint)
            if e.type == pygame.KEYDOWN and e.key == pygame.K_e:
                if shown:
                    shown = False
                else:
                    shown = True
            if e.type == pygame.KEYDOWN and e.key == pygame.K_s:
                if slowed:
                    slowed = False
                else:
                    slowed = True
        # --- timestep ---
        time = pygame.time.get_ticks()
        delta = time - last_time
        if slowed:
            delta /= 10
        # Clamp huge deltas (e.g. after a pause) to avoid physics jumps.
        if delta > 300:
            delta = delta % 300
        external = [0, 0]
        for b in boosters:
            # NOTE(review): sum(external, b.get_force(pl)) uses the force as
            # the *start* value of sum() — confirm this accumulation is
            # intentional rather than element-wise vector addition.
            external = sum(external, b.get_force(pl))
        state = pl.update(pygame.key.get_pressed(), delta, external)
        if state == 'dead':
            pl.respawn(last_checkpoint)
            tries += 1
        # --- updates ---
        for m in monsters:
            if settings['update_all']:
                m.update(delta)
            else:
                # Only update monsters that are on screen.
                if m.has_line:
                    point = m.line.start
                else:
                    point = m.circle.center
                if cam.rect.collidepoint(point):
                    m.update(delta)
        for b in bullets:
            b.update(delta)
        for b in boosters:
            b.update(delta)
        cam.update()
        last_time = time
        # Background scrolls at 1/10th of the camera speed (parallax).
        screen_rect.x = cam.x / 10 + SCREEN.WIDTH / 2
        screen_rect.y = cam.y / 10 + SCREEN.HEIGHT / 2
        screen.blit(background, (0, 0), screen_rect)
        for ds in decaying:
            if ds.update(pl, delta):
                decaying.remove(ds)
            show(ds, screen, cam)
        for s in stars:
            if s.update(pl, delta):
                star_counter += 1
                decaying.append(stars.pop(stars.index(s)))
            show(s, screen, cam)
        for p in parts:
            if p.update(pl, delta):
                decaying.append(parts.pop(parts.index(p)))
            show(p, screen, cam)
            if shown:
                pygame.draw.circle(screen, (150, 150, 150),
                                   cam.shift(p.circle.center),
                                   p.circle.radius)
        # Drop bullets that left the camera view.
        bullets[:] = [
            b for b in bullets if cam.rect.collidepoint(b.circle.center)
        ]
        # Delete bullets that hit a planet or platform; iterate from the end
        # (blen - b) so deletion does not shift unvisited indices.
        blen = len(bullets) - 1
        for b in range(len(bullets)):
            c = False
            for p in planets:
                if p.circle.collide_circle(bullets[blen - b].circle):
                    c = True
                    break
            if not c:
                for p in platforms:
                    if p.line.collide_circle(bullets[blen - b].circle,
                                             PLATFORM.BORDER):
                        c = True
                        break
            if c:
                del bullets[blen - b]
        # Bullets hitting the player cause a respawn.
        blen = len(bullets) - 1
        for b in range(len(bullets)):
            if bullets[blen - b].circle.collide_circle(pl.circle):
                pl.respawn(last_checkpoint)
                tries += 1
                del bullets[blen - b]
        ##########################
        ######### DRAW ###########
        ##########################
        for s in stables:
            show(s, screen, cam)
        for p in ports:
            show(p, screen, cam)
            # Entering a port with all parts collected wins the level.
            if len(parts) == 0 and p.circle.collide_circle(pl.circle):
                next_level = p.level
                running = False
                won = True
        for b in boosters:
            show(b, screen, cam)
        for c in checkpoints:
            # Touching a new checkpoint re-caps it and un-caps the others.
            if c.circle.collide_circle(
                    pl.circle) and last_checkpoint != c.circle.center:
                last_checkpoint = c.circle.center
                for cp in checkpoints:
                    cp.uncap()
                c.cap()
            c.update(delta)
            screen.blit(
                c.image,
                (c.image_position[0] - cam.x, c.image_position[1] - cam.y))
        for b in bullets:
            show(b, screen, cam)
        for m in monsters:
            kill = False
            if m.has_line:
                if m.line.collide_circle(pl.circle, m.border):
                    kill = True
            else:
                if m.circle.collide_circle(pl.circle):
                    kill = True
            if kill:
                pl.respawn(last_checkpoint)
                tries += 1
            show(m, screen, cam)
            if shown:
                if m.has_line:
                    pygame.draw.line(screen, (150, 50, 255),
                                     cam.shift(m.line.start),
                                     cam.shift(m.line.end), m.border * 2)
                else:
                    pygame.draw.circle(screen, (150, 50, 255),
                                       cam.shift(m.circle.center),
                                       m.circle.radius)
        show(pl, screen, cam)
        if shown:
            pygame.draw.circle(screen, (150, 50, 255),
                               cam.shift(pl.circle.center), pl.circle.radius)
        for p in planets:
            show(p, screen, cam)
            if shown:
                pygame.draw.circle(screen, (50, 150, 255),
                                   cam.shift(p.circle.center),
                                   p.circle.radius)
        for p in platforms:
            screen.blit(
                p.image,
                (p.image_position[0] - cam.x, p.image_position[1] - cam.y))
        for b in buttons:
            e = b.update()
            if e == 'esc':
                running = False
            elif e == 'sound':
                jukebox.toggle()
            screen.blit(b.image, b.image_position)
        if not menu:
            overlay.update(delta, star_counter, tries)
            screen.blit(overlay.image, (0, 0))
        if shown:
            screen.blit(dim, (0, 0))
        pygame.display.flip()
    # Persist results (zeros when the level was quit rather than won).
    if not menu:
        if won:
            data.save_level(star_counter, tries, level)
        else:
            data.save_level(0, 0, level)
    return next_level
def e_step(game, iteration, logger, batch_size, z_dim, learn_termination):
    '''Run the E-step: assign a latent skill to every frame of every
    trajectory.

    iteration is the last model iteration. We'll save the latents as
    iteration + 1.  Per trajectory, the model's action/latent/termination
    probability matrices become node and edge potentials which are decoded
    with Viterbi; assignment metrics are accumulated and logged.
    '''
    print('E step: %s (%d)' % (game, iteration))
    model = logger.load_model(iteration)
    # Re-wrap the trained model so it outputs the three probability
    # matrices directly.
    predict_model = tf.keras.Model(
        inputs=[model.input[0]],
        outputs=[model.get_layer('action_matrix').output,
                 model.get_layer('latent_matrix').output,
                 model.get_layer('termination_matrix').output],)
    metrics = {}
    for traj_index in tqdm(data.get_traj_index_vec(game)):
        image_vec = data.load_images(game, traj_index)
        size = len(image_vec)
        image_vec = tf.constant(image_vec)
        dataset = tf.data.Dataset.from_tensor_slices(image_vec)
        # Transform and batch data at the same time
        dataset = dataset.map(data.image_preprocess_fn, num_parallel_calls=4)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
        s = dataset.make_one_shot_iterator().get_next()
        steps = int(np.ceil(float(size) / float(batch_size)))
        [action_probs, latent_probs,
         termination_probs] = predict_model.predict([s], steps=steps)
        assert len(action_probs) == size
        _check_prob_matrix(action_probs)
        _check_prob_matrix(latent_probs)
        _check_prob_matrix(termination_probs)
        # Compute the node potentials
        a_vec = np.array(data.load_actions(game, traj_index)).flatten()
        # Keep only the probability of the action actually taken, per skill.
        action_probs = action_probs[range(size), :, a_vec]  # T x Z
        # Note: We log-scale the node potentials, because we use a sum-based
        # version of Viterbi.
        node_potentials = np.log(action_probs)
        # Compute the edge potentials
        p_h_terminate = latent_probs[:-1]
        # Identity transition: stay in the same skill when not terminating.
        p_h_continue = np.eye(z_dim)[None, :, :]
        # termination_probs[..., 1] is probability that we *do* terminate
        if learn_termination:
            p_h = termination_probs[:-1, :, 1, None] * p_h_terminate + termination_probs[:-1, :, 0, None] * p_h_continue
        else:
            p_h = p_h_terminate
        _check_prob_matrix(p_h)
        edge_potentials = np.log(p_h)
        (z_vec, objective) = viterbi.viterbi(edge_potentials, node_potentials)
        # For the selected skills, what is the probability of the true action?
        action_probs_pos_mean = np.mean(action_probs[range(size), z_vec])
        # For the non-selected skills, what is the probability of the true action?
        action_probs_neg_mean = (np.sum(action_probs) - size * action_probs_pos_mean) / (size * (z_dim - 1.0))
        # Diagnostics: unused skills, mean run length between skill
        # switches, and action diversity per skill.
        num_empty = z_dim - len(set(z_vec))
        latent_switches = np.where(z_vec[:-1] != z_vec[1:])[0]
        avg_length = np.mean(latent_switches[1:] - latent_switches[:-1])
        avg_num_actions = np.mean(
            [len(set(a_vec[z_vec == z])) for z in range(z_dim)])
        for (key, value) in [('empty_skills', num_empty), (
                'skill_duration', avg_length), (
                    'actions_per_skill', avg_num_actions), (
                        'viterbi_objective', objective), (
                            'action_prob_pos_mean', action_probs_pos_mean), (
                                'action_prob_neg_mean',
                                action_probs_neg_mean)]:
            metrics[key] = metrics.get(key, []) + [value]
        logger.save_z(iteration, traj_index, z_vec)
    print('Assignment metrics (%d):' % iteration)
    for (key, value_vec) in metrics.items():
        logger.log(iteration, key, np.mean(value_vec))
        print('\t%s = %.2f' % (key, np.mean(value_vec)))
# NOTE(review): this file mixes `keras.*` and `tensorflow.keras.*`
# imports — confirm the installed Keras is backed by this TensorFlow.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization, GaussianNoise
import numpy as np
import data
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.utils import plot_model
from keras.optimizers import Adam

# Let TF grow GPU memory on demand instead of grabbing it all up front
# (TF1-style session configuration).
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)

# Inputs and targets.  NOTE(review): data.res_img is presumably populated
# as a side effect of data.load_images() — verify the call order.
n_data = np.array(data.load_images())
n_label = np.array(data.res_img)

# On-the-fly augmentation: random rotations, brightness and channel shifts.
datagen = ImageDataGenerator(rotation_range=90, brightness_range=[
                             0.3, 1.0], channel_shift_range=100)
it = datagen.flow(n_data, n_label, batch_size=4)

# Convolutional front end; expects 256x256 RGB input.
model = Sequential()
model.add(Conv2D(64, (2, 2), padding='same', input_shape=(256, 256, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(GaussianNoise(0.2))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('sigmoid'))
model.add(MaxPooling2D(pool_size=(4, 4)))
"""Render a trajectory to video with its Viterbi skill label burned in.

Usage: python <script> EXP_NAME TRAJ_INDEX ITERATION
"""
import time
import sys
import os
import json

# Fix: cv2 and data were used below but never imported, so the script
# crashed with NameError at the first use.
import cv2

from logger import Logger
from tqdm import tqdm

import data

exp_name = sys.argv[1]
with open(os.path.join(exp_name, 'args.json')) as f:
    game = json.load(f)['game']
traj_index = int(sys.argv[2])
iteration = int(sys.argv[3])

logger = Logger(exp_name, frozen=True)
z_vec = logger.load_z(iteration, traj_index)
image_vec = data.load_images(game, traj_index)

folder = os.path.join(exp_name, 'videos')
filename = os.path.join(folder, '%d_%d.avi' % (iteration, traj_index))
if not os.path.exists(folder):
    os.makedirs(folder)

fourcc = cv2.VideoWriter_fourcc(*'MJPG')
video = cv2.VideoWriter(filename, fourcc, 30.0, (500, 500))
# NOTE: the loop variable shadows the output `filename`; harmless here
# because the writer is already open.
for (z, filename) in tqdm(zip(z_vec, image_vec)):
    img = cv2.imread(filename)
    img = cv2.resize(img, (500, 500), cv2.INTER_NEAREST)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img, str(z), (400, 50), font, 2, (0, 0, 255), 4, cv2.LINE_AA)
    video.write(img)
# Fix: finalize the container — without release() the AVI can be left
# truncated/unplayable.
video.release()
def load_level(f):
    """Build all game objects for level file ``f``.

    The file is read in three passes so later lines can reference earlier
    objects by name: pass 1 creates planets/pickups/settings, pass 2 creates
    platforms (optionally relative to a named planet), pass 3 creates
    boosters, monsters, checkpoints and signs (optionally relative to named
    planets or platforms).

    Returns (player, planets, platforms, checkpoints, monsters, boosters,
    stars, bullets, ports, stables, parts, settings).
    """
    planets = []
    planet_dict = {}    # optional 8th token on a line names the planet
    platforms = []
    platform_dict = {}  # optional 8th token on a line names the platform
    monsters = []
    boosters = []
    bullets = []
    player_start = [300,300]
    settings = {'update_all': False, 'song': '4'}
    checkpoints = []
    stars = []
    ports = []
    stables = []
    parts = []
    txt = text.Text()
    img_dict = data.load_images()
    level = data.load_level_file(f)
    level = level.readlines()
    # First pass: player spawn, planets, stars, ports, parts and settings.
    for line in level:
        l = line.split()
        if len(l) == 0:
            continue
        if l[0] == "player":
            player_start = [int(l[1]), int(l[2])]
            # The spawn point also serves as checkpoint index 0.
            checkpoints.insert(0, enviroment.Checkpoint(int(l[1]), int(l[2]), img_dict))
        elif l[0] == "planet":
            planets.append(enviroment.Planet(int(l[1]), int(l[2]), int(l[3]), int(l[4]), int(l[5]), l[6]))
            if len(l) > 7:
                planet_dict[l[7]] = planets[-1]
        elif l[0] == "star":
            stars.append(pickup.Star((int(l[1]), int(l[2])),img_dict))
        elif l[0] == "port":
            ports.append(enviroment.Port(int(l[1]), int(l[2]), l[3] + '.txt', img_dict['port']))
        elif l[0] == "part":
            parts.append(pickup.Part(int(l[1]), int(l[2]), img_dict['parts'][int(l[3])]))
        elif l[0] == "update_all":
            settings['update_all'] = True
        elif l[0] == "song":
            settings['song'] = l[1]
    # Second pass: platforms ('a' = absolute, otherwise planet-relative).
    for line in level:
        l = line.split()
        if len(l) == 0:
            continue
        elif l[0] == "platform":
            if l[1] == 'a':
                platforms.append(enviroment.Platform((int(l[2]), int(l[3])), (int(l[4]), int(l[5])), img_dict, l[6]))
                if len(l) > 7:
                    platform_dict[l[7]] = platforms[-1]
            else:
                platforms.append(enviroment.RelativePlatform(planet_dict[l[2]], int(l[3]), int(l[4]), int(l[5]), int(l[6]), img_dict))
                if len(l) > 7:
                    platform_dict[l[7]] = platforms[-1]
    # Third pass: objects that may attach to named planets or platforms.
    for line in level:
        l = line.split()
        if len(l) == 0:
            continue
        elif l[0] == "booster":
            if l[1] == 'a':
                boosters.append(enviroment.Booster(img_dict, l[1], planet_dict[l[2]], int(l[3]), int(l[4])))
            else:
                boosters.append(enviroment.Booster(img_dict, l[1], platform_dict[l[2]], int(l[3]), int(l[4])))
        elif l[0] == "O":
            if l[1] == 'a':
                monsters.append(monster.O(l[1], img_dict, planet_dict[l[2]], int(l[3]), int(l[4])))
            else:
                monsters.append(monster.O(l[1], img_dict, platform_dict[l[2]], int(l[3]), int(l[4])))
        elif l[0] == "C":
            if l[1] == 'a':
                monsters.append(monster.C(l[1], img_dict, bullets, planet_dict[l[2]], int(l[3]), int(l[4])))
            else:
                monsters.append(monster.C(l[1], img_dict, bullets, platform_dict[l[2]], int(l[3]), int(l[4])))
        elif l[0] == "I":
            if l[1] == 'a':
                monsters.append(monster.I(l[1], img_dict, planet_dict[l[2]], int(l[3])))
            else:
                monsters.append(monster.I(l[1], img_dict, platform_dict[l[2]], int(l[3])))
        elif l[0] == "eye":
            if l[1] == 'a':
                monsters.append(monster.Eye(l[1], img_dict, planet_dict[l[2]], int(l[3])))
            else:
                monsters.append(monster.Eye(l[1], img_dict, platform_dict[l[2]], int(l[3])))
        elif l[0] == "Q":
            monsters.append(monster.Q(img_dict, bullets, planet_dict[l[1]], int(l[2]), int(l[3]), int(l[4])))
        elif l[0] == 'checkpoint':
            # 'a' = planet-relative, 'p' = platform-relative, else absolute;
            # the optional extra token is a second offset argument.
            if l[1] == 'a':
                if len(l) == 4:
                    checkpoints.append(enviroment.RelativeCheckpoint('a', img_dict, planet_dict[l[2]], int(l[3])))
                elif len(l) > 4:
                    checkpoints.append(enviroment.RelativeCheckpoint('a', img_dict, planet_dict[l[2]], int(l[3]), int(l[4])))
            elif l[1] == 'p':
                if len(l) == 4:
                    checkpoints.append(enviroment.RelativeCheckpoint('p', img_dict, platform_dict[l[2]], int(l[3])))
                elif len(l) > 4:
                    checkpoints.append(enviroment.RelativeCheckpoint('p', img_dict, platform_dict[l[2]], int(l[3]), int(l[4])))
            else:
                checkpoints.append(enviroment.Checkpoint(int(l[1]), int(l[2]), img_dict))
        elif l[0] == 'sign':
            # Sign text is every token after the position fields.
            if l[1] == 'a':
                string = ''
                for i in range(len(l)):
                    if i > 4:
                        string += l[i] + ' '
                stables.append(enviroment.RelativeStableObject('a', txt.render(string, (200,200,200)), planet_dict[l[2]], int(l[3]), int(l[4])))
            elif l[1] == 'p':
                string = ''
                for i in range(len(l)):
                    if i > 4:
                        string += l[i] + ' '
                stables.append(enviroment.RelativeStableObject('p', txt.render(string, (200,200,200)), platform_dict[l[2]], int(l[3]), int(l[4])))
            else:
                string = ''
                for i in range(len(l)):
                    if i > 2:
                        string += l[i] + ' '
                stables.append(enviroment.StableObject((int(l[1]), int(l[2])), txt.render(string, (200,200,200))))
    # make gravity circles: a ring of dash sprites at each planet's gravity
    # radius, spaced roughly PLANET.POINT_DISTANCE apart.
    for p in planets:
        c = 2*p.gravity_radius*math.pi
        points = int(round(c/PLANET.POINT_DISTANCE))
        angle_dist = 360/float(points)
        for x in range(points):
            stables.append(enviroment.RelativeStableObject('a', img_dict['dash'], p, x*angle_dist, p.gravity_radius - p.circle.radius - 8))
    return player.Player(player_start, planets, platforms, img_dict), planets, platforms, checkpoints, monsters, boosters, stars, bullets, ports, stables, parts, settings
def start(jukebox, level, first_time = False):
    """Main per-level game loop.

    Loads ``level``, then repeatedly handles input, advances physics by the
    elapsed frame time, resolves collisions, and redraws.  Returns the next
    level filename (set when the player reaches a port after collecting all
    parts) or None.
    """
    menu = False
    if level == "menu.txt":
        menu = True  # the menu is itself a level; no HUD, no save on exit
    next_level = None
    pygame.display.set_caption("Moon's moons")
    screen = pygame.display.set_mode((SCREEN.WIDTH, SCREEN.HEIGHT)) #, pygame.FULLSCREEN)
    # Story screen only on the first visit to a story level.
    if first_time and level in STORY:
        story(screen, STORY[level])
    # Translucent layer blitted on top when the debug view is active.
    dim = screen.copy().convert_alpha()
    dim.fill((0,0,0,200))
    screen_rect = screen.get_rect()
    background = data.load_image('background.png')
    img_dict = data.load_images()
    overlay = hud.Hud(SCREEN.WIDTH, SCREEN.HEIGHT, img_dict)
    pl, planets, platforms, checkpoints, monsters, boosters, stars, bullets, ports, stables, parts, settings = load_level(level)
    # Checkpoint 0 is the spawn point inserted by load_level.
    last_checkpoint = checkpoints.pop(0).circle.center
    cam = camera.Camera(pl)
    pl.set_camera(cam)
    decaying = []       # pickups currently fading out
    star_counter = 0
    tries = 1
    buttons = []
    buttons.append(button.Button(750, 50, 'esc', data.load_image('cross.png', True), data.load_image('cross_over.png', True)))
    buttons.append(button.Button(680, 50, 'sound', data.load_image('sound.png', True), data.load_image('sound_over.png', True)))
    for c in checkpoints:
        c.update_angle(planets)
    for p in ports:
        p.update_angle(planets)
    jukebox.play_song(settings['song'])
    running = True
    start_time = last_time = pygame.time.get_ticks()
    clock = pygame.time.Clock()
    shown = False   # debug view toggle (draw collision shapes)
    slowed = False  # slow-motion toggle
    won = False
    while running:
        clock.tick(100)
        # --- input handling ---
        for e in pygame.event.get():
            if e.type == pygame.QUIT:
                running = False
            if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
                running = False
            if e.type == pygame.KEYDOWN and e.key == pygame.K_r:
                pl.respawn(last_checkpoint)
            if e.type == pygame.KEYDOWN and e.key == pygame.K_e:
                if shown:
                    shown = False
                else:
                    shown = True
            if e.type == pygame.KEYDOWN and e.key == pygame.K_s:
                if slowed:
                    slowed = False
                else:
                    slowed = True
        # --- timestep ---
        time = pygame.time.get_ticks()
        delta = time - last_time
        if slowed:
            delta /= 10
        # Clamp very large deltas (after pauses) to keep physics stable.
        if delta > 300:
            delta = delta % 300
        external = [0,0]
        for b in boosters:
            # NOTE(review): sum(external, b.get_force(pl)) treats the force
            # as sum()'s start value — confirm this accumulation is intended.
            external = sum(external, b.get_force(pl))
        state = pl.update(pygame.key.get_pressed(), delta, external)
        if state == 'dead':
            pl.respawn(last_checkpoint)
            tries += 1
        # --- world updates ---
        for m in monsters:
            if settings['update_all']:
                m.update(delta)
            else:
                # Only update monsters visible to the camera.
                if m.has_line:
                    point = m.line.start
                else:
                    point = m.circle.center
                if cam.rect.collidepoint(point):
                    m.update(delta)
        for b in bullets:
            b.update(delta)
        for b in boosters:
            b.update(delta)
        cam.update()
        last_time = time
        # Background scrolls at a tenth of camera speed (parallax).
        screen_rect.x = cam.x/10 + SCREEN.WIDTH/2
        screen_rect.y = cam.y/10 + SCREEN.HEIGHT/2
        screen.blit(background, (0,0), screen_rect)
        for ds in decaying:
            if ds.update(pl, delta):
                decaying.remove(ds)
            show(ds, screen, cam)
        for s in stars:
            if s.update(pl, delta):
                star_counter += 1
                decaying.append(stars.pop(stars.index(s)))
            show(s, screen, cam)
        for p in parts:
            if p.update(pl, delta):
                decaying.append(parts.pop(parts.index(p)))
            show(p, screen, cam)
            if shown:
                pygame.draw.circle(screen, (150,150,150), cam.shift(p.circle.center), p.circle.radius)
        # Drop bullets that left the camera view.
        bullets[:] = [b for b in bullets if cam.rect.collidepoint(b.circle.center)]
        # Remove bullets hitting terrain; iterate back-to-front (blen - b)
        # so deletions do not shift unvisited indices.
        blen = len(bullets) - 1
        for b in range(len(bullets)):
            c = False
            for p in planets:
                if p.circle.collide_circle(bullets[blen-b].circle):
                    c = True
                    break
            if not c:
                for p in platforms:
                    if p.line.collide_circle(bullets[blen-b].circle, PLATFORM.BORDER):
                        c = True
                        break
            if c:
                del bullets[blen-b]
        # Bullets hitting the player force a respawn.
        blen = len(bullets) - 1
        for b in range(len(bullets)):
            if bullets[blen-b].circle.collide_circle(pl.circle):
                pl.respawn(last_checkpoint)
                tries += 1
                del bullets[blen-b]
        ##########################
        ######### DRAW ###########
        ##########################
        for s in stables:
            show(s, screen, cam)
        for p in ports:
            show(p, screen, cam)
            # Reaching a port with every part collected wins the level.
            if len(parts) == 0 and p.circle.collide_circle(pl.circle):
                next_level = p.level
                running = False
                won = True
        for b in boosters:
            show(b, screen, cam)
        for c in checkpoints:
            # Touching a new checkpoint caps it and uncaps all the others.
            if c.circle.collide_circle(pl.circle) and last_checkpoint != c.circle.center:
                last_checkpoint = c.circle.center
                for cp in checkpoints:
                    cp.uncap()
                c.cap()
            c.update(delta)
            screen.blit(c.image, (c.image_position[0] - cam.x, c.image_position[1] - cam.y))
        for b in bullets:
            show(b, screen, cam)
        for m in monsters:
            kill = False
            if m.has_line:
                if m.line.collide_circle(pl.circle, m.border):
                    kill = True
            else:
                if m.circle.collide_circle(pl.circle):
                    kill = True
            if kill:
                pl.respawn(last_checkpoint)
                tries += 1
            show(m, screen, cam)
            if shown:
                if m.has_line:
                    pygame.draw.line(screen, (150,50,255), cam.shift(m.line.start), cam.shift(m.line.end), m.border*2)
                else:
                    pygame.draw.circle(screen, (150,50,255), cam.shift(m.circle.center), m.circle.radius)
        show(pl, screen, cam)
        if shown:
            pygame.draw.circle(screen, (150,50,255), cam.shift(pl.circle.center), pl.circle.radius)
        for p in planets:
            show(p, screen, cam)
            if shown:
                pygame.draw.circle(screen, (50,150,255), cam.shift(p.circle.center), p.circle.radius)
        for p in platforms:
            screen.blit(p.image, (p.image_position[0] - cam.x, p.image_position[1] - cam.y))
        for b in buttons:
            e = b.update()
            if e == 'esc':
                running = False
            elif e == 'sound':
                jukebox.toggle()
            screen.blit(b.image, b.image_position)
        if not menu:
            overlay.update(delta, star_counter, tries)
            screen.blit(overlay.image, (0,0))
        if shown:
            screen.blit(dim, (0,0))
        pygame.display.flip()
    # Persist results; zeros when quit without winning.
    if not menu:
        if won:
            data.save_level(star_counter, tries, level)
        else:
            data.save_level(0, 0, level)
    return next_level
"""Depth-model training driver: builds the model and data generators."""
import glob
# Fix: os was used below (os.environ) but never imported in this file head,
# which raised NameError on the very first statement after the imports.
import os

import keras

from data import get_nyu_train_test_data, load_images, predict, show_images, to_multichannel
from loss import depth_loss

# suppress verbose TF logging (values above 3 behave like 3).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '5'

#------------
# Constants/HP
#------------
BATCH_SIZE = 6
lr = 0.0001
EPOCHS = 1

#------------Test images snippet
image_list = glob.glob('*.jpg')
test_images = load_images(image_list)
print(test_images.shape)
show_images(test_images)

#------------
'''
Create Model with Decoder
'''
model = create_model()
'''
Create Train and Test Generators
Returns Data_generator objects for Keras
'''
train_generator, test_generator = get_nyu_train_test_data(BATCH_SIZE)
print('\n\nGenerators Ready:', train_generator, test_generator)
# Layer stack in forward order.
layers = [l1, l2, l3, l4, l5, l6, l7, l8, output_layer]

images = []        # accumulated image data across all class directories
y = []             # integer class label for each image
file_names = []    # filename for each image
dimensions = []    # original dimensions for each image

# Every subdirectory of ./train is a class; sort so label indices are
# stable between runs.
train_labels = [
    entry for entry in os.listdir("train")
    if os.path.isdir(os.path.join("train", entry))
]
train_directories = [os.path.join("train", name) for name in train_labels]
train_labels, train_directories = zip(*sorted(
    zip(train_labels, train_directories), key=lambda pair: pair[0]))

for idx, folder in enumerate(train_directories):
    for walk_entry in os.walk(folder):
        dir_images, fnames, dims = load_images(walk_entry,
                                               img_dim=img_dim,
                                               as_grey=as_grey)
        images = images + dir_images
        y = y + [idx] * len(dir_images)
        dimensions = dimensions + dims
        file_names = file_names + fnames


def to_one_hot(l):
    """Return a dense one-hot matrix for the integer label sequence ``l``."""
    out = np.zeros((len(l), len(set(l))))
    for row, label in enumerate(l):
        out[row, label] = 1
    return out


y = to_one_hot(y)


def predict(model, X_test):
    model.set_batch_size(batch_size)
def read_classes():
    """Yield image classes from all three dataset selectors, in order.

    A separator line is printed after each selector is exhausted.
    """
    selectors = (train_selector, test_selector1, test_selector2)
    for selector in selectors:
        yield from load_images(selector, max_per_class=64)
        print("--- end of selector ---")
#!/usr/bin/env python
"""Fit a full-rank PCA on the training images and record the explained
variance ratio of every component."""
from __future__ import print_function
from sklearn.decomposition import PCA
import data as dat
import time
import argparse
from sklearn.preprocessing import scale, normalize

# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--path_train', help='Training File', required=True)
ARGS = parser.parse_args()

#Loading images
start_time = time.time()
train_data = dat.load_images(ARGS.path_train)
print('Loaded in ' + str(time.time()-start_time) + 's')

# Normalizing: rescale to [0, 1], standardize features, L2-normalize rows.
train_data = train_data/255
train_data = scale(train_data)
train_data = normalize(train_data, norm = 'l2')

print('Starting PCA')
# Use PCA with all components to get variance values.
# Fix: fit_transform's result was discarded, needlessly materializing an
# (n_samples x 89401) matrix — fit() computes the same variances.
start_time = time.time()
pca = PCA(n_components = 89401)
pca.fit(train_data)
variances = pca.explained_variance_ratio_
print('PCA in ' + str(time.time()-start_time) + 's')
    '--channels',
    metavar="n",
    type=int,
    nargs=None,
    help="number of hidden CNN channels in columns, default: 14",
    default=14)
parser.add_argument('--classes-per-col',
                    metavar="n",
                    type=int,
                    nargs=None,
                    help="number of classes per columnm default: 16",
                    default=16)
args = vars(parser.parse_args())

# load validation data
class_images = load_images(test_selector1, max_per_class=32)
validation = [torch.cat(class_images[i], dim=0) for i in range(4)]

# load main training set
class_images = load_images(train_selector, max_per_class=256)
print("number of classes:", len(class_images))


# a function to generate processed batch
def random_batch(net: torch.nn.Module, class_indices: list) -> torch.Tensor:
    """Pick one random image per requested class, concatenate them into one
    batch and run it through ``net``."""
    inp = torch.cat([random.choice(class_images[j]) for j in class_indices],
                    dim=0)
    return net(inp)


# ensemble of neural columns
def train():
    """Train the model with SGD, checkpointing by sample count.

    Supports resuming mid-epoch via (start_epoch, start_key); with the
    checkpoint-loading line commented out it always starts fresh.
    """
    model = Model().cuda()
    criterion = torch.nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    #optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=1e-4)
    #scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda epoch: 0.1**(epoch+1))
    #start_epoch, start_key, _, _ = load_checkpoint('latest', model, optimizer)
    start_epoch, start_key = 0, None
    train_data, val_data = load_images(['train', 'validate'])
    data_keys = list(train_data.keys())
    # Resume position: skip every key up to and including the checkpointed
    # one; otherwise start from the first key.
    if start_key:
        start_key = data_keys.index(start_key) + 1
    else:
        start_key = 0
    print('model parameter size (GB):', model_size(model) / 1024**3)
    #print(validation_loss(model, criterion, val_data))
    #exit(1)
    #import pdb;pdb.set_trace()
    n_samples = 0
    for epoch in range(start_epoch, n_epochs):
        print('starting epoch', epoch)
        #scheduler.step()
        for key, batch in train_data.items():
            if data_keys.index(key) < start_key:
                continue
            batch_size = len(batch[0])
            #print(key, batch[1].shape)
            #continue
            # Process the batch in chunks of N samples.
            for i in range(0, batch_size, N):
                start = time.time()
                x = batch[0][i:i + N].cuda()
                # NOTE(review): the trailing [0] keeps only the first target
                # of the slice — confirm it matches the model/loss shapes.
                y = batch[1][i:i + N][0].cuda()
                #x = batch[0][i:i+N]
                #y = batch[1][i:i+N]
                optimizer.zero_grad()
                y_pred = model(x, y)
                loss = criterion(y_pred, y)
                loss.backward()
                optimizer.step()
                # Gradient-norm diagnostics (read after the step; very
                # verbose — prints one line per parameter tensor).
                total_norm = 0.
                for p in model.parameters():
                    param_norm = p.grad.data.norm(2)
                    total_norm += param_norm.item()**2
                    print('grad_norm', param_norm)
                total_norm = total_norm**(1. / 2)
                print('total_norm', total_norm)
                end = time.time()
                print('epoch: {} {} {}/{}, loss: {}'.format(
                    epoch, key, i, batch_size, loss.item()))
                print('time step:', end - start)
                n_samples += N
                save_checkpoint(n_samples, (epoch, key, i, loss), model,
                                optimizer)
# t-SNE pipeline: load resized training images, preprocess, and embed.
# NOTE: Python 2 script (uses print statements).
import preprocessing as pp
import data as dat
import tsne as t
import time

path_train = '/home/barbara/Documents/Trabalho/train-jpg/'
new_path_train = '/home/barbara/Documents/Trabalho/train-jpg_resized/'
path_label = '/home/barbara/Documents/Trabalho/train_v2.csv'
path_tsne = 'tsne10'

#Resizing images 32x32
#li.resize_images(path_train, new_path_train, 32)

#Loading images
start_time = time.time()
data = dat.load_images(new_path_train)
print 'Loaded in ' + str(time.time()-start_time) + 's'

# Preprocessing: scale to [0, 1], standardize, L2-normalize, then keep the
# top 10 principal components.
data = data.astype('float32')
data /= 255.
data = pp.st_scale(data)
data = pp.normalize_l2(data)
data, i = pp.PCA_reduction(data, 0, 10)

#Loading labels
label = dat.load_labels(path_label)

#Generate t-SNE
start_time = time.time()
t.generate_tsne(path_tsne, data, label)
""" Example program that displays a random dog and cat. Expects that the files "dog.npy" and "cat.npy" have been downloaded to the "data/" directory. """ import data import random import matplotlib.pyplot as plt dog = data.load_images("dog.npy") cat = data.load_images("cat.npy") data.show_image(random.choice(dog)) data.show_image(random.choice(cat))