def deploy_two_networks_in_vm(request, template_name):
    """Deploy a Tomcat blueprint whose VM is attached to two networks.

    The instance joins a freshly created network plus the pre-existing
    shared 'node-int-net-01' network, then the template is deployed.
    """
    attributes = [{'port': '8080'}]
    tomcat = model.Product('tomcat', 'io.murano.apps.apache.Tomcat', attributes)
    blueprint = model.Template(template_name)
    # First network is brand new, second is the shared external one.
    networks = [model.Network('new', False), model.Network("node-int-net-01", True)]
    vm = model.Instance('ubuntu', 'Ubuntu14.04init_deprecated', '2', '', False, networks)
    service = model.Service(tomcat.name, tomcat)
    service.add_instance(vm)
    blueprint.add_service(service)
    request.deploy_template(blueprint)
def __init__(self, gpu=0, checkpoint=None):
    """Build the 161*3 -> 100 -> 86*3 network and load data plus weights.

    Reads the enhanced simulation save files from a hard-coded local path
    and restores the model weights from a pickled checkpoint.

    Parameters
    ----------
    gpu : int
        CUDA device index used when a GPU is available.
    checkpoint : unused
        Kept for signature compatibility; the checkpoint path is hard-coded.
    """
    self.cuda = torch.cuda.is_available()
    self.gpu = gpu
    self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
    self.model = model.Network(161 * 3, 100, 86 * 3).to(self.device)
    root = '/Users/ferran_2020/TFG/Neural_RH_Inversion/'
    print("Reading Enhanced_tau_530 - tau")
    tmp = io.readsav(f'{root}Enhanced_tau_530.save')
    # Flatten the 504x504 spatial grid into columns of 86 depth points.
    self.T_tau = tmp['tempi'].reshape((86, 504 * 504))
    self.Pe_tau = tmp['epresi'].reshape((86, 504 * 504))
    print("Reading Enhanced_tau_530 - z")
    tmp = io.readsav(f'{root}Enhanced_530_optiona_rh.save')
    self.T_z = tmp['tg']  # .reshape((161, 504*504))
    self.Pg_z = tmp['pg']  # .reshape((161, 504*504))
    self.z = tmp['z'] / 1e3
    import pickle
    filename = '/Users/ferran_2020/TFG/Neural_RH_Inversion/checkpoint_Whole.dict'
    # BUG FIX: the original called f.close() after the `with` block, which is
    # redundant — the context manager already closes the file on exit.
    with open(filename, 'rb') as f:
        s = pickle.load(f)
    self.model.load_state_dict(s[0])
def __init__(self, batch_size, validation_split=0.2, gpu=0, smooth=0.05):
    # Trainer setup: pick the device, build the model, split the dataset
    # into train/validation indices and create the corresponding loaders.
    self.cuda = torch.cuda.is_available()
    self.gpu = gpu
    self.smooth = smooth  # smoothing factor, presumably for running-loss averaging — TODO confirm
    self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
    if (NVIDIA_SMI):
        # Report which GPU we will compute on via NVML.
        nvidia_smi.nvmlInit()
        self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(self.gpu)
        print("Computing in {0} : {1}".format(self.device, nvidia_smi.nvmlDeviceGetName(self.handle)))
    self.batch_size = batch_size
    self.validation_split = validation_split
    kwargs = {'num_workers': 2, 'pin_memory': False} if self.cuda else {}
    # Fully connected network: 95*3+1 inputs -> 100 hidden -> 2 outputs.
    self.model = model.Network(95*3+1, 100, 2).to(self.device)
    print('N. total parameters : {0}'.format(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
    self.dataset = Dataset()
    # Compute the fraction of data for training/validation
    idx = np.arange(self.dataset.n_training)
    self.train_index = idx[0:int((1-validation_split)*self.dataset.n_training)]
    self.validation_index = idx[int((1-validation_split)*self.dataset.n_training):]
    # Define samplers for the training and validation sets
    self.train_sampler = torch.utils.data.sampler.SubsetRandomSampler(self.train_index)
    self.validation_sampler = torch.utils.data.sampler.SubsetRandomSampler(self.validation_index)
    # Data loaders that will inject data during training
    self.train_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, sampler=self.train_sampler, shuffle=False, **kwargs)
    self.validation_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, sampler=self.validation_sampler, shuffle=False, **kwargs)
def __init__(self, basis_wavefront='zernike', npix_image=128, n_modes=44, n_frames=10, gpu=0, smooth=0.05,
             batch_size=16, arguments=None):
    """Trainer setup for the wavefront-estimation network.

    Stores the telescope/optics configuration, selects the compute device,
    builds the model and wires up the HDF5-backed data loaders.
    """
    # Optics configuration (units as annotated).
    self.pixel_size = 0.0303
    self.telescope_diameter = 256.0  # cm
    self.central_obscuration = 51.0  # cm
    self.wavelength = 8000.0
    self.n_frames = n_frames
    self.batch_size = batch_size
    self.arguments = arguments
    self.basis_for_wavefront = basis_wavefront
    self.npix_image = npix_image
    self.n_modes = n_modes
    self.gpu = gpu
    self.cuda = torch.cuda.is_available()
    self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
    # Get handlers to later check memory and usage of GPUs.
    if NVIDIA_SMI:
        nvidia_smi.nvmlInit()
        self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(self.gpu)
        print("Computing in {0} : {1}".format(gpu, nvidia_smi.nvmlDeviceGetName(self.handle)))
    # Define the neural network model.
    print("Defining the model...")
    self.model = model.Network(device=self.device, n_modes=self.n_modes, n_frames=self.n_frames,
                               pixel_size=self.pixel_size, telescope_diameter=self.telescope_diameter,
                               central_obscuration=self.central_obscuration, wavelength=self.wavelength,
                               basis_for_wavefront=self.basis_for_wavefront,
                               npix_image=self.npix_image).to(self.device)
    print('N. total parameters : {0}'.format(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
    kwargs = {'num_workers': 1, 'pin_memory': False} if self.cuda else {}
    # Data loaders that will inject data during training.
    self.training_dataset = Dataset(filename='/scratch1/aasensio/fastcam/training_small.h5',
                                    n_training_per_star=1000, n_frames=self.n_frames)
    self.train_loader = torch.utils.data.DataLoader(self.training_dataset, batch_size=self.batch_size,
                                                    shuffle=True, drop_last=True, **kwargs)
    self.validation_dataset = Dataset(filename='/scratch1/aasensio/fastcam/validation_small.h5',
                                      n_training_per_star=100, n_frames=self.n_frames, validation=True)
    self.validation_loader = torch.utils.data.DataLoader(self.validation_dataset, batch_size=self.batch_size,
                                                         shuffle=True, drop_last=True, **kwargs)
def deploy_blueprint_template(request, template_name, product):
    """Deploy *product* on an Ubuntu VM attached to the shared network."""
    blueprint = model.Template(template_name)
    shared_net = model.Network("node-int-net-01", True)
    vm = model.Instance('ubuntu', 'Ubuntu14.04init_deprecated', '2', '', False, [shared_net])
    service = model.Service(product.name, product)
    service.add_instance(vm)
    blueprint.add_service(service)
    request.deploy_template(blueprint)
def experiment(hyperparameter):
    """Train a network under *hyperparameter* and return the best result.

    Returns a dict with per-epoch train/test accuracies, a deep copy of the
    best-performing network, and its test accuracy.
    """
    train_accuracies = []
    test_accuracies = []
    train, test = chainer.datasets.split_dataset_random(dataset, 3000)
    train_x, train_t = chainer.dataset.concat_examples(train)
    test_x, test_t = chainer.dataset.concat_examples(test)
    network = model.Network(hyperparameter)
    optimizer = chainer.optimizers.Adam().setup(network)
    best_accuracy = 0
    # BUG FIX: best_network was only assigned when test accuracy strictly
    # improved on 0; if it never did, `result['network']` raised
    # UnboundLocalError.  Seed it with the initial network instead.
    best_network = copy.deepcopy(network)
    for max_epoch, batch_size in hyperparameter['iteration']:
        iterator = chainer.iterators.SerialIterator(train, batch_size)
        while iterator.epoch < max_epoch:
            batch = iterator.next()
            batch_x, batch_t = chainer.dataset.concat_examples(batch)
            batch_y = network(batch_x)
            loss = chainer.functions.sigmoid_cross_entropy(batch_y, batch_t)
            network.cleargrads()
            loss.backward()
            optimizer.update()
            if iterator.is_new_epoch:
                # Evaluate on the full train/test splits without backprop.
                with chainer.using_config('train', False):
                    with chainer.no_backprop_mode():
                        train_y = network(train_x)
                        train_accuracy = chainer.functions.binary_accuracy(train_y, train_t)
                        test_y = network(test_x)
                        test_accuracy = chainer.functions.binary_accuracy(test_y, test_t)
                text = 'Epoch :' + format(
                    iterator.epoch, '4d') + ', TrainAccuracy : ' + format(
                    train_accuracy.data, '1.4f') + ', TestAccuracy : ' + format(
                    test_accuracy.data, '1.4f')
                print(text)
                train_accuracies.append(train_accuracy.data)
                test_accuracies.append(test_accuracy.data)
                if best_accuracy < test_accuracy.data:
                    best_accuracy = test_accuracy.data
                    best_network = copy.deepcopy(network)
    result = {}
    result['train_accuracies'] = train_accuracies
    result['test_accuracies'] = test_accuracies
    result['network'] = best_network
    result['accuracy'] = best_accuracy
    return result
def initialize_model(path):
    """Build the global network, open a TF session and load weights from *path*.

    Mutates the module-level globals ``network``, ``sess`` and ``initialized``;
    asserts it has not been called before, so it must run exactly once.
    """
    global network, sess, initialized
    assert not initialized
    network = model.Network("net/", build_training=False)
    sess = tf.InteractiveSession()
    # Legacy pre-1.0 TensorFlow initializer, consistent with the rest of the file.
    sess.run(tf.initialize_all_variables())
    # Make the session visible to the model module before restoring weights.
    model.sess = sess
    model.load_model(network, path)
    initialized = True
def __init__(self, env, optimizer):
    """Store the environment, build the policy network and keep the optimizer."""
    self.env = env
    self.episode_count = 10000  # total episodes to run
    action_space = env.action_space
    observation_space = env.ob_space
    self.ac_space = action_space
    self.ob_space = observation_space
    self.model = model.Network(observation_space, action_space)
    self.optimizer = optimizer
def interface():
    """Interactive entry point: load model weights and generator assets,
    then hand control to the prediction loop."""
    settings.logger.info("Starting user interface...")
    net = model.Network()
    net.build()
    net.load_weights()
    gen = generator.Generator(None, None)
    gen.load_indexes()
    gen.load_genre_binarizer()
    get_predictions(gen, net)
def train():
    """Train the COCO model, logging throughput, summaries and checkpoints.

    Runs up to FLAGS.max_steps iterations, optionally restoring pretrained
    variables, and aborts if the loss diverges to NaN.
    """
    global_step = tf.Variable(0, trainable=False)
    dataset = coco_input.get_dataset()
    labels, images = dataset.train_input()
    network = model.Network(is_train=True)
    logits = network.inference(images)
    # Histogram summaries for every trainable variable (legacy TF<1.0 API).
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)
    entropy, loss = model.get_loss(labels, logits)
    lr, opt = get_opt(loss, global_step)
    summary_op = tf.merge_all_summaries()
    # Cap per-process GPU memory so another process (e.g. eval) can share the card.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        init = tf.initialize_all_variables()
        sess.run(init)
        if FLAGS.dir_pretrain is not None:
            saver = tf.train.Saver(model.get_pretrain_variables())
            restore_model(saver, sess)
        summary_writer = tf.train.SummaryWriter("log", sess.graph)
        tf.train.start_queue_runners(sess=sess)
        saver = tf.train.Saver(model.get_restore_variables())
        for num_iter in range(1, FLAGS.max_steps + 1):
            start_time = time.time()
            value_entropy, value_loss, value_lr, _ = sess.run(
                [entropy, loss, lr, opt])
            duration = time.time() - start_time
            assert not np.isnan(value_loss), 'Model diverged with loss = NaN'
            if num_iter % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                print(
                    "step = {} entropy = {:.2f} loss = {:.2f} ({:.1f} examples/sec; {:.1f} sec/batch)"
                    .format(num_iter, value_entropy, value_loss,
                            examples_per_sec, sec_per_batch))
            if num_iter % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, num_iter)
            if num_iter % 1000 == 0:
                # BUG FIX: this was a Python 2 print *statement* (a syntax
                # error on Python 3); use the function form, valid on both.
                print("lr = {:.2f}".format(value_lr))
                checkpoint_path = os.path.join(FLAGS.dir_parameter, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=num_iter)
def __init__(self, env, optimizer):
    """Agent setup: cache env spaces, build and compile the network."""
    self.env = env
    self.episode_count = 10000  # total episodes to run
    self.ac_space = env.action_space
    self.ob_space = env.ob_space
    self.model = model.Network(self.ob_space, self.ac_space)
    self.optimizer = optimizer
    # Compile once up front; keep the returned parameters handle.
    self.params = self.model.compile(optimizer, loss='mean')
def __init__(self, state_size, action_size, h1, h2, h3, n_agents=2):
    """DDPG-style agent: actor/critic local+target networks plus OU noise."""
    # Actor hidden sizes mirror the supplied h1/h2/h3 directly.
    actor_h1, actor_h2, actor_h3 = h1, h2, h3
    self.action_size = action_size
    self.actor_local = model.Network(input_dim=state_size, h1=actor_h1, h2=actor_h2,
                                     h3=actor_h3, output_dim=action_size, actor=True).to(device)
    self.actor_target = model.Network(input_dim=state_size, h1=actor_h1, h2=actor_h2,
                                      h3=actor_h3, output_dim=action_size, actor=True).to(device)
    self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=lr_act)
    # Centralized critic sees every agent's state and action concatenated.
    critic_input = n_agents * (state_size + action_size)
    self.critic_local = model.Network(input_dim=critic_input, h1=h1, h2=h2, h3=h3,
                                      output_dim=1).to(device)
    self.critic_target = model.Network(input_dim=critic_input, h1=h1, h2=h2, h3=h3,
                                       output_dim=1).to(device)
    self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=lr_crt)
    self.noise = OUNoise(action_size, scale=1.0)
    # Targets start as exact copies of the local networks.
    self.hard_update(self.actor_target, self.actor_local)
    self.hard_update(self.critic_target, self.critic_local)
    self.tau = 1e-3  # soft-update interpolation factor
def deploy_vm_no_exiting_network(request, template_name, network):
    """Deploy Tomcat on a VM attached to *network* (a non-existing net name).

    NOTE(review): 'exiting' in the name looks like a typo for 'existing';
    kept as-is for caller compatibility.
    """
    attributes = [{'port': '8080'}]
    blueprint = model.Template(template_name)
    tomcat = model.Product('tomcat', 'io.murano.apps.apache.Tomcat', attributes)
    net = model.Network(network, False)
    vm = model.Instance('ubuntu', 'Ubuntu14.04init_deprecated', '2', '', False, [net])
    service = model.Service(tomcat.name, tomcat)
    service.add_instance(vm)
    blueprint.add_service(service)
    request.deploy_template(blueprint)
def predict_from_pretrained():
    # Interactive single-image inference: pick a file via a Tk dialog, run the
    # pretrained network (one warm-up pass, one timed pass), then save the
    # output image, an attention overlay, and show a side-by-side montage.
    root = Tk()
    path=os.path.abspath("01.input")
    root.filename = filedialog.askopenfilename(initialdir=path, title="choose your file", filetypes=(("all files", "*.*"), ("png files", "*.png")))
    print("Load image")
    img = skimage.io.imread(root.filename)
    input=img
    img=img.astype(np.float)
    M=img.shape[0]
    N=img.shape[1]
    print("Build Network")
    sess = tf.Session()
    # Graph is built for exactly one image of the selected size.
    images = tf.placeholder(tf.float32, [1, M, N, 3])
    train_mode = tf.placeholder(tf.bool)
    rime = model.Network('./rime_v_new5_18000.npy', trainable=False)
    rime.build(images)
    sess.run(tf.global_variables_initializer())
    if img.shape == (M, N, 3):
        img = img.reshape((1, M, N, 3))
    x_batch = img / 255.
    print("Test Network")
    # Warm-up run (result discarded) so the timed run below excludes setup cost.
    _, _ = sess.run([rime.F_out, rime.att], feed_dict={train_mode: False, images: x_batch})
    print("Run Network")
    s = time.time()
    result, att = sess.run([rime.F_out, rime.att], feed_dict={train_mode: False, images: x_batch})
    e = time.time()
    print("Proc. Time:", e - s)
    # Clamp the network output to [0, 1] before converting back to an image.
    output_image = np.minimum(np.maximum(result, 0.0), 1)
    result = np.reshape(output_image[0, :, :, :], [M, N, 3])
    image_name=os.path.basename(root.filename)
    print("save result")
    skimage.io.imsave("./02.Results/out_" + image_name, result)
    print("Visualization")
    out=result*255
    out = out.astype(np.uint8)
    # Blend the attention map over the input: channel 0 highlights attended
    # pixels, channel 1 the complement, channel 2 is dimmed input only.
    region = input.astype(np.float)
    region=region/255
    region[:,:,0]=region[:,:,0]*0.3+att[0, :, :, 0]*0.7
    region[:,:,1]=region[:,:,1]*0.3+(1-att[0, :, :, 0])*0.7
    region[:,:,2]=region[:,:,2]*0.3
    skimage.io.imsave("./02.Results/region_" + image_name, region)
    region = region*255
    # Side-by-side montage: input | network output | attention overlay.
    visual=np.zeros((M,N*3,3),np.uint8)
    visual[:, 0:N, :]=input
    visual[:, N:N*2, :]=out
    visual[:, N*2:N*3, :]=region.astype(np.uint8)
    skimage.io.imshow(visual)
    plt.show()
def __init__(self, gpu=0, checkpoint=None, K=3, model_class='conv1d'):
    """Load a trained model from *checkpoint* (newest in trained/ when None).

    Parameters
    ----------
    gpu : int
        CUDA device index used when a GPU is available.
    checkpoint : str or None
        Path to a .pth checkpoint; defaults to the most recent file in trained/.
    K : int
        Forwarded to model.Network as its K parameter.
    model_class : str
        Either 'conv1d' or 'conv2d'.

    Raises
    ------
    ValueError
        If *model_class* is not a supported value.
    """
    self.cuda = torch.cuda.is_available()
    self.gpu = gpu
    self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
    self.K = K
    if checkpoint is None:
        # Default to the newest checkpoint on disk.
        files = glob.glob('trained/*.pth')
        self.checkpoint = max(files, key=os.path.getctime)
    else:
        self.checkpoint = '{0}'.format(checkpoint)
    if model_class == 'conv1d':
        self.model = model.Network(K=self.K, L=32, device=self.device,
                                   model_class=model_class).to(self.device)
    elif model_class == 'conv2d':
        self.model = model.Network(K=self.K, L=32, NSIDE=16, device=self.device,
                                   model_class=model_class).to(self.device)
    else:
        # BUG FIX: an unknown model_class previously left self.model unset,
        # deferring the failure to a confusing AttributeError later on.
        raise ValueError(f"Unknown model_class: {model_class!r}")
    print('N. total parameters : {0}'.format(
        sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
    print("=> loading checkpoint '{}'".format(self.checkpoint))
    # Load on CPU regardless of where the checkpoint was saved.
    state = torch.load(self.checkpoint, map_location=lambda storage, loc: storage)
    self.model.load_state_dict(state['state_dict'])
    print("=> loaded checkpoint '{}'".format(self.checkpoint))
    print(f"rho : {torch.exp(state['state_dict']['rho'])}")
    print(f"theta : {torch.exp(state['state_dict']['theta'])}")
def deploy_orion_chef(request, template_name):
    """Deploy Orion via its Chef cookbook on a CentOS VM in the shared network."""
    orion = model.Product('orionchef', 'io.murano.conflang.chef.GitChef', [{'port': '1026'}])
    blueprint = model.Template(template_name)
    shared_net = model.Network("node-int-net-01", True)
    vm = model.Instance('centos', 'CentOS-6.5init_deprecated', '2', 'demo4', False, [shared_net])
    service = model.Service(orion.name, orion)
    service.add_instance(vm)
    blueprint.add_service(service)
    request.deploy_template(blueprint)
def train_network():
    """End-to-end training: load data, split it, build the model and fit."""
    synopses, genres = load_preprocessed_data(settings.INPUT_PREPROCESSED_FILMS)
    X_train, X_val, y_train, y_val = train_test_split(
        synopses, genres, test_size=settings.VALIDATION_SPLIT)
    network = model.Network()
    # Generators wrap the split data for batched training.
    network.load_generators(X_train, X_val, y_train, y_val)
    network.load_embeddings()
    network.build()
    network.compile()
    network.train()
def __init__(self, sim, parent=None):
    """Create a creature; inherit a mutated brain when *parent* is given."""
    self.sim = sim
    self.pos = np.array([0.0, 0.0])
    self.dir = random.random() * 2 * math.pi  # random initial heading
    self.speed = 0
    self.max_speed = 4
    self.rad = 10
    self.move_cost = 0  # 0.002 was the previous cost; currently disabled
    self.fitness = 0
    self.decay_rate = 0.95
    self.fov = math.pi / 3  # 60-degree field of view
    self.n_bins = 8
    # One reading per retina bin, each its own list.
    self.eye_dist = [0.0 for _ in range(self.n_bins)]
    self.eye_r = [0.0 for _ in range(self.n_bins)]
    self.eye_g = [0.0 for _ in range(self.n_bins)]
    self.eye_decay = [0.0 for _ in range(self.n_bins)]
    # Fresh brain for a founder, mutated copy of the parent's otherwise.
    if parent is None:
        self.brain = model.Network(self)
    else:
        self.brain = model.Network(self, parent.brain.mutate())
def __init__(self, global_network, optimizer, global_ep, global_ep_r, res_queue, worker_name, pybullet_client, urdf_path):
    """A3C worker: local network copy plus its own snake environment."""
    super(Worker, self).__init__()
    self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
    self.worker_name = 'worker_%i' % worker_name
    self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
    self.global_network = global_network
    self.optimizer = optimizer
    # Each worker drives its own simulated robot.
    robot = snake.Snake(pybullet_client, urdf_path)
    self.env = SnakeGymEnv(robot)
    self.local_network = model.Network(self.env.observation_space.shape[0],
                                       self.env.action_space.shape[0])
def evaluate():
    """Loop forever, re-evaluating the current checkpoint on validation data."""
    with tf.Graph().as_default() as g, tf.device("/gpu:0"):
        dataset = coco_input.get_dataset()
        labels, images = dataset.validate_input()
        network = model.Network(is_train=False)
        logits = network.inference(images)
        entropy, _ = model.get_loss(labels, logits)
        # Top-1 accuracy op over the validation batch.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        summary_writer = tf.train.SummaryWriter(FLAGS.dir_log_val, g)
        while True:
            eval_once(summary_writer, top_k_op, entropy)
            time.sleep(FLAGS.eval_interval_secs)
def evaluate():
    """Loop forever, evaluating MNIST checkpoints at a fixed interval."""
    with tf.Graph().as_default() as g, tf.device("/gpu:0"):
        FLAGS.batch_size = 100  # evaluation batch-size override
        images, labels = mnist_input.validate_input()
        label_vector = tf.one_hot(labels, 10, dtype=tf.float32)
        network = model.Network()
        logits = network.inference(images)
        # Top-1 accuracy op over the validation batch.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        entropy, loss = model.get_loss(label_vector, logits)
        summary_writer = tf.train.SummaryWriter(FLAGS.dir_log, g)
        while True:
            eval_once(summary_writer, top_k_op, entropy)
            time.sleep(FLAGS.eval_interval_secs)
def main():
    """Train the network on all 3-bit messages; validate on 14-bit ones."""
    net = M.Network()
    # Every possible 3-bit message, in the original training order.
    training_msgs = np.array([[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1],
                              [1, 1, 0], [0, 0, 1], [0, 1, 1], [1, 0, 0]])
    X = one_hot_encoding(training_msgs.copy())
    y = encode_seqs(training_msgs.copy())
    # Longer held-out sequences for validation.
    validation_msgs = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0],
                                [0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],
                                [1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]])
    Xv = one_hot_encoding(validation_msgs.copy())
    yv = encode_seqs(validation_msgs.copy())
    history = M.fit(net, X, y, validation_data=(Xv, yv), epochs=300)
def main():
    """Full training run for the MLP: setup, per-epoch train/validate, save."""
    # Pin the visible GPU and enable cuDNN autotuning.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    cudnn.benchmark = True
    # Make sure the output directory exists.
    save_path = args.save_path
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # Data, model, loss, optimizer and LR schedule.
    train_loader, test_loader = dataset.get_loader(args)
    net = mlp.Network().cuda()
    criterion = nn.BCEWithLogitsLoss().cuda()
    optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9,
                          weight_decay=0.0001, nesterov=True)
    scheduler = MultiStepLR(optimizer, milestones=[80,120], gamma=0.1)
    # Separate loggers for the two phases.
    train_logger = utils.Logger(os.path.join(save_path, 'train.log'))
    test_logger = utils.Logger(os.path.join(save_path, 'test.log'))
    for epoch in range(1, args.epochs + 1):
        # LR schedule advanced at epoch start (original ordering preserved).
        scheduler.step()
        train(train_loader, net, criterion, optimizer, epoch, train_logger)
        validate(test_loader, net, criterion, epoch, test_logger, 'test')
        # Persist weights at the first and last epoch only.
        if epoch in (1, int(args.epochs)):
            torch.save(net.state_dict(),
                       os.path.join(save_path, '{0}_{1}.pth'.format('model', epoch)))
    # Render loss/accuracy curves from the log files.
    plot_curves.draw_plot(save_path)
def __init__(self, gpu=0, checkpoint=None):
    """Build the inversion network and read the enhanced simulation cubes."""
    self.cuda = torch.cuda.is_available()
    self.gpu = gpu
    self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
    # Fully connected network: 95*3+1 inputs -> 100 hidden -> 2 outputs.
    self.model = model.Network(95 * 3 + 1, 100, 2).to(self.device)
    root = '/Users/ferran_2020/TFG/Neural_RH_Inversion/'
    print("Reading Enhanced_tau_530 - tau")
    data_tau = io.readsav(f'{root}Enhanced_tau_530.save')
    self.T_tau = data_tau['tempi']    # .reshape((86, 504*504))
    self.Pe_tau = data_tau['epresi']  # .reshape((86, 504*504))
    self.tau = data_tau['tau3'] / 5.0
    print("Reading Enhanced_tau_530 - z")
    data_z = io.readsav(f'{root}Enhanced_530_optiona_rh.save')
    self.T_z = data_z['tg']   # .reshape((161, 504*504))
    self.Pg_z = data_z['pg']  # .reshape((161, 504*504))
    self.z = data_z['z'] / 1e3
def __init__(self, basis_wavefront='zernike', npix_image=128, n_modes=44, n_frames=10, gpu=0, corner=(0,0),\
    batch_size=16, checkpoint=None):
    # Inference-side setup: optics constants, device selection, model build
    # and checkpoint restore (newest file in trained/ when none is given).
    self.pixel_size = 0.0303
    self.telescope_diameter = 256.0 # cm
    self.central_obscuration = 51.0 # cm
    self.wavelength = 8000.0
    self.n_frames = n_frames
    self.batch_size = batch_size
    self.basis_for_wavefront = basis_wavefront
    self.npix_image = npix_image
    self.n_modes = n_modes
    self.gpu = gpu
    self.cuda = torch.cuda.is_available()
    self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
    # Ger handlers to later check memory and usage of GPUs
    if (NVIDIA_SMI):
        nvidia_smi.nvmlInit()
        self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(self.gpu)
        print("Computing in {0} : {1}".format(gpu, nvidia_smi.nvmlDeviceGetName(self.handle)))
    # Define the neural network model
    print("Defining the model...")
    self.model = model.Network(device=self.device, n_modes=self.n_modes, n_frames=self.n_frames, \
        pixel_size=self.pixel_size, telescope_diameter=self.telescope_diameter, central_obscuration=self.central_obscuration, wavelength=self.wavelength,\
        basis_for_wavefront=self.basis_for_wavefront, npix_image=self.npix_image).to(self.device)
    print('N. total parameters : {0}'.format(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
    if (checkpoint is None):
        # No explicit checkpoint: pick the most recently created .pth file.
        files = glob.glob('trained/*.pth')
        self.checkpoint = max(files, key=os.path.getctime)
    else:
        self.checkpoint = '{0}'.format(checkpoint)
    print("=> loading checkpoint '{}'".format(self.checkpoint))
    # Load on CPU regardless of where the checkpoint was saved.
    tmp = torch.load(self.checkpoint, map_location=lambda storage, loc: storage)
    self.model.load_state_dict(tmp['state_dict'])
    print("=> loaded checkpoint '{}'".format(self.checkpoint))
def train():
    """Train the MNIST network, logging summaries and checkpoints periodically."""
    global_step = tf.Variable(0, trainable=False)
    image, label = mnist_input.train_input()
    network = model.Network()
    logits = network.inference(image, is_train=True)
    # Histogram summaries for every trainable variable (legacy TF<1.0 API).
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)
    entropy, loss = model.get_loss(label, logits)
    lr, opt = get_opt(loss, global_step)
    saver = tf.train.Saver(tf.trainable_variables())
    summary_op = tf.merge_all_summaries()
    # Grow GPU memory on demand instead of grabbing it all up front.
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        init = tf.initialize_all_variables()
        sess.run(init)
        summary_writer = tf.train.SummaryWriter("log", sess.graph)
        tf.train.start_queue_runners(sess=sess)
        for num_iter in range(1, 1000000):
            value_entropy, value_loss, value_lr, _ = sess.run(
                [entropy, loss, lr, opt])
            if num_iter % 100 == 0:
                # BUG FIX: this was a Python 2 print *statement* (a syntax
                # error on Python 3); use the function form, valid on both.
                print("lr = {} entropy = {} loss = {}".format(
                    value_lr, value_entropy, value_loss))
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, num_iter)
            if num_iter % 1000 == 0:
                checkpoint_path = os.path.join(FLAGS.dir_parameter, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=num_iter)
def __init__(self, gpu=0, checkpoint=None):
    """Build a 100 -> 40 -> 2 network and restore weights from a checkpoint.

    When *checkpoint* is None the newest .pth file in trained/ is used.
    """
    self.cuda = torch.cuda.is_available()
    self.gpu = gpu
    self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
    if checkpoint is None:
        # Default to the most recently created checkpoint on disk.
        candidates = glob.glob('trained/*.pth')
        self.checkpoint = max(candidates, key=os.path.getctime)
    else:
        self.checkpoint = '{0}'.format(checkpoint)
    self.model = model.Network(100, 40, 2).to(self.device)
    n_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
    print('N. total parameters : {0}'.format(n_params))
    print("=> loading checkpoint '{}'".format(self.checkpoint))
    # Load on CPU regardless of where the checkpoint was saved.
    state = torch.load(self.checkpoint, map_location=lambda storage, loc: storage)
    self.model.load_state_dict(state['state_dict'])
    print("=> loaded checkpoint '{}'".format(self.checkpoint))
def run_batch_predictions():
    """Generate beam-search synopses for random genre combinations.

    Loads the trained network and generator assets, then produces 1000
    samples, each conditioned on a random subset of the possible genres.
    """
    settings.logger.info("Starting batch predictions...")
    n = model.Network()
    n.build()
    n.load_weights()
    g = generator.Generator(None, None)
    g.load_indexes()
    g.load_genre_binarizer()
    # NOTE: the original assigned possible_genres three times in a row
    # (list(g.mlb.classes_), then a 5-genre list) — only the final value ever
    # took effect, so only it is kept here.
    possible_genres = ['Romance', 'Acción']
    for i in range(1000):
        settings.logger.info("Sample " + str(i) + "________________")
        # BUG FIX: randint(1, 3) could request a 3-element sample from a
        # 2-element population, making random.sample raise ValueError.
        n_genres = random.randint(1, min(3, len(possible_genres)))
        input_genres = random.sample(possible_genres, n_genres)
        settings.logger.info("Input genres:" + ', '.join(input_genres))
        encoded_genres = g.mlb.transform([input_genres])
        synG = get_predictions_beam(g, n, encoded_genres, 4, ['La'])
        settings.logger.info("BEAM synopsis: " + synG)
def deploy_orion_docker(request, template_name):
    """Deploy a Docker standalone host plus an Orion container bound to it."""
    blueprint = model.Template(template_name)
    shared_net = model.Network("node-int-net-01", True)
    vm = model.Instance('ubuntu', 'Ubuntu14.04init_deprecated', '2', '', False, [shared_net])
    docker_host = model.Product('docker', 'io.murano.apps.docker.DockerStandaloneHost')
    host_service = model.Service(docker_host.name, docker_host)
    host_service.add_instance(vm)
    # Orion container published on the docker host created above.
    orion_atts = [{'publish': True}, {'host': docker_host.id}]
    orion = model.Product('dockerorion', 'io.murano.apps.docker.DockerOrion', orion_atts)
    orion_service = model.Service(orion.name, orion)
    blueprint.add_service(host_service)
    blueprint.add_service(orion_service)
    request.deploy_template(blueprint)
def main(): dataset = get_data() data = dataset["train"] goal = dataset["train_goal"] composition = [inputSize, 500, 100, outputSize] # the network composition net = model.Network(composition, dropout=False) net.eta = lr # dropbox_.download(network + ".pkl", network + ".pkl") # dropbox_.download("config/score.txt", "config/score.txt") # net.load(network) # load the network size = len(data) batch = 100000 # print("starting...") count = 0 while True: count += 1 err = 0 start = timer() for i in range(size): err += net.train(data[i], goal[i]) prograssBar(i + 1, size) if (i + 1) % batch == 0: print("") print( str(net.validate(dataset["test"], dataset["test_goal"])) + "%") net.save(network) # dropbox_.upload(network + ".pkl", network + ".pkl", large=True) searchThanConv(net, count) """