def main():
    """Entry point: parse CLI options and run style-transfer training.

    NOTE(review): `build_parser`, `check_opts`, `get_img`, `list_files`
    and `optimize` are defined elsewhere in this project; their behavior
    is inferred only from this call site.
    """
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)  # check that the parsed arguments are valid
    style_target = get_img(options.style)
    content_targets = list_files(options.train_path)

    # Keyword parameters forwarded to optimize().
    # NOTE(review): sibling variants in this file pass the checkpoint path
    # under the key "save_path"; confirm optimize() accepts "checkpoint_dir".
    kwargs = {
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "checkpoint_dir": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate
    }

    # Positional parameters forwarded to optimize().
    args = [
        content_targets,
        style_target,
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.vgg_path
    ]

    # Note the for loop: i is the iteration counter.
    # optimize() yields (predictions, losses, iteration, epoch) tuples.
    for preds, losses, i, epoch in optimize(*args, **kwargs):
        # Unpack the individual loss components
        # (need more than 3 values to unpack).
        style_loss, content_loss, tv_loss, loss = losses
        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content: %s, tv: %s' % to_print)
def main():
    """Entry point: parse CLI options and run style-transfer training.

    Fixes relative to the original:
    - `content_targets` was referenced in `args` but never assigned
      (NameError at runtime); it is now built exactly the way the
      sibling variants in this file build it (directory of training
      images normally, the single test image in slow mode).
    - `losses` was unpacked into three names, but every sibling variant
      shows optimize() yielding a 4-tuple (style, content, tv, total),
      which would raise ValueError; all four are now unpacked.
    - The trailing bare `print` expression (a no-op) is replaced with
      the loss report the siblings emit.
    """
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)  # validate parsed arguments
    style_target = get_img(options.style)

    # FIX: build the training set (was missing entirely).
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    # Keyword parameters forwarded to optimize().
    kwargs = {
        'slow': options.slow,
        'epochs': options.epochs,
        'print_iterations': options.checkpoint_iterations,
        'batch_size': options.batch_size,
        'save_path': os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        'learning_rate': options.learning_rate
    }

    # Positional parameters forwarded to optimize().
    args = [
        content_targets,
        style_target,
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        # FIX: optimize() yields a 4-tuple of losses.
        style_loss, content_loss, tv_loss, loss = losses
        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content: %s, tv: %s' % to_print)
def main():
    """Entry point: parse CLI options, train, and report per-checkpoint losses.

    Fixes relative to the original:
    - `save_img(preds_path, img)` referenced an undefined name `img`
      (NameError in slow mode); the sibling variant in this file saves
      the yielded `preds`, which is what is written here.
    - `ckpt_dir` was only assigned inside the `options.test` branch, so
      the final `cmd_text` line raised NameError whenever no test image
      was configured; it now has a sensible default before the loop.
    """
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)  # validate parsed arguments
    style_target = get_img(options.style)

    # Training set: a directory of images normally, or just the test image
    # when running in slow (single-image) mode.
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    # Keyword parameters forwarded to optimize().
    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
        "checkpoint_restore": options.checkpoint_restore
    }

    # Slow (single-image) mode needs many more epochs and a larger
    # learning rate to converge.
    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    # Positional parameters forwarded to optimize().
    args = [
        content_targets,
        style_target,
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.vgg_path
    ]

    # FIX: default so cmd_text below never hits an unbound name.
    ckpt_dir = options.checkpoint_dir

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses
        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir is not False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                # FIX: was save_img(preds_path, img) — `img` is undefined;
                # the yielded predictions are what should be saved.
                save_img(preds_path, preds)
                ckpt_dir = options.checkpoint_dir

    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n `%s`" % cmd_text)
def main() -> None:
    """CLI entry point: read source code, optimize its AST, write the result."""
    parser = build_parser()
    opts, positional = parser.parse_args()

    # Options not given as flags may fall back to positional arguments,
    # consumed in order: input filename first, then output path.
    if opts.filename is None and len(positional) > 0:
        opts.filename = positional.pop(0)
    if opts.output is None and len(positional) > 0:
        opts.output = positional.pop(0)

    # --version short-circuits everything else.
    if opts.version:
        print_version()
        exit(0)

    source = read_input_code(opts.filename)
    tree = RedBaron(source)
    optimize(tree, opts)
    write_output_code(opts.output, tree.dumps())
def main():
    """Entry point: parse CLI options and train on paired noisy/clear images.

    Fixes relative to the original:
    - A dead `ckpt_dir = os.path.dirname(options.checkpoint_dir)`
      assignment (immediately overwritten, never read) is removed.
    - `assert options.test_dir != False` is the non-idiomatic singleton
      comparison; `is not False` is used instead.
    """
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)  # validate parsed arguments
    content_targets = _get_files(options.train_path)
    clear_content_targets = _get_files(options.train_path_)
    # random.shuffle(content_targets)  # shuffle order

    # Keyword parameters forwarded to optimize().
    kwargs = {
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, ''),
        "learning_rate": options.learning_rate
    }

    # Positional parameters forwarded to optimize().
    args = [
        content_targets,
        clear_content_targets,
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses
        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir is not False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            evaluate.ffwd_to_img(options.test, preds_path,
                                 options.checkpoint_dir)
            ckpt_dir = options.checkpoint_dir
            cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
            print("Training complete. For evaluation:\n `%s`" % cmd_text)
def main():
    """Entry point: parse arguments, record the invocation, and train."""
    options = build_parser().parse_args()

    # Make sure the checkpoint directory exists before training starts.
    if not os.path.exists(options.checkpoint_dir):
        os.makedirs(options.checkpoint_dir)

    # Record the exact command line used to launch this run.
    with open('command.txt', 'w+') as out:
        out.write(' '.join(sys.argv))

    size = (options.size, options.size)

    # Training images are read lazily from an HDF5 database, so the file
    # handle must stay open for the duration of training.
    db = h5py.File(options.train_path, 'r')
    content_targets = db['train2014']['images']

    # Keyword parameters forwarded to optimize().
    kwargs = {
        "epochs": options.epochs,
        "period": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
    }
    # Only forward a restore checkpoint when one was actually given.
    if options.checkpoint_model is not False:
        kwargs["checkpoint_model"] = options.checkpoint_model

    # Positional parameters forwarded to optimize().
    args = [content_targets, options.vgg_path, size]

    previous = time()
    for epoch, i, loss in optimize(*args, **kwargs):
        print('Epoch %d, Iteration: %d, Loss: %1.3lf, Time: %1.3lf'
              % (epoch, i, loss, time() - previous))
        previous = time()
print( f'Usando {training_size_covtype * 100} por ciento del cover type dataset' ) else: print('Usando 100 por ciento del cover type dataset') print( 'Esto puede demorar. Puede pasar como argumento la proporcion del dataset covtype que desea usar' ) print('Esto es, por ejemplo:') print('python3 run.py 0.02') training_size_covtype = 0.02 data = pandas.read_csv('./iris.csv') train, test = train_test_split(data, test_size=0.2) print("### IRIS ###") parte_a(train=train, test=test) parte_b(train=train, test=test) optimize() print("### COVER_TYPE NUMERIC (opt.csv) ###") data = pandas.read_csv('./covtype.data.opt.csv') pseudo_train, dataset = train_test_split(data, test_size=training_size_covtype) train, test = train_test_split(dataset, test_size=0.2) parte_c(train, test) print("### COVER_TYPE LOG (opt.log.csv) ###") data = pandas.read_csv('./covtype.data.opt.log.csv') pseudo_train, dataset = train_test_split(data, test_size=training_size_covtype) train, test = train_test_split(dataset, test_size=0.2) parte_c(train, test)
def main():
    """Train one style-transfer model per style image.

    For every style image found, a dedicated checkpoint directory
    (`checkpoint_<name>`) and test-output directory (`test_<name>`) are
    created and a full training run is executed.

    Fixes relative to the original:
    - `style_files.extend(options.style)` iterated the path STRING and
      appended one list entry per character; `append` keeps the path as
      a single entry.
    - `ckpt_dir` was only assigned inside the `options.test` branch, so
      the `cmd_text` line raised NameError when no test image was
      configured; it now has a default before the loop.
    """
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)  # validate parsed arguments

    # Training set: a directory of images normally, or just the test image
    # when running in slow (single-image) mode.
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    # Collect the style image(s): a single file, or every file in a directory.
    style_files = []
    if os.path.isfile(options.style):
        # FIX: was extend(), which splits the path into characters.
        style_files.append(options.style)
    else:
        style_files = _get_files(options.style)

    for style_file in style_files:
        print("-------------Started to train2014 model for style '%s'" %
              os.path.basename(style_file))
        style_target = get_img(style_file)

        # Per-style checkpoint and test-output directories.
        checkpoint_dir = "checkpoint_" + os.path.splitext(
            os.path.basename(style_file))[0]
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        test_dir = "test_" + os.path.splitext(os.path.basename(style_file))[0]
        if options.test:
            if not os.path.exists(test_dir):
                os.makedirs(test_dir)

        # Keyword parameters forwarded to optimize().
        kwargs = {
            "slow": options.slow,
            "epochs": options.epochs,
            "print_iterations": options.checkpoint_iterations,
            "batch_size": options.batch_size,
            "save_path": checkpoint_dir,
            "learning_rate": options.learning_rate
        }
        # Slow (single-image) mode needs many more epochs and a larger
        # learning rate to converge.
        if options.slow:
            if options.epochs < 10:
                kwargs['epochs'] = 1000
            if options.learning_rate < 1:
                kwargs['learning_rate'] = 1e1

        # Positional parameters forwarded to optimize().
        args = [
            content_targets,
            style_target,
            options.content_weight,
            options.style_weight,
            options.tv_weight,
            options.vgg_path
        ]

        # FIX: default so cmd_text below never hits an unbound name.
        ckpt_dir = checkpoint_dir

        for preds, losses, i, epoch in optimize(*args, **kwargs):
            style_loss, content_loss, tv_loss, loss = losses
            print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
            to_print = (style_loss, content_loss, tv_loss)
            print('style: %s, content:%s, tv: %s' % to_print)
            if options.test:
                # assert options.test_dir != False
                preds_path = '%s/%s_%s.png' % (test_dir, epoch, i)
                if not options.slow:
                    ckpt_dir = os.path.dirname(checkpoint_dir)
                    evaluate.ffwd_to_img(options.test, preds_path,
                                         checkpoint_dir)
                else:
                    save_img(preds_path, preds)
                    ckpt_dir = checkpoint_dir

        cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
        print("Training complete. For evaluation:\n `%s`" % cmd_text)
def process_to_file_store(self):
    """Convert each MusicXML part into channel command output.

    Walks the (at most four) parts in `self.part_list`, parses each one
    with its channel-specific parser, and accumulates the generated
    commands into `self.output_file_store`, which is returned.

    Raises:
        exceptions.MusicDesyncError: when channel lengths disagree or
            user-defined loops are inconsistent between channels.

    NOTE(review): `ParseChannel1..4`, `notes`, `text`, `exceptions` and
    the `term_text`/`output_text` helpers are defined elsewhere; their
    behavior is inferred only from these call sites.
    """
    self.add_headers()
    already_found_user_loop = False
    channel_lengths = []  # per-channel lengths for the desync parity check

    # Cap the number of parts: 3 channels in noiseless mode, 4 otherwise.
    if self.options.noiseless:
        if len(self.part_list) >= 4:
            self.part_list = self.part_list[:3]
    else:
        if len(self.part_list) > 4:
            self.part_list = self.part_list[:4]

    for channel in range(1, len(self.part_list) + 1):
        print(self.term_text.converting_channel(channel, self.part_list))

        # Pick the parser matching this channel number (1-4).
        if channel == 1:
            channel_parser = ParseChannel1(self.options, self.song_pointer)
        if channel == 2:
            channel_parser = ParseChannel2(self.options, self.song_pointer)
        if channel == 3:
            channel_parser = ParseChannel3(self.options, self.song_pointer)
        if channel == 4:
            channel_parser = ParseChannel4(self.options, self.song_pointer)

        # Locate this channel's <part> element in the score.
        channel_part = self.xml_root.find(text.XmlText.format_part(
            self.part_list[channel - 1][0]))
        self.output_file_store.append(
            self.output_text.channel_label(channel))

        if channel == 1:
            # The tempo parameter is fetched because the text isn't always
            # consistent with the actual tempo. This also allows for
            # handling less standard tempo indication.
            try:
                bpm = float(channel_part.find(
                    './measure/direction/sound').get('tempo'))
            except AttributeError:
                # No tempo marking in the score: use the user-supplied
                # tempo, or fall back to 120 bpm with a warning.
                if self.options.tempo is not None:
                    bpm = self.options.tempo
                else:
                    bpm = 120
                    print(self.term_text.no_tempo_warning)
            # We need the divisions so that the bpm can be adjusted.
            divisions = int(channel_part.find(
                './measure/attributes/divisions').text)
            tempo = notes.calc_score_tempo(bpm, divisions)
            channel_commands = channel_parser.get_initial_channel_commands(
                tempo)
        else:
            # divisions = None
            # NOTE(review): channels 2-4 reuse `divisions` computed for
            # channel 1 below — confirm channel 1 always runs first.
            channel_commands = \
                channel_parser.get_initial_channel_commands()
        self.output_file_store.extend(channel_commands)

        parse_staff = notes.ParseStaff(channel_part, channel,
                                       self.song_pointer, self.options)
        staff_callback = parse_staff.output_notes(divisions)
        if staff_callback == 'truncate':
            self.truncate_flag = True

        # Check for desync errors
        # Channel length parity check
        channel_lengths.append(parse_staff.channel_length)
        # If the reserved truncate command is found, the length check needs to be skipped.
        if self.truncate_flag == False:
            if len(channel_lengths) > 1:
                # Each channel must be exactly as long as the previous one.
                if (channel_lengths[channel - 1] !=
                        channel_lengths[channel - 2]):
                    for chan in range(0, len(channel_lengths)):
                        print('Channel {} length: '.format(chan + 1) +
                              str(channel_lengths[chan]))
                    raise exceptions.MusicDesyncError(
                        self.term_text.parity_check_failed, channel_lengths)

        # Check to make sure user defined loops are consistent
        if parse_staff.found_user_loops is False:
            # This channel has no user loop: if an earlier channel had one,
            # the channels are out of sync.
            if already_found_user_loop is True:
                raise exceptions.MusicDesyncError(
                    self.term_text.desync_error +
                    self.term_text.conversion_incomplete)
            self.output_file_store.append(
                self.output_text.channel_loop_label(channel))
        else:
            if channel != 4:
                already_found_user_loop = True
            else:
                # Last channel has a loop but earlier ones did not.
                if already_found_user_loop is False:
                    raise exceptions.MusicDesyncError(
                        self.term_text.desync_error +
                        self.term_text.conversion_incomplete)

        self.output_file_store.extend(parse_staff.staff_output)
        self.output_file_store.append(
            self.output_text.channel_loop_end(channel))

    # Run the peephole optimizer unless explicitly disabled.
    if not self.options.no_optimizations:
        optimize.optimize(self.output_file_store, self.song_pointer)
    return self.output_file_store