def make(recipe, name, version, install_path, cookbook, sha1s=None, overwrite=False):
    # check that we have the software that we need.
    software = utils.get_list(recipe, ('dependencies', 'software'))
    msg = "didn't find required software: %s for %s\n" % (software, name)
    code = utils.msg_unless(utils.check_software_deps(software), code=5, msg=msg)
    if code != 0:
        return code

    data_deps = utils.get_list(recipe, ("dependencies", "data"))
    make_deps(data_deps, install_path, cookbook, overwrite)

    out_files = []
    # set template variables
    tmpl_vars = dict(GGD_PATH=install_path, version=version, name=name,
                     DATE=datetime.date.today().strftime("%Y-%m-%d"))
    # TODO: data dependencies
    for i, cmd in enumerate(recipe['cmds']):
        if sha1s is None:
            tmpl_vars['sha1'] = ''
        else:
            try:
                tmpl_vars['sha1'] = sha1s[i]
            except IndexError:
                tmpl_vars['sha1'] = ''
                sys.stderr.write("WARNING: no SHA1 provided for recipe %s/%d\n" % (name, i))
        tcmd = Template(cmd).safe_substitute(tmpl_vars)
        out = Template(recipe['outfiles'][i]).safe_substitute(tmpl_vars)
        code = utils.msg_unless(utils.check_outfiles([out], overwrite))
        if code != 0:
            return code
        out_files.append(out)

        # use call() rather than check_call() so a non-zero exit status reaches
        # the msg_unless check below instead of raising CalledProcessError
        ret = subprocess.call(tcmd, shell=True)
        msg = "error processing recipe."
        code = utils.msg_unless(ret == 0, code=ret, msg=msg)
        if code != 0:
            return code

        code = utils.msg_unless(sha_matches(out, tmpl_vars['sha1'], name), code=4)
        if code != 0:
            return code

    for out in out_files:
        if os.path.exists(os.path.join(install_path, out)):
            if overwrite:
                os.unlink(os.path.join(install_path, out))
            else:
                code = utils.msg_unless(False, code=1,
                                        msg="ERROR: output file %s exists. Use --overwrite if needed"
                                            % os.path.join(install_path, out))
                if code != 0:
                    return code
        shutil.move(out, install_path)
    return 0

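# The command templating above relies on the standard library's
# string.Template.safe_substitute, which leaves unknown $placeholders intact
# instead of raising KeyError. A minimal, self-contained illustration of that
# behavior (the values below are made up for the example):
from string import Template

tmpl_vars = dict(GGD_PATH='/opt/ggd', version='1', name='demo', sha1='')
cmd = "curl $URL > $GGD_PATH/$name.$version.txt  # sha1: $sha1"
print(Template(cmd).safe_substitute(tmpl_vars))
# -> curl $URL > /opt/ggd/demo.1.txt  # sha1:
# ($URL is not in tmpl_vars, so it survives substitution unresolved)
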
def main():
    vw_model = utils.get_word2vec_model('../resources/yelp/word2vec/yelp_restaurants_word2vector', ncols, nwin)
    vw_model.vectors = utils.normalize2(vw_model.vectors)
    glove_dict = utils.get_glove_data('../resources/yelp/glove/', 'vectors_' + str(ncols) + '.txt')
    word_dict = utils.get_word_counts("../resources/yelp/data/yelp_restaurant_word_counts.txt")
    # train_list, test_list = get_list()
    # train_list = utils.get_list('../resources/yelp/train_data50000.txt')
    # test_list = utils.get_list('../resources/yelp/test_data50000.txt')
    max_rlen = get_max_number_of_token()
    model = my_model(max_rlen)
    print("#################### Iterations ################\n")
    results = []
    for j in range(1):
        acc_hist = {}
        train_list = utils.get_list('../resources/yelp/cv_train_data_' + str(j) + '.txt')
        # NOTE: this evaluates on the same training split (the output file is
        # named CNN-VALID); cv_test_data_ may have been intended here
        test_list = utils.get_list('../resources/yelp/cv_train_data_' + str(j) + '.txt')
        half = len(test_list) // 2
        for e in range(nepochs):
            for i in range(int(len(train_list) / nbatch)):
                x_train, y_train = get_review_windows(vw_model, train_list[nbatch * i:nbatch * (i + 1)],
                                                      max_rlen, ncols, nbatch, glove_dict, word_dict)
                (loss, acc) = model.train_on_batch(x_train, y_train)
                print("Train: Epoch:" + str(e + 1) + " Loss = " + str(loss) + " -- " + "Accuracy = " + str(acc))
            acc_hist[str(e + 1)] = acc
        with open("../resources/yelp/scores/CNN-VALID_" + str(j), 'w') as out:
            counter = 0
            score = 0
            for i in range(int(len(test_list) / nbatch)):
                x_test, y_test = get_review_windows(vw_model, test_list[nbatch * i:nbatch * (i + 1)],
                                                    max_rlen, ncols, nbatch, glove_dict, word_dict)
                loss = model.test_on_batch(x_test, y_test)
                pred = model.predict_proba(x_test)
                classes = model.predict_classes(x_test)
                counter += nbatch
                for p, c in zip(pred, classes):
                    # score: first half of the file holds positives, second half negatives
                    if counter <= half and c == 1:
                        score += 1
                    elif counter > half and c == 0:
                        score += 1
                    out.write(str(c) + " " + str(p[1]) + " " + str(p[0]) + "\n")
                print("Test: Iteration:" + str(i + 1) + " Loss = " + str(loss))
            results.append(score)
        print("######################## Trial = " + str(j))
    acc = []
    for result in results:
        acc.append(result)
        print(result)
    np_acc = np.array(acc)
    print("Mean = " + str(np_acc.mean()))
    print("Std.Dev = " + str(np.std(np_acc, dtype=np.float64)))

def logic():
    # read the file
    # keep looping on invalid operations; type "exit" to quit
    while True:
        try:
            # business logic
            info = input("\033[1;35mPlease input your operation: \033[0m")
            # string -> list
            input_list = info.split()
            action = input_list[0]
            userinfo_list = input_list[1:]
            # userinfo_string = ' '.join(userinfo_list[1:])
            if action == "add":
                res = add_user(userinfo_list)
                print(res)
            elif action == "delete" or action == "del":
                # .remove
                res = del_user(userinfo_list)
                print(res)
            elif action == "find":
                res = find_info(userinfo_list)
                print(res)
            elif action == "update":
                res = update_info(userinfo_list)
                print(res)
            elif action == "list":
                get_list()
            elif action == "display":
                res = get_pageinfo(userinfo_list)
                print(res)
            # elif action == "export":
            #     res = csv_export()
            #     print(res)
            elif action == "help" or action == "h":
                print(help_info)
            elif action == "exit":
                sys.exit(0)
            else:
                print("\033[1;36mInvalid input, please type help for usage!\033[0m\n")
        except IndexError:
            print('\033[1;36m[Errno] list index out of range.\033[0m\n')
        except FileNotFoundError:
            print('\033[1;36m[Errno] No such file or directory.\033[0m\n')
        except TypeError:
            print('\033[1;36m[Errno] Type Error.\033[0m\n')
        except KeyError:
            print('\033[1;36m[Errno] Key Error.\033[0m\n')
        except Exception as e:
            print(e)

# logic()

def add_listener(res_list, condition, action):
    # both condition and action must be functions
    if not isinstance(condition, types.FunctionType) \
            or not isinstance(action, types.FunctionType):
        return None
    listener_id = utils.calc_function_hash([condition, action])
    listener = (res_list, condition, action)
    listeners[listener_id] = listener
    for name in res_list:
        utils.get_list(res_to_listener, name).append(listener_id)
    return listener_id

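# utils.get_list(res_to_listener, name) above is used as a default-list
# lookup on a dict. A plausible sketch of such a helper, assuming
# setdefault-style semantics (hypothetical -- the real utils module is not
# shown in this corpus):
def get_list(mapping, key):
    # return the list stored under `key`, creating an empty one on first use
    return mapping.setdefault(key, [])

res_to_listener = {}
get_list(res_to_listener, 'cpu').append('listener-1')
get_list(res_to_listener, 'cpu').append('listener-2')
assert res_to_listener == {'cpu': ['listener-1', 'listener-2']}
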
def add_show_intent_handler(handler_input):
    """Handler for Add Show Intent."""
    h = handler_input.request_envelope.context.system.user.access_token
    showtype = 'show'
    # If we are not authenticated, let the user know
    if h is None:
        reprompt = language.AUTH_ERROR
        handler_input.response_builder.speak(reprompt).ask(reprompt)
        return handler_input.response_builder.response
    headers = utils.build_headers(h, clientid)
    # get our persistent_attributes
    _perattr = handler_input.attributes_manager.persistent_attributes
    movie = get_slot_value(handler_input=handler_input, slot_name="showName")
    user_list = get_slot_value(handler_input=handler_input, slot_name="list_name")
    _list, _usecustomlist = utils.get_list(user_list, _perattr)
    # search for the show and get the object
    b = trakt_api.search(movie, headers, showtype, False)
    if b['error']:
        # handle this
        handler_input.response_builder.speak(language.SHOW_404)
        return handler_input.response_builder.response
    y = b['show']
    # dig through our search and add the show to our list or our Watchlist
    trakt_api.parse_search(b['type'], headers, y, _list, _usecustomlist, True)
    utils.notify(movie, b['type'], _list)
    handler_input.response_builder.speak(movie + " show has been added to your list " + str(_list))  # .ask(reprompt)
    return handler_input.response_builder.response

def __init__(self, *args, **kwargs):
    super(DocumentRequestForm, self).__init__(*args, **kwargs)
    self.fields['title'].widget = forms.TextInput(attrs={'class': 'form-control'})
    self.fields['document_type'].widget = forms.Select(attrs={'class': 'form-control'})
    self.fields['document_type'].label = 'Document Type'
    self.fields['document_type'].choices = get_list(DocumentType)
    self.fields['upload_quantity'].widget = forms.TextInput(attrs={'class': 'form-control'})

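# get_list(DocumentType) above apparently turns a Django model into choice
# tuples for a Select widget. A hedged sketch of what such a helper might look
# like (hypothetical -- the actual field names and ordering are assumptions):
def get_list(model):
    # build (pk, label) pairs suitable for a ChoiceField / Select widget
    return [(obj.pk, str(obj)) for obj in model.objects.all()]
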
def parse_args():
    global path_base, log_execute, log_report, collect_list
    parser = argparse.ArgumentParser()
    parser.add_argument('-cl', '--collects',
                        help='list project names that need to collect bug reports. "," as delimiter')
    parser.add_argument('-ca', '--clear-all', dest='clear_all', action='store_true',
                        help='clear history data.')
    parser.set_defaults(clear_all=False)
    parser.add_argument('-f', '--force', dest='force', action='store_true',
                        help='forcibly execute no matter whether '
                             'the compile succeeded before.')
    parser.set_defaults(force=False)
    args = parser.parse_args()
    clear_all = args.clear_all
    path_base = utils.get_path_base()
    log_base = utils.get_log_base()
    log_execute = os.path.join(log_base, "execute")
    log_report = os.path.join(log_base, "report", 'spotbugs')
    utils.clear_data(clear_all, log_report)
    if not os.path.exists(log_report):
        os.mkdir(log_report)
    collect_list = utils.get_list(args.collects, log_execute, 'collect')
    print("[Path] {}".format(log_execute))

def parse_args():
    global path_base, log_compile, skip_list, compile_list, is_force, is_purge
    parser = argparse.ArgumentParser()
    parser.add_argument('-cl', '--compiles',
                        help='list project names that need to compile. "," as delimiter')
    parser.add_argument('-ca', '--clear-all', dest='clear_all', action='store_true',
                        help='clear all history data.')
    parser.set_defaults(clear_all=False)
    parser.add_argument('-f', '--force', dest='force', action='store_true',
                        help='forcibly execute no matter whether '
                             'the compile succeeded before.')
    parser.set_defaults(force=False)
    parser.add_argument('-p', '--purge', dest='purge', action='store_true',
                        help='purge local repository')
    parser.set_defaults(purge=False)
    args = parser.parse_args()
    path_base = utils.get_path_base()
    log_base = utils.get_log_base()
    compile_list = utils.get_list(args.compiles, path_base, 'compile')
    clear_all = args.clear_all
    is_force = args.force
    is_purge = args.purge
    if is_purge:
        log_compile = os.path.join(log_base, 'compile_')
    else:
        log_compile = os.path.join(log_base, 'compile')
    skip_list = utils.get_skips('compile')
    utils.clear_data(clear_all, log_compile)
    if not os.path.exists(log_compile):
        os.mkdir(log_compile)
    print("[path] {}".format(log_compile))
    print('project status time')

def get_restaurants():
    if request.method == 'GET':
        return render_template('restaurants.html', data=restaurants.filters_object())
    lat = utils.get_field(request, 'lat', required=True)
    lng = utils.get_field(request, 'long', required=True)
    rad = utils.get_num(request, 'radius', 1, 20, required=True)
    cuisines = utils.get_list(request, 'cuisines')
    categories = utils.get_list(request, 'categories')
    price = utils.get_num(request, 'price', required=True)
    user_id = utils.get_num(request, 'user_id', required=True)
    limit = utils.get_num(request, 'limit')
    offset = utils.get_num(request, 'offset')
    return restaurants.get_restaurants(lat, lng, rad, price, limit, offset, cuisines, categories)

def getlist():
    listname = request.args.get("lname")
    sortby = request.args.get("sort")
    searchby = request.args.get("search")
    query = request.args.get("q")
    l = utils.get_list(listname, sortby, searchby, query)
    return json.dumps(l)

def get(self):
    filename = self.request.get("filename")
    mlistble, mlistwifi = utils.get_list(filename)
    params = {
        'filename': filename,
        'mlistwifi': mlistwifi,
        'mlistble': mlistble
    }
    self.render_template('especialgraphic.html', params)

def add_movie_intent_handler(handler_input):
    """Handler for Add Movie Intent."""
    # get our persistent_attributes
    # if the user has launched the app, greet them
    # set our session attributes
    _perattr = handler_input.attributes_manager.persistent_attributes
    attr = handler_input.attributes_manager.session_attributes
    if is_request_type("LaunchRequest")(handler_input):
        # _usecustomlist = _perattr['usecustomlist']
        attr["movie"] = {}
        attr["show"] = {}
        attr['readBoxOffice'] = False
        attr['readMovies'] = False
        attr['readShows'] = False
        attr['readBoth'] = False
        attr['active_request'] = ''
        attr['repeat'] = ''
        handler_input.response_builder.speak("Welcome To Radar the Trakt.tv tracker").ask("")
        return handler_input.response_builder.response
    # Get the value of the user's auth token
    h = handler_input.request_envelope.context.system.user.access_token
    # _list = 'watchlist'
    _usecustomlist = False
    # If we are not authenticated, let the user know
    if h is None:
        handler_input.response_builder.speak(language.AUTH_ERROR)
        return handler_input.response_builder.response
    # Set all our headers for the trakt-api
    headers = utils.build_headers(h, clientid)
    print("Header= " + str(headers))
    # Get the movie name and throw it onto the movie var
    movie = get_slot_value(handler_input=handler_input, slot_name="movieName")
    use_list = get_slot_value(handler_input=handler_input, slot_name="list_name")
    # reprompt = "Are you sure you want to add " + movie + ' to your list ?'
    # the user may have given us nothing; check that we have saved attributes
    _list, _usecustomlist = utils.get_list(use_list, _perattr)
    # search for the movie and get the object
    b = trakt_api.search(movie, headers, "movie", True)
    if b['error']:
        # handle this
        handler_input.response_builder.speak("I couldn't find the show you requested")
        return handler_input.response_builder.response
    # force our movie/show object into a small var to make things easier
    y = b["movie"]
    # dig through our search and add the movie/show to our list or our Watchlist
    if trakt_api.parse_search(b['type'], headers, y, _list, _usecustomlist, True):
        # media_name, media_type, a_list
        utils.notify(movie, b['type'], _list)
        handler_input.response_builder.speak(movie + " has been added to your " + _list + " list")  # .ask(reprompt)
    else:
        # TODO Fix the notify to allow errors
        # utils.notify(movie, b['type'], _list)
        handler_input.response_builder.speak("There was a problem adding " + movie + " to your list " + _list)
    return handler_input.response_builder.response

def load_background_images(self):
    print("Loading background images...")
    for bi in utils.get_list(self.background_images_path, '*_pickle.pkl'):
        background_image = BackgroundImage(bi)
        if background_image.has_support_surface():
            self.background_images_list.append(background_image)
            self.background_images_dict[path.basename(background_image.filename)] = background_image
    print("Loaded %i background images." % len(self.background_images_list))

def _run_recipe(args, recipe):
    """Execute the contents of a YAML-structured recipe."""
    return make(recipe['recipe']['make'],
                recipe['attributes']['name'],
                recipe['attributes']['version'],
                utils.get_install_path(args),
                args.cookbook,
                sha1s=utils.get_list(recipe['attributes'], 'sha1'),
                overwrite=args.overwrite)

def add(playlists):
    """Adds the current song to the given playlists."""
    if not playlists:
        playlists = get_list(["Playlists"],
                             rb.getPlaylists(),
                             multiple=True,
                             text="Add to playlists:")
    song = player.getPlayingUri()
    for playlist in playlists:
        rb.addToPlaylist(playlist, song)

def remove(playlists):
    """Removes the current song from the given playlists."""
    if not playlists:
        playlists = get_list(["Playlists"],
                             rb.getPlaylists(),
                             multiple=True,
                             text="Remove from playlists:")
    song = player.getPlayingUri()
    for playlist in playlists:
        rb.removeFromPlaylist(playlist, song)

def get(self):
    filename = self.request.get("filename")
    int_time, finish_time = utils.get_period(filename)
    mlistble, mlistwifi = utils.get_list(filename)
    params = {
        'filename': filename,
        'mlistwifi': mlistwifi,
        'mlistble': mlistble,
        'int_time': int_time,
        'finish_time': finish_time
    }
    self.render_template('machineLearningAdvanced.html', params)

def make_deps(deps, install_path, cookbook, overwrite=False):
    # deps looks like: [{'version': 1, 'name': 't2'}, {'version': 1, 'name': 't1'}]
    for dep in deps:
        sys.stderr.write("\ninstalling dependency: {name}:{version}\n".format(**dep))
        recipe_dict = _get_recipe(dep['name'], cookbook)
        sha1s = utils.get_list(recipe_dict['attributes'], 'sha1')
        ret = make(recipe_dict["recipe"]["make"], dep['name'], dep['version'],
                   install_path, cookbook, sha1s=sha1s, overwrite=overwrite)
        code = utils.msg_unless(ret == 0, code=ret,
                                msg="error installing dependency: %s" % dep['name'])
        if code != 0:
            sys.exit(code)

def main():
    results = []
    vw_model, word_dict, stop_words, glove_dict = get_wordvectors()
    for i in range(div):
        # train_list, test_list = create_data(i)
        train_list = utils.get_list('../resources/yelp/cv_train_data_' + str(i) + '.txt')
        test_list = utils.get_list('../resources/yelp/cv_test_data_' + str(i) + '.txt')
        y_train, x_train = utils.create_feature_matrix(vw_model, glove_dict, train_list,
                                                       "../resources/yelp/data/",
                                                       "yelp_wvgl_train_cluster_normalize_" + str(ncols) + "_" + str(nwin) + ".txt",
                                                       stop_words, word_dict, 1)
        y_test, x_test = utils.create_feature_matrix(vw_model, glove_dict, test_list,
                                                     "../resources/yelp/data/",
                                                     "yelp_wvgl_test_cluster_normalize_" + str(ncols) + "_" + str(nwin) + "5.txt",
                                                     stop_words, word_dict, 1)
        model = models.Sequential()
        model.add(layers.Dense(ncols // 2, activation=activations.relu,
                               input_shape=(ncols * ncols_mul,)))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(1, activation=activations.sigmoid))
        model.compile(optimizer=optimizers.Adadelta(),
                      loss=losses.binary_crossentropy,
                      metrics=['accuracy'])
        history = model.fit(x_train, y_train, epochs=10, batch_size=25)
        pred = model.predict_proba(x_test)
        classes = model.predict_classes(x_test)
        utils.write_score2(pred, classes, "WARV-TEST_" + str(i), "../resources/yelp/scores/")
        results.append(model.evaluate(x_test, y_test))
    acc = []
    for result in results:
        acc.append(result[1])
        print(result)
    np_acc = np.array(acc)
    print("Mean = " + str(np_acc.mean()))
    print("Std.Dev = " + str(np.std(np_acc, dtype=np.float64)))

def _objects(self, **kwargs):
    """
    Return an array of immediate children.
    @return: an unordered list of immediate children
    """
    ret = []
    for cname in self._order:
        try:
            if object_container_filter(self._children[cname], **kwargs):
                ret += utils.get_list(self._children[cname])
        except exceptions.InvalidObject as e:
            # remove the invalid object from this container
            logging.getLogger('cone').warning(
                'Removing invalid child because of exception %s' % e)
            self._remove(cname)
            continue
    return ret

def load_patches(self):
    """
    Load patches from pickle if available, otherwise load source images and extract patches
    :return:
    """
    if self.load_pickled_patches and path.isfile(self.pickled_patches_filename):
        print("Loading patches from", self.pickled_patches_filename, "...")
        with open(self.pickled_patches_filename, 'rb') as f:
            self.patches_list = pickle.load(f)
        for patch in self.patches_list:
            elevation_bucket = int(patch.get_elevation() / self.elevation_bucket_size)
            self.patches_dict[patch.get_class_id()][elevation_bucket].append(patch)
        print("Loaded %i patches." % len(self.patches_list))
    else:
        print("Loading patches from source images...")
        for image_path, label_path, info_path in utils.get_list(self.source_images_path, '.png', '__labels.json', '.yaml'):
            # load image
            print("\tLoading source image:", image_path)
            source_image = SourceImage(image_path, label_path, info_path,
                                       self.class_name_to_id, self.class_id_to_name)
            # extract patches
            for patch in source_image.get_patches():
                self.patches_dict[patch.get_class_id()][int(patch.get_elevation() / self.elevation_bucket_size)].append(patch)
                print("\t\tAdding patch:", self.class_id_to_name[patch.get_class_id()])
                self.patches_list.append(patch)
            # unload image
            # del source_image  # try this if too much memory is used
        if len(self.patches_list) > 0:
            print("Saving", self.pickled_patches_filename, "...")
            with open(self.pickled_patches_filename, 'wb') as f:
                pickle.dump(self.patches_list, f, pickle.HIGHEST_PROTOCOL)
        else:
            print("No patches loaded, not saving the pickled patches.")
    print("Done.")

def object_container_filter(obj, **kwargs):
    """
    Create a list of filter functions for each argument
    """
    filters = []
    if 'name' in kwargs:
        filters.append(lambda x: re.match(kwargs.get('name'), x._name))
    if 'path' in kwargs:
        filters.append(lambda x: re.match(kwargs.get('path'), x._path()))
    if 'type' in kwargs:
        filters.append(lambda x: isinstance(x, kwargs.get('type')))
    if 'filters' in kwargs:
        filters += kwargs.get('filters')
    ret = []
    for sobj in utils.get_list(obj):
        # apply the filters to each sub-object, not to the container itself
        if utils.filter(sobj, filters):
            ret.append(sobj)
    return ret

def parse_args():
    global path_base, log_execute, skip_list, execute_list, is_force, exepath
    parser = argparse.ArgumentParser()
    parser.add_argument('-el', '--executes',
                        help='list project names that need to execute. "," as delimiter')
    parser.add_argument('-ca', '--clear-all', dest='clear_all', action='store_true',
                        help='clear history data.')
    parser.set_defaults(clear_all=False)
    parser.add_argument('-f', '--force', dest='force', action='store_true',
                        help='forcibly execute no matter whether '
                             'the execution succeeded before.')
    parser.add_argument('-ep', '--exepath', dest='exepath', action='store_true',
                        help='get spotbugs analyzed classes')
    parser.add_argument('-nf', '--no-filter', dest='filter', action='store_false')
    parser.set_defaults(filter=True)
    args = parser.parse_args()
    clear_all = args.clear_all
    is_force = args.force
    path_base = utils.get_path_base()
    log_base = utils.get_log_base()
    exepath = args.exepath
    if args.exepath:
        log_execute = os.path.join(log_base, 'exepath')
    else:
        log_execute = os.path.join(log_base, 'execute')
    execute_list = utils.get_list(args.executes, path_base, 'execute')
    skip_list = utils.get_skips('execute')
    utils.clear_data(clear_all, log_execute)
    if not os.path.exists(log_execute):
        os.mkdir(log_execute)
    print("[Log Path] {}".format(log_execute))

def file_handler(bot, update):
    """
    Handler of telegram messages with either document or image
    :param bot:
    :param update:
    :return:
    """
    if update.message.document or update.message.photo:
        if update.message.caption and '#bugs' in update.message.caption:
            if update.message.document:
                attach = update.message.document
            else:
                attach = update.message.photo[-1]
            mime_type = getattr(attach, 'mime_type', 'image')
            file_name = getattr(attach, 'file_name', 'noname')
            file = bot.get_file(attach.file_id)
            if update.message.caption:
                bug_text = update.message.caption
            else:
                bug_text = 'Bug from Telegram chat'
            if len(bug_text) > 20:
                card_title = f'{bug_text[:20]}...'
            else:
                card_title = bug_text
            trello_list = utils.get_list(update)
            if trello_list:
                card = trello_list.add_card(card_title, bug_text)
                card.attach(file=file.download_as_bytearray(),
                            mimeType=mime_type, name=file_name)
                if card:
                    bot.send_message(
                        chat_id=update.message.chat_id,
                        text=f'A card was successfully created: {card.url}')
                else:
                    bot.send_message(chat_id=update.message.chat_id,
                                     text='Failed to create a card')

def message_handler(bot, update):
    """
    Handler for plain text messages
    :param bot:
    :param update:
    :return:
    """
    bug_text = update.message.text
    card_title = f'{bug_text[:20]}...' if len(bug_text) > 20 else bug_text
    trello_list = utils.get_list(update)
    if trello_list:
        card = trello_list.add_card(card_title, bug_text)
        if card:
            bot.send_message(
                chat_id=update.message.chat_id,
                text=f'A card was successfully created: {card.url}')
        else:
            bot.send_message(chat_id=update.message.chat_id,
                             text='Failed to create a card')

def _get(self, path):
    """
    Get a child object by its path.
    @return: The child object if it is found.
    @raise NotFound: when the object is not found among the children.
    """
    try:
        # traverse to the actual child element
        curelem = self
        for pathelem in utils.dottedref.split_ref(path):
            if utils.dottedref.get_index(pathelem) is None:
                curelem = curelem._children[pathelem]
            else:
                # the given pathelem is referring to a list
                name = utils.dottedref.get_name(pathelem)
                index = utils.dottedref.get_index(pathelem)
                curelem = utils.get_list(curelem._children[name])[index]
        return curelem
    # Catch the KeyError exception from dict and IndexError from list
    except (KeyError, IndexError) as e:
        raise exceptions.NotFound("Child %s not found from %s! %s" % (path, self, e))

fulltext_files = list()
# Go through the columns data
for col_key, cell_data in enumerate(row):
    element = md_header[col_key]['element']
    qualifier = md_header[col_key]['qualifier']
    delimit = md_header[col_key]['delimit']
    content = cell_data.value
    if not element or not content:
        continue
    if element == 'filenames':
        if delimit:
            fulltext_files = get_list(content, delimit)
        else:
            fulltext_files.append(content)
        continue
    if element == 'date':
        content = datetime.strftime(content, '%Y-%m-%d')
    # Handle multiple values
    if delimit:
        multi_value = get_list(content, delimit)
        for value in multi_value:
            dcvalue = et.Element('dcvalue', attrib={
                "element": element,
                "qualifier": qualifier
            })

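# For reference, dcvalue elements like the one built above serialize to the
# DSpace dublin_core.xml shape. A standalone xml.etree.ElementTree example
# (the element/qualifier values below are made up):
import xml.etree.ElementTree as et

dcvalue = et.Element('dcvalue', attrib={"element": "date", "qualifier": "issued"})
dcvalue.text = '2024-01-31'
print(et.tostring(dcvalue, encoding='unicode'))
# -> <dcvalue element="date" qualifier="issued">2024-01-31</dcvalue>
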
def remove_movie_intent_handler(handler_input):
    # Get the value of the user's auth token
    h = handler_input.request_envelope.context.system.user.access_token
    # If we are not authenticated, let the user know
    if h is None:
        handler_input.response_builder.speak(language.AUTH_ERROR).ask(language.AUTH_ERROR)
        return handler_input.response_builder.response
    # Set all our headers for the trakt-api
    headers = utils.build_headers(h, clientid)
    # TODO make sure we change I,II,III type movies to 1,2,3 and vice versa
    user_list = get_slot_value(handler_input=handler_input, slot_name="list_name")
    movie = str(get_slot_value(handler_input=handler_input, slot_name="movieName"))
    # get our persistent_attributes
    attr = handler_input.attributes_manager.persistent_attributes
    _list, _usecustomlist = utils.get_list(user_list, attr)
    # if our list isn't empty then we can go ahead and deal with the request
    if _usecustomlist:
        url = 'https://api.trakt.tv/users/me/lists/' + _list + '/items/movies'
        r = requests.get(url, headers=headers)
        if r.status_code == 200 or r.status_code == 201:
            dcode = json.loads(r.text)
            # print(json.dumps(json.loads(r.text), sort_keys=True, indent=4))
            i = 0
            _moviefound = False
            while i < len(dcode):
                # print(dcode[i]['name'])
                # print(json.dumps(dcode[i], sort_keys=True, indent=4))
                o = dcode[i]["movie"]['title']
                # print(str(o) + " is our title")
                # if our movie name matches the movie, send the request to delete it
                if o.lower() == movie.lower():
                    _moviefound = True
                    # print("we found it")
                    # print(json.dumps(dcode[i], sort_keys=True, indent=4))
                    if trakt_api.parse_delete_search("movie", headers, dcode[i]["movie"], _list, _usecustomlist):
                        handler_input.response_builder.speak(f"I have deleted {o} from the list {_list}")
                        return handler_input.response_builder.response
                        # print("we finished and deleted")
                        # exit("deleted")
                    else:
                        handler_input.response_builder.speak(f"I had trouble deleting {o} from the list {_list}")
                        return handler_input.response_builder.response
                        # print("we found the film but there was an error deleting")
                        # exit("not deleted")
                i += 1
            # if we failed to find the movie
            if _moviefound is False:
                # print("we couldn't find the film")
                handler_input.response_builder.speak(f"I couldn't find {movie} on the list {_list}")
                return handler_input.response_builder.response
        # if our first request to trakt fails
        else:
            handler_input.response_builder.speak("I couldn't contact Trakt.tv API ." + url)
            return handler_input.response_builder.response
    # if our user didn't give us a list or they are using the watch list
    else:
        # WE DIDN'T RECEIVE A LIST
        # TODO make sure we change I,II,III type movies to 1,2,3 and vice versa
        # search for the movie and get the object
        b = trakt_api.search(movie, headers, "movie", False)
        if b['error']:
            # handle this
            reprompt = "I couldn't find the movie you requested"
            handler_input.response_builder.speak(reprompt).ask(reprompt)
            return handler_input.response_builder.response
        # force our movie/show object into a small var to make things easier
        y = b["movie"]
        if trakt_api.parse_delete_search("movie", headers, y, _list, False):
            # media_name, media_type, a_list
            utils.notify(movie, b['type'], _list, "removed")
            handler_input.response_builder.speak(f"I have deleted {movie} from the list {_list}")
            return handler_input.response_builder.response
        else:
            handler_input.response_builder.speak(f"I couldn't delete {movie} from the list {_list}")
            return handler_input.response_builder.response

def save_metrics(self, figure_dir='', draw_mask=True):
    if self.config.save_metrics:
        print('-------- Save figures begin --------')
        # print(history.history.keys())
        if figure_dir == '':
            figure_dir = os.path.join(
                self.config.root_dir, 'figures', self.config.model_name,
                self.config.run,
                '{}{:.4f}_{}{:.4f}'.format(
                    self.config.record_metrics[0],
                    self.results[self.config.record_metrics[0]],
                    self.config.record_metrics[1],
                    self.results[self.config.record_metrics[1]]))
        else:
            figure_dir = os.path.join(
                figure_dir,
                '{}{:.4f}_{}{:.4f}'.format(
                    self.config.record_metrics[0],
                    self.results[self.config.record_metrics[0]],
                    self.config.record_metrics[1],
                    self.results[self.config.record_metrics[1]]))
        if not os.path.exists(figure_dir):
            os.makedirs(figure_dir)

        epochs = self.history.epoch
        metrics = self.history.history
        with open(os.path.join(figure_dir, 'logs.txt'), 'w') as f:
            f.write('epoch : ' + str(epochs) + '\n')
            for key, value in metrics.items():
                f.write(key + ' : ' + str(value) + '\n')
            for key, value in self.results.items():
                f.write(key + ' : ' + str(value) + '\n')
            if self.config.run in ['prune', 'auto']:
                for key, value in self.prune_info.items():
                    f.write(key + ' : ' + str(value) + '\n')

        pair_keys = [
            'loss', 'ift_loss', 'rec_loss', 'PSNR', 'SSIM', 'ift_PSNR',
            'ift_SSIM', 'rec_PSNR', 'rec_SSIM'
        ]
        for key in pair_keys:
            if key in metrics.keys() and 'val_' + key in metrics.keys():
                plt.figure()
                plt.plot(epochs, metrics[key])
                plt.plot(epochs, metrics['val_' + key])
                plt.grid(True)
                plt.title(key + ' vs epoch')
                plt.ylabel(key)
                plt.xlabel('epoch')
                plt.legend(['Train', 'Val'], loc='upper left')
                plt.tight_layout()
                plt.savefig(os.path.join(figure_dir, key + '_vs_epoch.png'))
                print('Saving figure at ' + os.path.join(figure_dir, key + '_vs_epoch.png'))
                plt.show(block=False)
                plt.pause(0.01)
                del metrics[key]
                del metrics['val_' + key]
        for key, value in metrics.items():
            plt.figure()
            plt.plot(epochs, value)
            plt.grid(True)
            plt.title(key + ' vs epoch')
            plt.ylabel(key)
            plt.xlabel('epoch')
            plt.tight_layout()
            plt.savefig(os.path.join(figure_dir, key + '_vs_epoch.png'))
            print('Saving figure at ' + os.path.join(figure_dir, key + '_vs_epoch.png'))
            plt.show(block=False)
            plt.pause(0.01)

        if draw_mask:
            pmask_layer_list = get_list(self.model, ['PMask2D'])
            for i in range(len(pmask_layer_list)):
                # prob, mask = pmask_layer_list[i].get_weights()[0], pmask_layer_list[i].mask
                prob = np.squeeze(pmask_layer_list[i].get_weights()[0], axis=(0, 3))
                # mask = np.squeeze(pmask_layer_list[i].get_weights()[1], axis=(0, 3))
                mask = binomial(prob)
                plt.figure()
                plt.title('Probability')
                plt.subplot(2, 2, 1)
                fig_obj = plt.imshow(prob, cmap=plt.get_cmap('jet'))
                plt.colorbar(fig_obj)
                plt.title('Probability (avg=%.4f)' % np.mean(prob))
                plt.subplot(2, 2, 2)
                fig_obj = plt.imshow(mask, cmap=plt.get_cmap('gray'))
                plt.colorbar(fig_obj)
                plt.title('Mask (%.2f%%)' % (100.0 * np.sum(mask) / mask.size))
                plt.subplot(2, 2, 3)
                plt.plot(np.mean(prob, axis=0))
                plt.plot(np.mean(prob, axis=1))
                plt.legend(['Row', 'Col'])
                plt.title('PDF')
                plt.subplot(2, 2, 4)
                plt.plot(np.mean(mask, axis=0))
                plt.plot(np.mean(mask, axis=1))
                plt.legend(['Row', 'Col'])
                plt.title('PDF')
                plt.tight_layout()
                plt.savefig(os.path.join(figure_dir, 'Parametric_mask.png'))
                print('Saving figure at ' + os.path.join(figure_dir, 'Parametric_mask.png'))
                plt.show(block=False)
                plt.pause(0.01)

            pmask_layer_list = get_list(self.model, ['PMask1DH', 'PMask1DV'])
            for i in range(len(pmask_layer_list)):
                # prob, mask = pmask_layer_list[i].get_weights()[0], pmask_layer_list[i].mask
                prob = np.squeeze(pmask_layer_list[i].get_weights()[0], axis=(0, 3))
                mask = binomial(prob)
                plt.figure()
                plt.title('Probability')
                plt.subplot(2, 2, 1)
                fig_obj = plt.imshow(np.broadcast_to(prob, [256, 256]), cmap=plt.get_cmap('jet'))
                plt.colorbar(fig_obj)
                plt.title('Probability (avg=%.4f)' % np.mean(prob))
                plt.subplot(2, 2, 2)
                fig_obj = plt.imshow(np.broadcast_to(mask, [256, 256]), cmap=plt.get_cmap('gray'))
                plt.colorbar(fig_obj)
                plt.title('Mask (%.2f%%)' % (100.0 * np.sum(mask) / mask.size))
                plt.subplot(2, 2, 3)
                plt.plot(prob)
                plt.grid(True)
                plt.title('PDF')
                plt.tight_layout()
                plt.savefig(os.path.join(figure_dir, 'Parametric_mask.png'))
                print('Saving figure at ' + os.path.join(figure_dir, 'Parametric_mask.png'))
                plt.show(block=False)
                plt.pause(0.01)
        print('-------- Save figures end --------')

# def prune(self, x_train, y_train, x_test, y_test):
#     print('-------- Prune begin --------')
#
#     batchnorm_layer_list = [layer for layer in self.model.layers if 'batch_normalization' in layer.name]
#     mask_layer_list = [layer for layer in self.model.layers if 'mask' in layer.name]
#     num_layers = len(mask_layer_list)
#
#     for t in range(self.config.prune_steps):
#         print('Before prune:')
#         self.validate(x_test=x_test, y_test=y_test)
#
#         print('-------- Prune step {}: {}% begin --------'.format(t, self.config.sparsity[t] * 100))
#
#         prune_mask_vectors = prune_batchnorm(model=self.model, layer_list=batchnorm_layer_list,
#                                              sparsity=self.config.sparsity[t])
#
#         for i in range(num_layers):
#             mask_layer_list[i].set_weights(
#                 [np.reshape(prune_mask_vectors[i], [1, 1, 1, len(prune_mask_vectors[i])])])
#
#         print('After prune:')
#         self.validate(x_test=x_test, y_test=y_test)
#
#         print('Retrain:')
#         ckpt_save_path = os.path.join(self.config.root_dir, 'checkpoints', self.config.model_name,
#                                       'prune_%.2f' % self.config.sparsity[t])
#         log_dir = os.path.join(self.config.root_dir, 'logs', self.config.model_name,
#                                'prune_%.2f' % self.config.sparsity[t])
#         self.callbacks = self.instance.initialize_callbacks(ckpt_save_path=ckpt_save_path, log_dir=log_dir)
#         self.train(x_train, y_train, x_test, y_test)
#
#         print('After retrain:')
#         self.validate(x_test=x_test, y_test=y_test)
#
#         save_dir = os.path.join(self.config.root_dir, 'results', self.config.model_name,
#                                 'prune_%.2f' % self.config.sparsity[t])
#         self.save_model(result_dir=save_dir)
#
#         figure_dir = os.path.join(self.config.root_dir, 'figures', self.config.model_name,
#                                   'prune_%.2f' % self.config.sparsity[t])
#         self.save_metrics(figure_dir=figure_dir)
#
#         print('-------- Prune step {}: {}% end --------'.format(t, self.config.sparsity[t] * 100))
#
#     print('-------- Prune end --------')

# def compress(self):
#     print('-------- Compress begin --------')
#
#     conv_layer_list = [layer for layer in self.model.layers if 'conv' in layer.name]
#     batchnorm_layer_list = [layer for layer in self.model.layers if 'batch_normalization' in layer.name]
#     mask_layer_list = [layer for layer in self.model.layers if 'mask' in layer.name]
#     dense_layer_list = [layer for layer in self.model.layers if 'dense' in layer.name]
#     num_layers = len(mask_layer_list)
#
#     mask_vector = [None] * num_layers
#     final_channel = [None] * num_layers
#
#     # obtain mask and number of channels
#     for i in range(num_layers):
#         mask = mask_layer_list[i].get_weights()[0]
#         mask_vector[i] = mask
#         final_channel[i] = int(np.sum(mask))
#     print('Final channel: ' + str([(i, mask_layer_list[i].name, final_channel[i]) for i in range(num_layers)]))
#
#     self.config.final_channel = final_channel
#     self.config.is_final = True
#     self.config.restore_model = False
#     self.config.bn_mask = False
#     # create final model
#     final_instance = interface.ModelAdapter(self.config)
#     final_model = final_instance.create_model()
#     final_instance.serialize_model()
#
#     final_conv_layer_list = [layer for layer in final_model.layers if 'conv' in layer.name]
#     final_batchnorm_layer_list = [layer for layer in final_model.layers if 'batch_normalization' in layer.name]
#     final_dense_layer_list = [layer for layer in final_model.layers if 'dense' in layer.name]
#
#     # first conv-bn-mask-relu block
#     print('Compress the first conv-bn-mask-relu block ' + conv_layer_list[0].name)
#     conv_weights = conv_layer_list[0].get_weights()
#     bn_weights = batchnorm_layer_list[0].get_weights()
#     cur_mask = mask_vector[0]
#     # kernel
#     conv_weights[0] *= cur_mask
#     conv_weights[0] = conv_weights[0][..., np.where(np.squeeze(cur_mask) == 1)[0]]
#     # bias
#     conv_weights[1] *= np.squeeze(cur_mask)
#     conv_weights[1] = conv_weights[1][..., np.where(np.squeeze(cur_mask) == 1)[0]]
#     # batchnorm: gamma, beta, mean, variance
#     for j in range(len(bn_weights)):
#         bn_weights[j] *= np.squeeze(cur_mask)
#         bn_weights[j] = bn_weights[j][..., np.where(np.squeeze(cur_mask) == 1)[0]]
#     # weights transfer
#     final_conv_layer_list[0].set_weights(conv_weights)
#     final_batchnorm_layer_list[0].set_weights(bn_weights)
#
#     # reduce the in-channel of the convolution in the first Resblock
#     conv_weights = conv_layer_list[1].get_weights()
#     cur_mask = cur_mask.transpose(0, 1, 3, 2)
#     # kernel
#     conv_weights[0] *= cur_mask
#     conv_weights[0] = conv_weights[0][..., np.where(np.squeeze(cur_mask) == 1)[0], :]
#     # weight transfer
#     final_conv_layer_list[1].set_weights(conv_weights)
#
#     # depth-wise convolutional layers in skip connections
#     dwc = [19, 36]
#     for idx in dwc:
#         print('Compress depth-wise ' + conv_layer_list[idx].name)
#         final_conv_layer_list[idx].set_weights(conv_layer_list[idx].get_weights())
#     # remove layer names in list
#     conv_layer_list = np.delete(conv_layer_list, dwc)
#     final_conv_layer_list = np.delete(final_conv_layer_list, dwc)
#
#     # regular conv-bn-mask-relu block weights transfer
#     for i in range(1, num_layers):
#         print('Compress ' + str((i, conv_layer_list[i].name, batchnorm_layer_list[i].name)))
#         conv_weights = conv_layer_list[i].get_weights()
#         bn_weights = batchnorm_layer_list[i].get_weights()
#         pre_mask = mask_vector[i - 1].transpose(0, 1, 3, 2)
#         cur_mask = mask_vector[i]
#         # kernel
#         conv_weights[0] *= cur_mask
#         if i % 2 == 1:
#             conv_weights[0] = conv_weights[0][..., np.where(np.squeeze(cur_mask) == 1)[0]]
#         if i % 2 == 0:
#             conv_weights[0] *= pre_mask
#             conv_weights[0] = conv_weights[0][..., np.where(np.squeeze(pre_mask) == 1)[0], :]
#         # bias
#         conv_weights[1] *= np.squeeze(cur_mask)
#         if i % 2 == 1:
#             conv_weights[1] = conv_weights[1][..., np.where(np.squeeze(cur_mask) == 1)[0]]
#         # batchnorm: gamma, beta, mean, variance
#         for j in range(len(bn_weights)):
#             bn_weights[j] *= np.squeeze(cur_mask)
#             if i % 2 == 1:
#                 bn_weights[j] = bn_weights[j][..., np.where(np.squeeze(cur_mask) == 1)[0]]
#         # weight transfer
#         final_conv_layer_list[i].set_weights(conv_weights)
#         final_batchnorm_layer_list[i].set_weights(bn_weights)
#
#     # dense layer weights transfer
#     for i in range(len(dense_layer_list)):
#         print('Compress ' + final_dense_layer_list[i].name)
#         final_dense_layer_list[i].set_weights(dense_layer_list[i].get_weights())
#
#     print('-------- Compress end --------')
#     return final_model

flags = parse_args()
flags.mode = flags.run_mode
if flags.mode is None:
    raise ValueError('Please provide a run mode')

flags.checkpoint_dir = os.path.join(flags.output_dir, 'checkpoints')
if not os.path.exists(flags.checkpoint_dir):
    os.makedirs(flags.checkpoint_dir)

flags.tensorboard_dir = os.path.join(flags.output_dir, 'tensorboard')
if not os.path.exists(flags.tensorboard_dir):
    os.makedirs(flags.tensorboard_dir)

flags.f_layers = get_list(flags.f_layers_str)
flags.w_layers = get_list(flags.w_layers_str)
flags.f_d_class_sampling = get_list_or_None(flags.f_d_class_sampling_str)
flags.rule_classes = get_list_or_None(flags.rule_classes_str)

# Input pickles
flags.d_pickle = os.path.join(flags.output_dir, flags.d_pickle_name)
flags.U_pickle = os.path.join(flags.data_dir, flags.U_pickle_name)
flags.validation_pickle = os.path.join(flags.data_dir, flags.validation_pickle_name)

# Output pickles
flags.w_infer_out_pickle = os.path.join(flags.output_dir, flags.w_infer_out_pickle_name)
flags.f_infer_out_pickle = os.path.join(flags.output_dir, flags.f_infer_out_pickle_name)

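# get_list / get_list_or_None above evidently parse comma-delimited flag
# strings such as f_layers_str into Python lists. A minimal sketch under that
# assumption (hypothetical -- the project's parsers may coerce types
# differently):
def get_list(s):
    # "512,256,128" -> [512, 256, 128]; empty or None -> []
    return [int(tok) for tok in s.split(',')] if s else []

def get_list_or_None(s):
    # same parsing, but an absent flag maps to None rather than []
    return get_list(s) if s else None
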
def add_timer_callback(time, callback):
    utils.get_list(timers, time).append(callback)

os.makedirs(sr_path)

model_G = Generator(scale=scale)
model_D = Downsampler(scale=scale)
if use_cuda:
    model_G = model_G.cuda()
    model_D = model_D.cuda()

optimizer = torch.optim.Adam([{'params': model_G.parameters()},
                              {'params': model_D.parameters()}], lr=2.5e-4)
scheduler = MultiStepLR(optimizer, milestones=[5000, 7000, 9000], gamma=0.5)  # learning rates
l1 = nn.L1Loss()
# ltloss = loss.LTLoss()

filelist = utils.get_list(lr_path)
idx = 0
for img in filelist:
    idx += 1
    img_name, ext = os.path.splitext(os.path.basename(img))
    logger.info('{:->2d}--> {:>10s}'.format(idx, img_name + ext))
    lr = utils.png2tensor(img)
    h, w = lr.shape[2:]
    net_input = utils.get_noise(3, h, w)
    # hr = utils.png2tensor(hr_path + '/' + img_name.split('x{}'.format(scale))[0] + ext, scale=scale, crop=True)
    if use_cuda:
        net_input = Variable(net_input)
        net_input = net_input.cuda()

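# In this snippet and the IMDN test snippet below, utils.get_list(path)
# evidently lists image files in a directory. A plausible stand-in, assuming
# glob-by-extension semantics (hypothetical -- the projects' helpers may sort
# or filter differently):
import glob
import os

def get_list(dirpath, ext='.png'):
    # return the sorted paths of all files under dirpath ending with ext
    return sorted(glob.glob(os.path.join(dirpath, '*' + ext)))
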
def __init__(self, *args, **kwargs):
    super(NewTopicForm, self).__init__(*args, **kwargs)
    self.fields['title'].widget = forms.TextInput(attrs={'class': 'form-control'})
    self.fields['category'].widget = forms.Select(attrs={'class': 'form-control'})
    self.fields['category'].choices = get_list(Category)

    output[:, :, 0:h_half, w_half:w] \
        = outputlist[1][:, :, 0:h_half, (w_size - w + w_half):w_size]
    output[:, :, h_half:h, 0:w_half] \
        = outputlist[2][:, :, (h_size - h + h_half):h_size, 0:w_half]
    output[:, :, h_half:h, w_half:w] \
        = outputlist[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size]

    return output


filepath = opt.test_hr_folder
if filepath.split('/')[-2] == 'Set5' or filepath.split('/')[-2] == 'Set14':
    ext = '.bmp'
else:
    ext = '.png'

filelist = utils.get_list(filepath, ext=ext)
psnr_list = np.zeros(len(filelist))
ssim_list = np.zeros(len(filelist))
time_list = np.zeros(len(filelist))

model = architecture.IMDN_AS()
model_dict = utils.load_state_dict(opt.checkpoint)
model.load_state_dict(model_dict, strict=True)

start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)

i = 0
for imname in filelist:
    im_gt = sio.imread(imname)
    im_l = sio.imread(opt.test_lr_folder + imname.split('/')[-1])
    if len(im_gt.shape) < 3:

saver = tf.train.Saver(tf.global_variables())
sess = tf.Session(config=config)
ckpt = tf.train.get_checkpoint_state(cfg.model_path)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
    saver.restore(sess, ckpt.model_checkpoint_path)
else:
    sess.run(tf.global_variables_initializer())
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(cfg.train_summary_path, sess.graph)
test_writer = tf.summary.FileWriter(cfg.test_summary_path)
# save = tf.train.Saver(max_to_keep=1, keep_checkpoint_every_n_hours=2)
data_list = get_list(cfg.train_list_path)
for epoch in range(cfg.init_epoch + 1, cfg.train_iter + 1):
    if epoch % 2 != 0:
        random.shuffle(data_list)  # reshuffle the data once per iteration
    # training
    for data in batch_namequeue(data_list, cfg.batch_size):
        label = []
        image = []
        for i in data:
            # each line of data_list was written with a trailing "\n"
            image_path, label_path = i.strip('\n').split(',')
            label.append(read_one_picture(label_path, gray=True, resize_val=255.0))
            image.append(read_one_picture(image_path, gray=True, dtype=np.float32))
        _, lo, ac = sess.run([train_op, model.cost, model.accuracy],