def apply_decluster(self):
    """
    Apply the window method to the whole catalog and write mainshocks to file.

    Loads the earthquake array from the JMA catalog, declusters it into
    mainshocks/aftershocks, persists the result with pickle, and records
    the mainshocks in a results file.
    """
    # get instances of the classes we'll need
    catalog = Catalog()
    window_var = WindowVar()
    # from the catalog we want, load the earthquake array into memory
    earthquake_array = catalog.get_earthquake_array('../catalogs/new_jma.txt')
    # decluster the array, separating mainshocks from aftershocks
    declustered_array = window_var.decluster(earthquake_array)
    # persist the declustered array with pickle; `with` guarantees the handle
    # is closed (the original leaked both the write and read handles, and the
    # immediate read-back into an unused variable was dead code)
    with open('data_persistence/declustered_array_window_var', 'wb') as dump_file:
        pickle.dump(declustered_array, dump_file)
    # record the mainshocks on a catalog
    catalog.record_mainshocks(declustered_array,
                              file_write='../results/window_var_method/mainshocks.txt',
                              file_read='../catalogs/jma.txt')
def analyse_regions(self, array_path):
    """
    Report, for each Japanese region, how many quakes located outside the
    region caused shocks inside it.

    :param array_path: path to a pickled array of earthquake objects
    """
    # auxiliary object used only to obtain region bounds
    catalog = Catalog()
    # load the earthquake array; `with` closes the handle (the original
    # passed a bare open() into pickle.load and leaked it)
    with open(array_path, 'rb') as array_file:
        quakes = pickle.load(array_file)
    # (region name, bounds getter) pairs, analysed in a fixed order
    regions = [
        ('kanto', catalog.get_kanto_bounds),
        ('kansai', catalog.get_kansai_bounds),
        ('tohoku', catalog.get_tohoku_bounds),
        ('east_japan', catalog.get_east_japan_bounds),
    ]
    for name, get_bounds in regions:
        self.analyse_region(quakes, get_bounds(), name)
def read_excel(file='slab++.xlsx'):
    """
    Read slab parameters from an Excel workbook into a Catalog of Slab objects.

    Row 3 of the first sheet holds parameter names; data rows start at row 4.
    Each cell is stored as float when possible, otherwise as a string.

    :param file: path of the workbook to read
    :return: Catalog populated with one Slab per data row
    """
    wb = open_workbook(file)
    sheet = wb.sheets()[0]
    number_of_rows = sheet.nrows
    number_of_columns = sheet.ncols
    slabs = Catalog()
    for row in range(4, number_of_rows):
        slab = Slab()
        for col in range(number_of_columns):
            # parameter name for this column comes from header row 3
            par = str(sheet.cell(3, col).value)
            if not par:
                continue  # unnamed column: nothing to store
            value = sheet.cell(row, col).value
            try:
                slab.params[par] = float(value)
            except (TypeError, ValueError):  # narrowed from a bare except
                slab.params[par] = str(value)
        # temperature at 600 km depth derived from the thermal parameter
        phi = slab.params['thermalPar']
        z = 6.0           # depth in units of 100 km
        Ta = 1338.0       # ambient mantle temperature
        Tz = Ta * (1.0 - ((2.0 / np.pi) * np.exp(-1.0 * ((np.power(np.pi, 2.0) * z) / (np.power(2.32, 2.0) * phi)))))
        slab.params['Temp600'] = Tz
        slabs.append(slab)
    return slabs
def fun_new_article_pic(user, article_id=0, article_type='blog', title='', url='', thumb='', father_id=0, group_id='-1'):
    # Create a new picture entry for an article: resolve the target About
    # object and verify the caller's write permission.
    # NOTE(review): Python 2 syntax; the body appears truncated in this chunk
    # (AF_Object is assigned but never used here) — confirm against the full file.
    isnew = 0
    if article_type not in Article_Type:
        # unsupported article type
        return [1, '不支持当前文章类型!']
    if article_type == "about":
        # personal about page: operate on the user's own About object
        AF_Object = About(_id=user.about._id)
        isnew = 0
    elif article_type == "book-about":
        try:
            # book summary: the About object hangs off the Catalog (book)
            book = Catalog(_id=group_id)
            AF_Object = book.about
            # verify the caller has write authority on this catalog
            limit = book.authority_verify(user)
            if test_auth(limit, A_WRITE) is False:
                return [1, '您无权修改摘要!']
        except Exception, err:
            # catalog lookup failed: log the traceback and report not-found
            logging.error(traceback.format_exc())
            logging.error('Catalog not exist, id %s' % group_id)
            return [1, '未找到该目录!']
def run(self):
    # Worker loop (Python 2): wait under the condition variable until a
    # query is posted, then run it against a freshly opened Catalog and
    # invoke the end callback; the catalog is always closed afterwards.
    condition=self.condition
    path=self.path
    end_cb=self.query_end_cb
    while(True):
        condition.acquire()
        try:
            # block until a non-empty query string is posted by another thread
            while not self.query:
                condition.wait()
            q=self.query
            self.query=""  # consume the query so we wait again next round
        finally:
            condition.release()
        def cb(catalog, entry):
            # forward each catalog entry together with the query text
            return self.catalog_cb(q, catalog, entry)
        # a fresh Catalog per query; closed in the finally below
        catalog=Catalog(path)
        try:
            print "run query: "+q
            catalog.query(q, cb)
            end_cb(q)
        finally:
            catalog.close()
def read_syracuse_thermal(sub='d80'):
    """
    Read thermal parameters from a Syracuse et al. data file into a Catalog.

    :param sub: file-name suffix selecting the model variant (e.g. 'd80')
    :return: Catalog of Slab objects with thermal parameters filled in
    """
    slabs = Catalog()  # initialize Catalog object
    # `with` closes the data file (the original leaked the handle)
    with open('data/syracuseetal_parameters_' + sub + '.txt', 'r') as data_file:
        for line in data_file.readlines():
            val = line.split()
            if val[0] != '#':  # skip comments at top of file
                name = val[0]
                savei = 0
                # slab names may span several tokens: scan forward until a
                # token parses as a number, folding name tokens together
                for i in range(1, 4):
                    try:
                        float(val[i])
                        break
                    except ValueError:  # narrowed from a bare except
                        savei = i
                        name = name + ' ' + val[i]
                slab = Slab(name)  # initialize slab object
                slab.params['transition_depth'] = float(val[savei + 1])
                slab.params['transition_T'] = float(val[savei + 2])
                slab.params['slab_T'] = float(val[savei + 3])
                slab.params['moho_T'] = float(val[savei + 4])
                slab.params['max_mantle_T'] = float(val[savei + 5])
                slab.params['max_mantle_T_depth'] = float(val[savei + 6])
                slab.params['transition_offset'] = float(val[savei + 7])
                slab.params['slab_surface_T_30km'] = float(val[savei + 8])
                slab.params['slab_surface_T_240km'] = float(val[savei + 9])
                slab.params['min_slab_T_240km'] = float(val[savei + 10])
                slabs.append(slab)  # add slab object to catalog
    return slabs
def GetCatalogRecords(firebase: firebase) -> None:
    """
    Fetch existing catalog records from the database and append them to the
    global `catalogs` collection; `catalogId` ends up as the last key seen.

    :param firebase: a firebase connection
    """
    global catalogs
    global catalogId
    records = firebase.get('/catalog', None)
    if records is None:
        return
    keys = list(records.keys())
    if keys:
        # same net effect as assigning catalogId on every iteration
        catalogId = keys[-1]
    for key in keys:
        entry = Catalog()
        entry.setId(key)
        entry.setDocumentName(records[key]['document_name'])
        entry.setSemester(records[key]['semester'])
        entry.setYear(records[key]['year'])
        catalogs.append(entry)
def fun_article_delete_src(user, article_id=0, article_type='blog', src_type='code', alias=0, group_id='-1'):
    # Delete a source attached to an article after validating the article
    # and source types and the caller's write permission.
    # NOTE(review): Python 2 syntax; the body appears truncated in this chunk
    # (AF_Object is assigned but never used here) — confirm against the full file.
    if article_type not in Article_Type:
        # unsupported article type
        return [1, '不支持当前文章类型!']
    if src_type not in Agree_Src:
        # unsupported source type
        return [1, '不支持当前类型的资源!']
    if article_type == "about":
        # personal about page: operate on the user's own About object
        AF_Object = About(_id=user.about._id)
    elif article_type == "book-about":
        try:
            book = Catalog(_id=group_id)
            AF_Object = book.about
            # verify the caller has write (hence delete) authority
            limit = book.authority_verify(user)
            if test_auth(limit, A_WRITE) is False:
                return [1, '您无权删除!']
        except Exception, err:
            # catalog lookup failed: log traceback and report not-found
            logging.error(traceback.format_exc())
            logging.error('Catalog not exist, id %s' % group_id)
            return [1, '未找到该知识谱!']
def catalog_init_meta(catalog_id, role):
    """
    Initialize catalog metadata, assigning *role* as the owner.

    :param catalog_id: id used to look up the catalog descriptor in the registry
    :param role: owner to record in the freshly initialized metadata
    """
    entry = registry.lookup(catalog_id)[0]
    Catalog(catalog_factory, entry['descriptor']).init_meta(owner=role)
def catalog_get_meta(catalog_id, key=None):
    """
    Return catalog metadata.

    :param catalog_id: id used to look up the catalog descriptor in the registry
    :param key: optional metadata key; when None the full metadata is returned
    """
    descriptor = registry.lookup(catalog_id)[0]['descriptor']
    catalog = Catalog(catalog_factory, descriptor)
    return catalog.get_meta(key)
def simple_report(self):
    """
    Plot the number of earthquakes above each magnitude threshold from 0.0
    to 10.0 in 0.1 steps, just to get an idea of the catalog; the figure is
    saved to ../results/earthquakes.jpg.
    """
    catalog = Catalog()
    # magnitude thresholds and the matching earthquake counts
    mag_values = []
    num_quakes = []
    # drive the loop with an integer counter: repeatedly adding 0.1 to a
    # float (as the original did) accumulates representation error
    for step in range(101):  # 0.0 .. 10.0 inclusive
        mag_limit = step / 10.0
        # count how many earthquakes are above that limit
        num_quakes.append(catalog.count_earthquakes(mag_limit))
        mag_values.append(mag_limit)
    # plot graph and save result
    plt.plot(mag_values, num_quakes, 'ro')
    plt.xlabel('Magnitude ')
    plt.ylabel('Number of earthquakes')
    plt.savefig('../results/earthquakes.jpg')
    plt.close()
def catalog_destroy(catalog_id):
    """
    Destroy the catalog identified by *catalog_id*.

    :param catalog_id: id used to look up the catalog descriptor in the registry
    """
    entry = registry.lookup(catalog_id)[0]
    Catalog(catalog_factory, entry['descriptor']).destroy()
def __init__(self, code="", area_type=1, epsilon=5, media_path="", with_log=True, catalog="", coord_out="EPSG:3857", center_only=False, with_proxy=False):
    # Area object: holds geometry/extents/attributes for a cadastral code
    # and, when possible, restores itself from a local catalog or downloads
    # fresh feature info.
    self.with_log = with_log
    self.area_type = area_type
    self.media_path = media_path
    self.image_url = ""
    self.xy = []  # [[[area1], [hole1], [holeN]], [[area2]]]
    self.image_xy_corner = []  # cartesian coord from image, for draw plot
    self.width = 0
    self.height = 0
    self.image_path = ""
    self.extent = {}
    self.image_extent = {}
    self.center = {'x': None, 'y': None}
    self.center_only = center_only
    self.attrs = {}
    self.epsilon = epsilon
    self.code = code
    self.code_id = ""
    # file-safe name: colons in the cadastral code become underscores
    self.file_name = self.code[:].replace(":", "_")
    self.with_proxy = with_proxy
    self.coord_out = coord_out
    # build the service URLs from templates parameterized by area_type
    t = string.Template(SEARCH_URL)
    self.search_url = t.substitute({"area_type": area_type})
    t = string.Template(FEATURE_INFO_URL)
    self.feature_info_url = t.substitute({"area_type": area_type})
    if not self.media_path:
        # self.media_path = os.path.dirname(os.path.realpath(__file__))
        self.media_path = os.getcwd()
    if not os.path.isdir(self.media_path):
        os.makedirs(self.media_path)
    if catalog:
        # try to restore a previously downloaded area from the catalog
        self.catalog = Catalog(catalog)
        restore = self.catalog.find(self.code)
        if restore:
            self.restore(restore)
            self.log("%s - restored from %s" % (self.code, catalog))
            return  # restored: no download needed
    if not code:
        return  # nothing to look up
    # cache miss: download the feature info and derive the geometry
    feature_info = self.download_feature_info()
    if feature_info:
        geometry = self.get_geometry()
        if catalog and geometry:
            # persist the freshly downloaded area back into the catalog
            self.catalog.update(self)
            self.catalog.close()
    else:
        self.log("Nothing found")
def read_syracuse(): file = open('data/syracuseetal_table1.txt','r') slabs=Catalog() # Initialize Catalog object for line in file.readlines(): val = line.split() if val[0]!= '#':# skip comments at top of file name = val[0] savei=0 for i in range(1,4): # Read in slab names that are more than one string in lenght try: float(val[i]) break except: savei=i name = name + ' ' +val[i] print(val,savei) slab=Slab(name) # Initialize slab object slab.params['lon']= float(val[savei+1]) slab.params['lat'] = float(val[savei+2]) slab.params['H'] = float(val[savei+3]) slab.params['arc_trench_distance'] = float(val[savei+4]) slab.params['slab_dip'] = float(val[savei+5]) slab.params['Vc'] = float(val[savei+6]) slab.params['age'] = float(val[savei+7]) slab.params['decent_rate'] = float(val[savei+8]) slab.params['thermal_parameter'] = float(val[savei+9]) slab.params['sediment_thickness'] = float(val[savei+10]) slab.params['subducted_sediment_thickness'] = float(val[savei+11]) slab.params['upper_plate_type'] = val[savei+12] #slab.upper_plate_thickness = float(val[savei+12]) #slab.upper_plate_age = float(val[savei+13]) slabs.append(slab) # Add slab object to catalot return slabs
def start_viewfs():
    """
    Build (if needed) and mount the view filesystem.

    Rebuilds the catalog when --rebuild is given or no catalog exists, starts
    the watcher subprocess, builds the tree view, and mounts the filesystem.
    Exits with status 0 after a pure rebuild, or 2 if the watcher crashed
    before mounting.
    """
    if args.rebuild or not os.path.exists(config.watch.catalog_path):
        logger.info('building catalog...')
        catalog = Catalog.build(config.watch.collection_root)
        catalog.save(config.watch.catalog_path)
        # drop the reference and collect so the build's memory is released
        catalog = None
        gc.collect()
    if args.rebuild:
        # rebuild-only invocation: nothing left to do
        sys.exit(0)
    logging.basicConfig(
        level=logging.DEBUG if config.verbose else logging.INFO)
    watch_process = subprocess.Popen(['python3', 'watch.py'])
    logger.info('building tree...')
    catalog = Catalog.load(config.watch.catalog_path,
                           config.watch.collection_root)
    tree_view = TreeView.from_catalog(catalog)
    catalog = None
    gc.collect()
    notifier = SystemNotifier()
    # `is not None` replaces the original `!= None` identity-vs-equality slip
    if watch_process.poll() is not None:
        # watch crashed
        notifier.stopping()
        sys.exit(2)
    logger.info('mounting filesystem on ' + config.view.view_root)
    filesystem = Filesystem(logger, tree_view)
    notifier.ready()
    filesystem.start()
def catalog_remove_meta(catalog_id, key, value=None):
    """
    Remove catalog metadata.

    :param catalog_id: id used to look up the catalog descriptor in the registry
    :param key: metadata key to remove
    :param value: optional value, forwarded to Catalog.remove_meta
    """
    descriptor = registry.lookup(catalog_id)[0]['descriptor']
    catalog = Catalog(catalog_factory, descriptor)
    catalog.remove_meta(key, value)
def read_seracuse():
    """
    Read thermal parameters from the Syracuse et al. parameters file into
    a Catalog of Slab objects (attributes set directly on each Slab).

    :return: Catalog of Slab objects
    """
    slabs = Catalog()
    # `with` closes the data file (the original leaked the handle)
    with open('data/syracuseetal_parameters.txt', 'r') as data_file:
        for line in data_file.readlines():
            val = line.split()
            if val[0] == '#':
                # robustness: skip comment lines, as the sibling readers do
                # (the original crashed on float() for such lines)
                continue
            name = val[0]
            savei = 0
            # slab names may span several tokens: scan forward until a
            # token parses as a number, folding name tokens together
            for i in range(1, 4):
                try:
                    float(val[i])
                    break
                except ValueError:  # narrowed from a bare except
                    savei = i
                    name = name + ' ' + val[i]
            slab = Slab(name)
            slab.transition_depth = float(val[savei + 1])
            slab.transition_T = float(val[savei + 2])
            slab.slab_T = float(val[savei + 3])
            slab.moho_T = float(val[savei + 4])
            slab.max_mantle_T = float(val[savei + 5])
            slab.max_mantle_T_depth = float(val[savei + 6])
            slab.transition_offset = float(val[savei + 7])
            slab.slab_surface_T_30km = float(val[savei + 8])
            slab.slab_surface_T_240km = float(val[savei + 9])
            slab.min_slab_T_240km = float(val[savei + 10])
            slabs.append(slab)
    return slabs
class PublicLibrary():
    """Library facade owning a catalog and its loan administration."""

    def __init__(self):
        # build the catalog with its initial books
        self.catalog = Catalog()
        self.catalog.initBooks()
        # loan administration works against the same catalog
        self.loanAdministration = LoanAdministration(self.catalog)
        self.loanAdministration.initCustomers()
        print(self)

    def makeBackup(self, namefile: str):
        """
        Serialize this PublicLibrary object to a file using pickle.

        :param namefile: name of the file to save to
        :return: nothing
        """
        with open(namefile, 'wb') as backup_file:
            pickle.dump(self, backup_file, pickle.HIGHEST_PROTOCOL)

    def restoreBackup(self, name: str):
        """
        Load a pickled object from the named file and print it.

        :param name: name of the file to load
        :return: nothing
        """
        with open(name, 'rb') as backup_file:
            restored = pickle.load(backup_file)
        print(restored)
def do_chi_square(self, catalog_file, alpha, seconds):
    """
    Chi-square hypothesis test for whether a catalog is Poissonian.

    H0 -> catalog is poissonian
    H1 -> catalog is not poissonian

    Prints the p-value and the significance level.

    :param catalog_file: path of the catalog to test
    :param alpha: significance level
    :param seconds: window length used when counting occurrences
    """
    observed_frequencies = Catalog().get_observed_frequencies(catalog_file,
                                                              seconds)
    # warn (once) if any observed frequency is too small for the test
    if any(freq < 5 for freq in observed_frequencies):
        print("Warning: the number of occurrences appear to be too low!")
    # lambda (rate of occurrence) is estimated from the data -> 1 restriction
    restrictions = 1
    p_value = chisquare(observed_frequencies, ddof=restrictions)[1]
    print("the p_value was: ", p_value)
    print("the significance level was: ", alpha)
def GetExistingCatalogRecords(firebase: firebase, documentName: str, documentSemester: str, documentYear: str) -> None:
    """
    Fetch existing catalog records from the database and append them to the
    global `existingCatalogRecords` collection.

    :param firebase: a firebase connection
    :param documentName: catalog document name (not consulted here)
    :param documentSemester: catalog academic semester (not consulted here)
    :param documentYear: catalog year (not consulted here)
    """
    global existingCatalogRecords
    records = firebase.get('/catalog', None)
    if records is None:
        return
    for key in list(records.keys()):
        entry = Catalog()
        entry.setId(key)
        entry.setDocumentName(records[key]['document_name'])
        entry.setSemester(records[key]['semester'])
        entry.setYear(records[key]['year'])
        existingCatalogRecords.append(entry)
def resolve_assortment_tree(assortment):
    """
    Build a Catalog tree from flat assortment rows.

    Each row contributes a leaf Catalog (name + [price, image]) inserted at
    the path given by the comma-separated category string in column 5.
    """
    root = Catalog('Наш каталог', [])
    for item in assortment:
        # заглушка, пока фоточки неоткуда грузить
        # item[4] = 'https://megaflowers.ru/pub/bouquet/vse-budet-horosho_m.jpg'
        leaf = Catalog(item[1], [str(item[3]), item[4]])
        insert_into_cat(root, item[5].split(','), leaf)
    return root
def fun_article_update_src(user, article_id=0, article_type='blog', src_type='code', alias='1', title='', body='', source='', code_type='python', math_type='inline', group_id='-1'):
    '''
    Update a source (lib) of an article; article_type is about|blog|comment.
    article_id must be given, and alias too: we find the lib _id, create the
    object, and update it.
    NOTE(review): Python 2 syntax; body appears truncated in this chunk
    (AF_Object assigned but never used here) - confirm against the full file.
    '''
    if article_type not in Article_Type:
        # unsupported article type
        return [1, '不支持当前文章类型!']
    if src_type not in Agree_Src:
        # unsupported source type
        return [1, '暂不支持当前资源的类型!']
    if title is None:
        # a title/name is mandatory
        return [1, '请您填写标题/名称!']
    if src_type != 'image':
        if body is None:
            if src_type == 'r':
                # references may have an empty body but must carry a valid source URL
                if source is None:
                    return [1, '出处不能为空']
                if re.search(r'^(http|https|ftp):\/\/.+$', source) is None:
                    return [1, '请填写链接地址或者引用真实内容!']
                body = ''
            else:
                return [1, '内容不能为空!']
        if src_type == 'math':
            # normalize the math display mode, then encode the body
            if math_type not in ['display', 'inline']:
                math_type = 'display'
            body = math_encode(xhtml_unescape(body))
        if src_type == 'code':
            if code_type not in Agree_Code:
                return [1, '目前不支持此类型的程序代码!']
    if article_type == "about":
        # personal about page: operate on the user's own About object
        AF_Object = About(_id=user.about._id)
    elif article_type == "book-about":
        try:
            book = Catalog(_id=group_id)
            AF_Object = book.about
            # verify the caller has write authority on this catalog
            limit = book.authority_verify(user)
            if test_auth(limit, A_WRITE) is False:
                return [1, '您无权修改!']
            pass
        except Exception, err:
            # catalog lookup failed: log traceback and report not-found
            logging.error(traceback.format_exc())
            logging.error('Catalog not exist, id %s' % group_id)
            return [1, '未找到该知识谱!']
class Extractor(object):
    # Abstract base (Python 2) for course extractors: walks raw HTML files in
    # an input directory, extracts each into a Catalog, and can dump the
    # accumulated catalog to CSV.
    __metaclass__ = ABCMeta
    name = 'Course Extractor'

    def __init__(self, spider=None, input_directory='raw_output'):
        """
        :param spider: spider supplying the raw output directory, name and
            timestamp; when None, fall back to *input_directory* and today.
        :type spider: CourseSpider
        :return:
        """
        if spider is None:
            self.input_directory = input_directory
            self.catalog = Catalog()
            self.name = self.input_directory
            self.timestamp = date.today()
        else:
            self.input_directory = spider.get_raw_output()
            self.catalog = Catalog()
            self.name = spider.get_name()
            self.timestamp = spider.get_timestamp()

    def set_input_directory(self, directory):
        # set the directory of raw html files
        self.input_directory = directory

    @abstractmethod
    def extract_each(self, filename):
        """
        Extract a single raw HTML file.

        :param filename: path of the file to extract
        :return: extracted catalog
        :rtype: Catalog
        """
        pass

    def extract_all(self):
        # extract every file in the input directory, folding each result
        # into self.catalog
        file_list = [self.input_directory+'/' + f for f in os.listdir(self.input_directory)]
        num_files = len(file_list)
        print '[INFO] start extracting from %s' % self.name
        print '[INFO] total files: %d' % num_files
        for each in file_list:
            print '[INFO] processing file: %s, %d remaining' % (each, num_files)
            self.catalog.append_catalog(self.extract_each(each))
            num_files -= 1
        print '[SUMMARY] finished extraction'

    def to_csv(self):
        # dump the accumulated catalog, named after the extractor + timestamp
        print '[INFO] saving catalog to file'
        self.catalog.to_csv(self.name+'__'+str(self.timestamp), sep=',')

    def print_result(self):
        print self.catalog
def plot_histograms(self, folder):
    """
    Plot per-year earthquake histograms for japan and its sub-regions.

    For every region two histograms are produced from the classified quakes
    file: one restricted to mainshocks and one including all quakes.

    :param folder: path of the method's result folder,
                   e.g. '../results/window_method/'
    """
    catalog = Catalog()
    source = folder + 'classified_quakes.txt'
    # (region name, bounds) pairs; the first entry covers all of japan —
    # this data-driven loop replaces ten near-identical copy-pasted calls
    regions = [
        ('japan', [0.0, 360.0, 0.0, 360.0]),
        ('kanto', catalog.get_kanto_bounds()),
        ('kansai', catalog.get_kansai_bounds()),
        ('tohoku', catalog.get_tohoku_bounds()),
        ('east_japan', catalog.get_east_japan_bounds()),
    ]
    for name, bounds in regions:
        # histogram considering only mainshocks
        self.show_quakes_by_year(source, bounds,
                                 folder + name + '_mainshocks.jpg',
                                 'Mainshocks by year on ' + name + ' region',
                                 mainshocks=True)
        # histogram considering everything
        self.show_quakes_by_year(source, bounds,
                                 folder + name + '_all.jpg',
                                 'Quakes by year on ' + name + ' region',
                                 mainshocks=False)
def __init__(self, obs, sparseField=None, excludeList=None):
    """
    Adjusted constructor allowing a caller-supplied excludeList of fields
    to be excluded from the xml markup.

    :param obs: observation, forwarded unchanged to Catalog
    :param sparseField: optional sparse field, forwarded unchanged
    :param excludeList: fields to exclude; defaults to ["X_IMAGE", "Y_IMAGE"]
    """
    # build a fresh default list per call: the original used a mutable
    # default argument, which is shared across every instantiation
    if excludeList is None:
        excludeList = ["X_IMAGE", "Y_IMAGE"]
    Catalog.__init__(self, obs, sparseField, excludeList)
def fun_new_book(user=None, name="", summary=""):
    """Create a new Catalog (book) owned by *user*; returns [0, id] on
    success or [1, message] when the caller is not logged in."""
    if user is None:
        return [1, "请您先登陆!"]
    new_book = Catalog()
    new_book.owner = user
    new_book.name = name
    new_book.about.body = summary
    # register the creator as the book's manager
    book_add_manager(new_book, user)
    return [0, str(new_book._id)]
def fun_new_book(user=None, name='', summary=''):
    """Create and register a new book (Catalog) for a logged-in user."""
    if user is None:
        # anonymous callers may not create books
        return [1, '请您先登陆!']
    created = Catalog()
    created.owner = user
    created.name = name
    created.about.body = summary
    book_add_manager(created, user)
    return [0, str(created._id)]
def test_catalog(self):
    """Catalog should resolve every spelling variant of the phone query."""
    queries = (
        "Бот, скажи телефон Така",
        "Бот, скажи телефон така",
        "Бот, скажи телефон таки",
    )
    for query in queries:
        self.assertEqual(Catalog(query).get_help(), "+7-905-776-20-86")
def plot_heat_all(self): """ receives nothing plot heat maps for all regions of japan returns nothing """ # auxiliar classes that we'll be needed draw = Draw() cat = Catalog() # information relative to the regions BOUNDS = 0 ZOOM_INDEX = 1 ZOOM_VALUE = 9 BINS = 2 info = {} #info["kanto"] = [cat.get_kanto_bounds(), ZOOM_VALUE, 25] info["kansai"] = [cat.get_kansai_bounds(), ZOOM_VALUE, 25] #info["tohoku"] = [cat.get_tohoku_bounds(), ZOOM_VALUE, 25] #info["east_japan"] = [cat.get_east_japan_bounds(), ZOOM_VALUE, 25] # list containing the number of clusters to plot and the time period to consider time_period = [[0.0, 12 * 366 * 24.0 * 3600.0]] # get all valid combinations combinations = list(itertools.product(info, time_period)) print(combinations) REGION_IND = 0 TIME_IND = 1 # iterate through all combinations for comb in combinations: print(comb) # get region region = comb[REGION_IND] # folder we save the results into folder = '../images/single_link/' # obtain array of quakes path = '../results/single_link/declustered_array_' + region quakes = pickle.load(open(path, 'rb')) # plot background for current region #draw.plot_quakes_coast_slc(quakes, comb[TIME_IND]) #call(["mv", "temp.png", folder + region + "_back_heat_coast-1.png"]) #input("Edit the image and then press Enter") # plot cluster for the current data we have stat = Statistic() self.plot_heat(quakes, comb[TIME_IND], info[region][BINS], folder + region +"_back_heat_coast-1.png") # save it in the correct format and do cleaning call(["mv", "temp.png", folder + region + "_heat_coast.png"])
def anonymisation(identifier):
    """
    Anonymise the medical data of one examination.

    :param identifier: examination identifier
    """
    exam = Catalog.get(identifier)
    # update the status in the database
    exam.status = Status.finished_anonymisation
    Catalog.commit()
def get_region_bounds(region):
    """Return the geographic bounds for a named japanese region; terminates
    the program with a message when the region is unknown."""
    cat = Catalog()
    # dispatch table instead of an if/elif chain; getters are called lazily
    getters = {
        "kanto": cat.get_kanto_bounds,
        "kansai": cat.get_kansai_bounds,
        "tohoku": cat.get_tohoku_bounds,
        "east_japan": cat.get_east_japan_bounds,
    }
    if region not in getters:
        exit("region not known")
    return getters[region]()
def status(identifier):
    """
    Update the database status once data reconstruction has finished.

    :param identifier: examination identifier
    """
    exam = Catalog.get(identifier)
    # update the status in the database
    exam.status = Status.reco_finished
    Catalog.commit()
def scan_status(identifier):
    """
    Check the PET scanner's procedure status; when the scan completes,
    update the examination status in the system and notify the UI.

    :param identifier: examination identifier
    """
    exam = Catalog.get(identifier)
    # update the status in the database
    exam.status = Status.finished_scanning
    Catalog.commit()
    UI.communique("The Scan was executed")
def plot_clusters_all(self): """ plot clusters for all regions of japan you can select the number of clusters and the time period you want """ # auxiliar classes that we'll be needed draw = Draw() cat = Catalog() # information relative to the regions BOUNDS = 0 ZOOM_INDEX = 1 ZOOM_VALUE = 9 info = {} info["kanto"] = [cat.get_kanto_bounds(), ZOOM_VALUE] #info["kansai"] = [cat.get_kansai_bounds(), ZOOM_VALUE] #info["tohoku"] = [cat.get_tohoku_bounds(), ZOOM_VALUE] #info["east_japan"] = [cat.get_east_japan_bounds(), ZOOM_VALUE] # list containing the number of clusters to plot and the time period to consider num_cluster = [10] time_period = [[0.0, 12 * 366 * 24.0 * 3600.0]] # get all valid combinations combinations = list(itertools.product(info, num_cluster, time_period)) REGION_IND = 0 CLUSTER_IND = 1 TIME_IND = 2 # iterate through all combinations for comb in combinations: # get region region = comb[REGION_IND] # folder we save the results into folder = '../images/single_link/' # obtain array of quakes path = '../results/single_link/declustered_array_' + region quakes = pickle.load(open(path, 'rb')) # plot background for current region draw.plot_quakes_coast_slc(quakes, comb[TIME_IND], num_clusters = comb[CLUSTER_IND]) call(["mv", "temp.png", folder + region + "_back_coast-1.png"]) input("Edit the image and then press Enter") # plot cluster for the current data we have self.plot_clusters(quakes, comb[CLUSTER_IND], comb[TIME_IND], folder + region +"_back_coast-1.png") # save it in the correct format and do cleaning call(["mv", "temp.png", folder + region + "_clusters_coast.png"])
def __init__(self, config_file):
    """
    Trains and saves a model using Keras and TensorFlow.

    :param config_file: json config file with settings including hyperparameters
    """
    # load all settings first: everything below depends on them
    with open(config_file) as config_handle:
        self.config = json.load(config_handle)
    # build and tokenize the lyrics catalog
    self.catalog = Catalog()
    self.catalog.add_file_to_catalog(self.config['lyrics_file_path'])
    self.catalog.tokenize_catalog()
    self.is_interactive = self.config['is_interactive']
def __init__(self, **kwargs):
    """
    Build a submission record from keyword arguments.

    Required kwargs: 'file_name', 'geolocation', 'start_timestamp'.
    Optional: 'end_timestamp' (falls back to the start timestamp).
    """
    self.all_forms = self.get_all_items('form')
    self.all_catalogs = self.get_all_items('catalog')
    self.form_id = self.get_item_id_from_file(kwargs['file_name'], 'form')
    self.catalog_id = self.get_item_id_from_file(kwargs['file_name'], 'catalog')
    self.geolocation = kwargs["geolocation"]
    self.start_timestamp = kwargs["start_timestamp"]
    # bug fix: end_timestamp was assigned kwargs["start_timestamp"] (copy-paste);
    # read the real end timestamp, falling back to start for old callers
    self.end_timestamp = kwargs.get("end_timestamp", kwargs["start_timestamp"])
    #self.created_at = kwargs["created_at"]
    #self.answers = kwargs["answers"]
    #self.is_catalog = kwargs["is_catalog"]
    self.catalog = Catalog()
def start_reconstruction(identifier):
    """
    Queue the data of the given examination for reconstruction.

    :param identifier: examination identifier
    """
    exam = Catalog.get(identifier)
    # update the status in the database
    exam.status = Status.reco_queued
    Catalog.commit()
    UI.communique("Reconstruction queued")
def register(identifier):
    """
    Register a medical-image reconstruction with the compute cluster and
    record the new status in the database.

    :param identifier: examination identifier
    """
    # the cluster job identifier is generated and stored via the catalog
    exam = Catalog.get(identifier)
    # update the status in the database
    exam.status = Status.reco_registered
    Catalog.commit()
    UI.communique("Reconstruction registered")
def scan(identifier):
    """
    Tell the PET scanner (via its dedicated low-level protocol) to start the
    examination described by the DICOM procedure file, and record the status.

    :param identifier: examination identifier
    """
    exam = Catalog.get(identifier)
    # update the status in the database
    exam.status = Status.scanning
    Catalog.commit()
    UI.communique("Scanning patient")
def fun_article_update_src(user, article_id=0, article_type='blog', src_type='code', alias='1', title='', body='', source='', code_type='python', math_type='inline', group_id='-1'):
    '''
    Update a source (lib) of an article; article_type is about|blog|comment.
    article_id must be given, and alias too: we find the lib _id, create the
    object, and update it.
    NOTE(review): Python 2 syntax; body appears truncated in this chunk
    (AF_Object assigned but never used here) - confirm against the full file.
    '''
    if article_type not in Article_Type:
        # unsupported article type
        return [1, '不支持当前文章类型!']
    if src_type not in Agree_Src:
        # unsupported source type
        return [1, '暂不支持当前资源的类型!']
    if title is None:
        # a title/name is mandatory
        return [1, '请您填写标题/名称!']
    if src_type != 'image':
        if body is None:
            if src_type == 'r':
                # references may have an empty body but must carry a valid source URL
                if source is None:
                    return [1, '出处不能为空']
                if re.search(r'^(http|https|ftp):\/\/.+$', source) is None:
                    return [1, '请填写链接地址或者引用真实内容!']
                body = ''
            else:
                return [1, '内容不能为空!']
        if src_type == 'math':
            # normalize the math display mode, then encode the body
            if math_type not in ['display', 'inline']:
                math_type = 'display'
            body = math_encode( xhtml_unescape(body) )
        if src_type == 'code':
            if code_type not in Agree_Code:
                return [1, '目前不支持此类型的程序代码!']
    if article_type == "about":
        # personal about page: operate on the user's own About object
        AF_Object = About(_id=user.about._id)
    elif article_type == "book-about":
        try:
            book = Catalog(_id=group_id)
            AF_Object = book.about
            # verify the caller has write authority on this catalog
            limit = book.authority_verify(user)
            if test_auth(limit, A_WRITE) is False:
                return [1, '您无权修改!']
            pass
        except Exception, err:
            # catalog lookup failed: log traceback and report not-found
            logging.error(traceback.format_exc())
            logging.error('Catalog not exist, id %s' % group_id)
            return [1, '未找到该知识谱!']
def mpps_completed(identifier):
    """
    N-CREATE handler setting the status to completed, meaning the
    examination has finished.

    :param identifier: examination identifier
    """
    # parse the MPPS "completed" DICOM file (force=True: no preamble check)
    pydicom.dcmread("mpps_completed.dcm", force=True)
    exam = Catalog.get(identifier)
    # store the path of the mpps file in the database record
    exam.path = "C:\\Users\\Anna\\Desktop\\pynetdicom_git_clone\\pynetdicom3\\pynetdicom3\\apps\\findscu\\mpps_completed.dcm"
    # update the status in the database
    exam.status = Status.dicom_completed
    Catalog.commit()
def getExpByName(self, src_names, egy_edges):
    """
    Sum the exposure evaluated at the position of each named source.

    :param src_names: iterable of source names resolvable by the Catalog
    :param egy_edges: energy bin edges forwarded to eval
    :return: accumulated exposure over all named sources
    """
    cat = Catalog()
    total = None
    for name in src_names:
        src = cat.get_source_by_name(name)
        contribution = self.eval(src['RAJ2000'], src['DEJ2000'], egy_edges)
        if total is None:
            # first source initialises the accumulator
            total = contribution
        else:
            total += contribution
    return total
def get_output_data(identifier, file_path_cluster):
    """
    Send output files from the compute cluster to the modality controller
    and record the new status.

    :param identifier: examination identifier
    :param file_path_cluster: path of the result file on the cluster
    """
    controller_dir = "C:\\Users\\Anna\\Desktop\\pynetdicom_git_clone\\pynetdicom3\\pynetdicom3\\apps\\findscu\\controller\\"
    shutil.copy(file_path_cluster, controller_dir)
    exam = Catalog.get(identifier)
    # update the status in the database
    exam.status = Status.send_final_data
    Catalog.commit()
    UI.communique("The results have been sent")
def mpps_inprogress(identifier):
    """
    N-CREATE handler setting the status to in-progress, meaning the
    examination is currently being performed.

    :param identifier: examination identifier
    """
    # parse the MPPS "in progress" DICOM file (force=True: no preamble check)
    pydicom.dcmread("mpps_inprogress.dcm", force=True)
    exam = Catalog.get(identifier)
    # store the path of the mpps file in the database record
    exam.path = "C:\\Users\\Anna\\Desktop\\pynetdicom_git_clone\\pynetdicom3\\pynetdicom3\\apps\\findscu\\mpps_inprogress.dcm"
    # update the status in the database
    exam.status = Status.dicom_inprogress
    Catalog.commit()
def fun_article_new_src(user, article_id='-1', article_type='blog', src_type='code', title='', body='', source='', code_type='python', math_type='inline', father_id='-1', group_id='-1'):
    # Attach a new source to an article after validating types, title and
    # body, and checking write permission on the target.
    # NOTE(review): Python 2 syntax; body appears truncated in this chunk
    # (AF_Object/isnew assigned but never used here) - confirm against full file.
    if article_type not in Article_Type:
        # unsupported article type
        return [1, '不支持当前文章类型!']
    if src_type not in Agree_Src:
        # unsupported source type
        return [1, '不支持当前类型的资源!']
    if title is None:
        # a name is mandatory
        return [1, '名称不能为空!']
    if body is None:
        if src_type != 'reference':
            # only references may omit the body
            return [1, '内容不能为空!']
        else:
            # references must carry a valid source URL instead
            if re.search(r'^(http|https|ftp):\/\/.+$', source) is None:
                return [1, '请填写链接地址或者引用真实内容!']
            body = ''
    else:
        if src_type == 'math':
            # encode math bodies after unescaping the submitted HTML
            body = math_encode(escape.xhtml_unescape(body))
        elif src_type == 'code':
            if code_type not in Agree_Code:
                return [1, '请选择代码种类!']
    if article_type == "about":
        # personal about page: operate on the user's own About object
        AF_Object = user.about
        article_id = str(user.about._id)
        isnew = False
    elif article_type == "book-about":
        isnew = False
        try:
            book = Catalog(_id=group_id)
            AF_Object = book.about
            # verify the caller has write authority on this catalog
            limit = book.authority_verify(user)
            if test_auth(limit, A_WRITE) is False:
                return [1, '您无权修改摘要!']
        except Exception, err:
            # catalog lookup failed: log traceback and report not-found
            logging.error(traceback.format_exc())
            logging.error('Catalog not exist, id %s' % group_id)
            return [1, '未找到知识谱!']
def test_AddInventory(self):
    """Adding an item should make it retrievable by store at its price."""
    expected_price = 2.52
    item = {"name": "carrots", "unit": "kg", "price": expected_price}
    store = {"name": "Zehrs", "location": "guelph"}
    catalog = Catalog("zehrs")
    catalog.add(item, store)
    self.assertEqual(catalog.price_by_store("carrots", store), expected_price)
def build_final_image(identifier, path2):
    """
    Build the final image and de-anonymise the data; the result file is
    renamed into the cluster directory as '<identifier>_final'.

    :param identifier: examination identifier
    :param path2: current path of the reconstructed file
    :return: final path of the renamed file on the cluster
    """
    cluster_dir = 'C:\\Users\\Anna\\Desktop\\pynetdicom_git_clone\\pynetdicom3\\pynetdicom3\\apps\\findscu\\cluster\\'
    final_path = os.path.join(cluster_dir, str(identifier) + "_final")
    os.rename(path2, final_path)
    exam = Catalog.get(identifier)
    # update the status in the database
    exam.status = Status.build_final_image
    Catalog.commit()
    UI.communique("Final image construction in progress")
    return final_path
def study_slc(self):
    """Apply single-link clustering analysis to the configured regions of Japan.

    For each region and each distance threshold: build a geographically and
    magnitude-filtered temporary catalog, classify the previously declustered
    quake array (re-centering each cluster on its medoid), and remove the
    temporary catalogs afterwards.
    """
    # catalog paths
    COMPLETE_CATALOG = "../catalogs/new_jma.txt"
    TEMP_CATALOG1 = '../catalogs/temp1.txt'
    TEMP_CATALOG2 = '../catalogs/temp2.txt'
    # indices into each region's info list
    BOUND = 0
    MAG_THRESH = 1

    # auxiliary classes needed
    cat = Catalog()
    slc1 = Single_Link()

    # distance threshold, the distance to use when the distance calculated
    # by the single link cluster returns negative
    dist_thresh = [12.0]

    # region name -> [bounds, magnitude threshold]
    regions = {}
    regions["kanto"] = [cat.get_kanto_bounds(), 0.0]
    #regions["kansai"] = [cat.get_kansai_bounds(), 0.0]
    regions["tohoku"] = [cat.get_tohoku_bounds(), 0.0]
    regions["east_japan"] = [cat.get_east_japan_bounds(), 0.0]

    # iterate through every region and threshold combination
    for region, info in regions.items():
        for thresh in dist_thresh:
            # obtain catalog for the region
            cat.select_geographically(info[BOUND], COMPLETE_CATALOG,
                                      TEMP_CATALOG1)
            cat.get_mag_threshold(TEMP_CATALOG1, TEMP_CATALOG2,
                                  info[MAG_THRESH], index=2)

            # BUG FIX: `path` was used below, but its assignment had been
            # commented out along with the (already-performed) single-link /
            # pickling step, causing a NameError. Rebuild the path to the
            # previously pickled declustered array here.
            path = ('data_persistence/declustered_array_slc_' + region +
                    '_' + str(round(thresh, 3)))

            # classify quakes by making the centroid the medoid of the cluster
            slc1.classify_quakes(path, 'closest')
            quake_path = path + '_classified_medoid'

            # clean up the temporary catalogs
            call(["rm", TEMP_CATALOG1, TEMP_CATALOG2])
def getExpByName(self, src_names, egy_axis, cth_axis=None, weights=None):
    """Return the exposure histogram averaged over the named sources.

    The special name 'ridge' is expanded to 30 sample points along the
    galactic plane (glat = 0) instead of a single catalog position; every
    other name is resolved through the catalog.
    """
    total = None
    cat = Catalog.get()
    for name in src_names:
        # Resolve the name to one or more sky positions.
        if name == 'ridge':
            glon = np.linspace(0, 360., 30.)
            glat = np.zeros(30)
            ra, dec = eq2gal(glon, glat)
            positions = [(ra[i], dec[i]) for i in range(30)]
        else:
            src = cat.get_source_by_name(name)
            positions = [(src['RAJ2000'], src['DEJ2000'])]
        # Accumulate the evaluated histogram at each position.
        for r, d in positions:
            h = self.eval2(r, d, egy_axis, cth_axis, wfn=weights)
            if total is None:
                total = h
            else:
                total += h
    # Average over the number of requested names (the 'ridge' entry counts
    # once even though it contributes 30 evaluations).
    total /= float(len(src_names))
    return total
class StyleGenerator:
    """Generates and uploads a GeoServer SLD style for a geologic map."""

    def __init__(self, geomap):
        """
        :param geomap: the map whose description-of-map-units drive the
            style; its name is also used as the GeoServer style name.
        """
        self.gm = geomap
        self.cat = Catalog(GeoServerConfig.BaseGeoserverUrl + "rest",
                           GeoServerConfig.GeoserverAdminUser,
                           GeoServerConfig.GeoserverAdminSecret)

    def createStyle(self):
        """Render the map's DMUs to SLD and push the style to GeoServer,
        overwriting an existing style of the same name if present.
        Does nothing when content negotiation does not return HTTP 200."""
        dmus = self.gm.descriptionofmapunits_set.all()
        sldResponse = dmuContentNegotiation("application/sld", dmus)
        if sldResponse.status_code == 200:
            sld = unicode(sldResponse.content, errors='ignore')
            # Only request an overwrite when the style already exists.
            overwrite = self.cat.get_style(self.gm.name) is not None
            # BUG FIX: the original wrapped this call in
            # `except Exception as ex: raise ex`, which re-raised the same
            # exception while mangling the traceback and added nothing.
            # Let any failure propagate with its original traceback.
            self.cat.create_style(self.gm.name, sld, overwrite)
def fun_get_user_book(user, page=1, page_cap=8):
    """Return ``(total_count, page_of_summaries)`` for *user*'s managed books.

    Books are ordered newest-first (descending id); the requested page is
    sliced out and each book is summarised with its completion percentage.
    """
    books = user.managed_catalog_lib.load_all()
    total = len(books)
    if total == 0:
        return 0, list()
    # Newest first: ids in descending order.
    ids = sorted(books.keys(), reverse=True)
    lo, hi = get_index_list_by_page(ids, page=page, page_cap=page_cap)
    summaries = []
    for book in Catalog.get_instances("_id", ids[lo:hi]):
        # Completion percentage as a string; 0 (int) when the book is empty.
        if book.node_sum == 0:
            complete = 0
        else:
            complete = str(int(100 * (float(book.complete_count) / book.node_sum)))
        summaries.append(
            {
                "book_id": book._id,
                "book_name": book.name,
                "book_owner_name": user.name,
                "book_owner_id": user._id,
                "book_complete": complete,
                "book_complete_count": book.complete_count,
                "book_node_sum": book.node_sum,
            }
        )
    summaries.reverse()
    return total, summaries
def apply_decluster(self):
    """Decluster the whole catalog with k-means and write mainshocks to file."""
    # helpers: catalog I/O and the clustering algorithm
    helper_catalog = Catalog()
    clusterer = KMeans()
    # load the quakes, split mainshocks from aftershocks, then persist
    # the mainshocks to the results catalog
    quakes = helper_catalog.get_earthquake_array('../catalogs/new_jma.txt')
    declustered = clusterer.do_kmeans(quakes)
    helper_catalog.record_mainshocks(declustered,
                                     file_write='../results/mainshocks.txt',
                                     file_read='../catalogs/jma.txt')
def __init__(self, spider=None, input_directory='raw_output'):
    """Initialise from a spider when one is given, else from a directory.

    :param spider: optional spider supplying directory, name and timestamp
    :type spider: CourseSpider
    :param input_directory: fallback raw-output directory when no spider
        is provided; also used as the name in that case
    """
    self.catalog = Catalog()
    if spider is not None:
        self.input_directory = spider.get_raw_output()
        self.name = spider.get_name()
        self.timestamp = spider.get_timestamp()
    else:
        self.input_directory = input_directory
        self.name = self.input_directory
        self.timestamp = date.today()
def read_slabdata(slabcat=None):
    '''
    Creates slab objects for the slabs identified by Fukao & Obayashi (2013).

    Axis-aligned bounding boxes are listed as [minlon, minlat, maxlon, maxlat].
    The two Kurile segments need oblique boxes, listed corner-by-corner as
    [upper left, lower left, lower right, upper right].

    :param slabcat: optional existing Catalog to append to; a fresh Catalog
        is created when omitted
    :return: the Catalog populated with the Fukao & Obayashi slabs
    '''
    boxes = {
        'Honshu': [109, 35, 159, 45],
        'Bonin': [109, 23, 159, 35],
        'Mariana': [120, 8.3, 152, 25],
        'Java': [89, -10, 129, 19],
        'Phillippine': [114, 5, 128, 18],
        'Tonga': [157, -26, 190, -10],
        'Kermadec': [157, -37, 190, -26],
        'Peruvian': [270, -15, 307, 6.7],
        'Chilean': [270, -45, 307, -15],
        'Central_American': [249, 4.5, 282, 33],
        # oblique boxes: [upper left, lower left, lower right, upper right]
        'Kurile_N': [134.5, 65.5, 130.5, 61.5, 158.5, 43, 162.5, 47],
        'Kurile_S': [127.5, 58.5, 123.5, 54.5, 151.5, 36, 155.5, 40],
    }
    catalog = slabcat if slabcat else Catalog()
    for name, box in boxes.items():
        slab = Slab(name)
        # each slab receives its bounds wrapped in the expected dict shape
        slab.add_Fukao_slab_details({'Bounds': box})
        catalog.append(slab)
    return catalog
def __init__(self, geomap):
    """Connect to GeoServer and ensure the GeoSciML-Portrayal ('gsmlp')
    workspace and its PostGIS datastore exist.

    :param geomap: the geologic map this instance will publish layers for
    """
    # Resolve the Django model for each configured GeoSciML-Portrayal layer.
    self.gsmlpModelsToLoad = [get_model("ncgmp", name)
                              for name in self.gsmlpModelNames]
    self.gm = geomap
    self.cat = Catalog(GeoServerConfig.BaseGeoserverUrl + "rest",
                       GeoServerConfig.GeoserverAdminUser,
                       GeoServerConfig.GeoserverAdminSecret)
    # Create the workspace on first use only.
    workspace = self.cat.get_workspace("gsmlp")
    if workspace is None:
        workspace = self.cat.create_workspace("gsmlp", "http://xmlns.geosciml.org/geosciml-portrayal/1.0")
    self.gsmlpWs = workspace
    self.cat.create_postgis_datastore(self.gsmlpWs)