class RunGraph:
    def __init__(self, filename='testFile.txt'):
        self.filename = filename
        self.extractObject = Extract(self.filename)
        self.nodes = self.extractObject.nodes

    def add_node(self, node):
        self.extractObject.add_node(node)
        self.extractObject.write_to_json()

    def get_results(self, src, dest):
        # Takes a source and a destination node and calculates the path between them.
        calculate_obj = Graph(self.nodes)
        came_from, cost = calculate_obj.calculate_path(self.nodes[src], self.nodes[dest])
        path = calculate_obj.reconstruct_path(came_from, self.nodes[src], self.nodes[dest])  # reverse the path
        is_path_valid = calculate_obj.validate_rules(path)  # enforce validation rules
        final_data = dict()
        if is_path_valid:
            final_data['path'] = path
            final_data['cost'] = cost[path[-1]]
            return final_data
        else:
            return is_path_valid
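# A minimal usage sketch for RunGraph, assuming 'A' and 'B' are node keys that
# exist in testFile.txt (the node names are hypothetical; Extract and Graph
# are this project's own classes):
runner = RunGraph('testFile.txt')
result = runner.get_results('A', 'B')
if result:
    print(result['path'], result['cost'])
else:
    print('path failed validation')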
def __init__(self, opt, device):
    super(STR, self).__init__()
    self.opt = opt

    # Trans (kept disabled in this variant)
    # self.Trans = Trans.TPS_SpatialTransformerNetwork(F=opt.num_fiducial,
    #                                                  i_size=(opt.imgH, opt.imgW),
    #                                                  i_r_size=(opt.imgH, opt.imgW),
    #                                                  i_channel_num=opt.input_channel,
    #                                                  device=device)

    # Extract
    if self.opt.extract == 'RCNN':
        self.Extract = Extract.RCNN_extractor(opt.input_channel, opt.output_channel)
    elif 'efficientnet' in self.opt.extract:
        self.Extract = Extract.EfficientNet(opt)
    elif 'resnet' in self.opt.extract:
        self.Extract = Extract.ResNet_FeatureExtractor(opt.input_channel, opt.output_channel)
    else:
        raise ValueError('invalid extract model name!')
    self.FeatureExtraction_output = opt.output_channel  # (imgH/16 - 1) * 512
    self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1))  # imgH/16 - 1 -> 1

    # Sequence
    self.Seq = nn.Sequential(
        BidirectionalLSTM(self.FeatureExtraction_output, opt.hidden_size, opt.hidden_size),
        BidirectionalLSTM(opt.hidden_size, opt.hidden_size, opt.hidden_size))
    self.Seq_output = opt.hidden_size

    # Pred
    self.Pred = Pred.Attention(self.Seq_output, opt.hidden_size, opt.num_classes, device=device)
def obtain_month_states(data):
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    state = ex.obtain_state(data)
    state_list = state[:, 0]
    fire_list = []
    for st in state_list:
        state_fires = []
        print(st)
        for i in range(1992, 2016):
            for k in months:
                month_num = months.index(k) + 1
                result = ex.obtain_monthly_state(data, st, i, k)
                result = result['discovery_month'].value_counts().sort_index()
                if result.empty:
                    # No fires recorded for this state/month: store a zero count.
                    fire = [i, month_num, 0]
                    state_fires.append(fire)
                else:
                    X = np.array([i, month_num]).astype(int)
                    Y = np.array([result]).astype(int).squeeze()
                    fire = np.hstack((X, Y)).tolist()
                    state_fires.append(fire)
        fire_list.append(state_fires)
    return fire_list
def __init__(self, opt, device):
    super(model, self).__init__()
    self.opt = opt

    # Trans
    self.Trans = Trans.TPS_SpatialTransformerNetwork(
        F=opt.num_fiducial,
        i_size=(opt.imgH, opt.imgW),
        i_r_size=(opt.imgH, opt.imgW),
        i_channel_num=opt.input_channel,
        device=device)

    # Extract
    if self.opt.extract == 'RCNN':
        self.Extract = Extract.RCNN_extractor(opt.input_channel, opt.output_channel)
    elif 'efficientnet' in self.opt.extract:
        self.Extract = Extract.EfficientNet(opt)
    elif 'resnet' in self.opt.extract:
        self.Extract = Extract.ResNet_FeatureExtractor(opt.input_channel, opt.output_channel)
    else:
        raise ValueError('invalid extract model name!')

    # self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1))  # imgH/16 - 1 -> 1

    # Position aware module
    self.PAM = PositionEnhancement.PositionAwareModule(
        opt.output_channel, opt.hidden_size, opt.output_channel, 2)
    self.PAttnM_bot = PositionEnhancement.AttnModule(opt, opt.hidden_size, opt.bot_n_cls, device)
    self.PAttnM_mid = PositionEnhancement.AttnModule(opt, opt.hidden_size, opt.mid_n_cls, device)
    self.PAttnM_top = PositionEnhancement.AttnModule(opt, opt.hidden_size, opt.top_n_cls, device)

    # Hybrid branch
    self.Hybrid_bot = Hybrid.HybridBranch(opt.output_channel, opt.batch_max_length + 1, opt.bot_n_cls, device)
    self.Hybrid_mid = Hybrid.HybridBranch(opt.output_channel, opt.batch_max_length + 1, opt.mid_n_cls, device)
    self.Hybrid_top = Hybrid.HybridBranch(opt.output_channel, opt.batch_max_length + 1, opt.top_n_cls, device)

    # Dynamically fusing module
    self.Dynamic_fuser_top = PositionEnhancement.DynamicallyFusingModule(opt.top_n_cls)
    self.Dynamic_fuser_mid = PositionEnhancement.DynamicallyFusingModule(opt.mid_n_cls)
    self.Dynamic_fuser_bot = PositionEnhancement.DynamicallyFusingModule(opt.bot_n_cls)
def save_csv(forest_path):
    data = ex.load_dataset(forest_path)
    data = obtain_month_states(data)

    state = ex.load_dataset(forest_path)
    state = ex.obtain_state(state)
    state_list = state[:, 0].tolist()

    # Write one CSV of monthly fire counts per state.
    for index, st in enumerate(state_list):
        values = data[index]
        np.savetxt("state_fire/" + st + "_total.csv", values,
                   delimiter=',', fmt='%d')
    return
def startExtract(conn):
    if input_db_details['dbtype'] == 'csv':
        rows = ext.processExtractCSV(input_db_details['csvloc'])
        # Normalized to the same [[source, rows]] shape as the tweets branch.
        dataDFList = [['csv', rows]]
    elif input_db_details['dbtype'] == 'tweets':
        rows = twt.extractTweets(input_db_details['search_words'],
                                 input_db_details['date_since'])
        dataDFList = [['tweets', rows]]
    else:
        dataDFList = ext.startExtractProcess(conn)
    return dataDFList
def test_download_data():
    """Tests the data-extraction function."""
    temp_db = pg_temp.TempDB(databases=['testdb'])
    select_query = "SELECT * FROM pg_attribute"
    conn = Extract.db_connection()
    df = Extract.download_data(conn, select_query)
    temp_db.cleanup()
    assert df.size > 0
def __init__(self, opt, device):
    super(SCATTER, self).__init__()
    self.opt = opt

    # Trans
    self.Trans = Trans.TPS_SpatialTransformerNetwork(
        F=opt.num_fiducial,
        i_size=(opt.imgH, opt.imgW),
        i_r_size=(opt.imgH, opt.imgW),
        i_channel_num=opt.input_channel,
        device=device)

    # Extract
    if self.opt.extract == 'RCNN':
        self.Extract = Extract.RCNN_extractor(opt.input_channel, opt.output_channel)
    elif 'efficientnet' in self.opt.extract:
        self.Extract = Extract.EfficientNet(opt)
    else:
        raise ValueError('invalid extract model name!')
    self.FeatureExtraction_output = opt.output_channel  # (imgH/16 - 1) * 512
    self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1))  # imgH/16 - 1 -> 1

    # Visual features refinement
    self.VFR = VFR.Visual_Features_Refinement(
        kernel_size=(3, 1),
        num_classes=opt.num_classes,
        in_channels=self.FeatureExtraction_output,
        out_channels=1,
        stride=1)

    # CTC decoder
    self.CTC = CTC.CTC_decoder(opt.output_channel, opt.output_channel,
                               opt.num_classes, device)

    # Selective contextual refinement: a stack of n_blocks blocks (replaces the
    # previous hand-wired SCR_1/SCR_2/SCR_3 construction).
    self.SCR = SCR.SCR_Blocks(
        input_size=self.FeatureExtraction_output,
        hidden_size=int(self.FeatureExtraction_output / 2),
        output_size=self.FeatureExtraction_output,
        num_classes=opt.num_classes,
        device=device,
        batch_max_length=opt.batch_max_length,
        n_blocks=opt.scr_n_blocks)
def activity_users():
    apname_last = Extract.extractSpecific(createFiles.realFile, 'APName').replace('APName', '').split()[-1]
    ssid = Extract.extractSpecific(createFiles.realFile, 'SSID').split('SSID')[-1]
    user_name = Extract.extractSpecific(createFiles.realFile, 'Username').split('Username')[-1]
    mac_address = Extract.client_mac(createFiles.realFile)[-1]
    ip_address = Extract.extractSpecific(createFiles.realFile, 'IPAddress').split('IPAddress')[-1]
    if mac_address != '0:0:0:0:0:0':
        ExportToDB.send_to_db(mac_address, ip_address, apname_last, ssid, user_name)
def deauth_users():
    apname_last = Extract.extractSpecific(createFiles.realFile, 'APName').replace('APName', '').split()[-1]
    reason_code = Extract.extractSpecific(createFiles.realFile, 'ReasonCode').split('ReasonCode')[-1]
    user_ip_address = Extract.extractSpecific(createFiles.realFile, 'UserIpAddress').split('UserIpAddress')[-1]
    user_name = Extract.extractSpecific(createFiles.realFile, 'UserName').split('UserName')[-1]
    mac_address = Extract.client_mac(createFiles.realFile)[-1]
    if mac_address != '0:0:0:0:0:0':
        ExportToDB.disassociate_users(mac_address, user_ip_address, apname_last,
                                      reason_code, user_name)
def upload_file():
    # check if the post request has the file part
    if 'file' not in request.files:
        resp = jsonify({'message': 'No file part in the request'})
        resp.status_code = 400
        return resp
    file = request.files['file']
    if file.filename == '':
        resp = jsonify({'message': 'No file selected for uploading'})
        resp.status_code = 400
        return resp
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(file_path)
        # OCR the saved PDF, then extract and restructure the document data.
        dpi = 300
        documentText, fname = OCR.Convert(file_path, dpi, str(1))
        data = Extract.Info(fname)
        data = Align.restructure(data)
        return str(data)
    else:
        resp = jsonify({'message': 'Allowed file type is pdf'})
        resp.status_code = 400
        return resp
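# A client-side sketch for exercising upload_file, assuming the route is
# served locally at http://localhost:5000/upload (the URL and the PDF filename
# are hypothetical):
import requests

with open('sample.pdf', 'rb') as f:
    r = requests.post('http://localhost:5000/upload', files={'file': f})
print(r.status_code, r.text)  # 400 with a JSON message on failure, extracted data on success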
def sky_median(self, X=2, Y=10, distance=5):
    '''Generates a sky spectrum from the median of a large sky area.

    Args:
        X/Y: the X/Y position of the center coordinate
        distance: the radius around the center coordinate

    Returns:
        {'wave_nm': wavelength of sky spectrum,
         'spec_adu': the median sky spectrum,
         'all_spec': a matrix of spectra,
         'num_spec': the number of spectra}

    Side Effects:
        Stores the returned values into the class'''

    if self.positions[0] == 'MeanX':
        if X < 60:
            X = 1024
            Y = 1024
        if distance < 60:
            distance *= 60
    res = Extract.sky_median(self.KT, self.SegMap, ixmap=self.OK,
                             pixel_shift=self.pixel_shift,
                             X=X, Y=Y, distance=distance)
    return res
def spectrum_near_position(self, X, Y, distance=2, onto=None, sky_spec=None):
    '''Interpolates spectra in a circle of radius distance around X, Y.

    See spectra_near_position.

    Args:
        X, Y: the X/Y position of the central spectrum
        distance: the extraction radius
        onto: the wavelength grid to interpolate onto; None to ignore
        sky_spec: the sky spectrum to subtract

    Returns:
        ({'wave_nm': wavelength of spectrum,
          'spec_adu': the summed spectrum,
          'all_spec': a matrix of spectra,
          'num_spec': the number of spectra}, spectra)'''

    if self.positions[0] == 'MeanX':
        if distance < 60:
            distance *= 60
    spectra = self.spectra_near_position(X, Y, distance)
    return Extract.interp_and_sum_spectra(
        spectra, onto=onto, sky_spec=sky_spec), spectra
def spectra_near_position(self, X, Y, distance=2):
    '''Returns all spectra within distance of (X, Y).

    Note: spectra_* methods return all spectra; spectrum_* methods
    convert spectra into a single spectrum.

    Args:
        X, Y: the X/Y position of the spectrum to extract, in arcseconds
        distance: the extraction radius in arcseconds

    Returns a list of (wavelength, spectra, index). E.g.:
        [[array(365...1000), array(50 .. 63), 950] ..]

    Example:
        import SegMap as SM
        sm = SM.SegmentationMap("/path/b_ifu20130808_23_08_44.fits_SI.mat")
        spec = sm.spectra_near_position(5, 5)
        print(len(spec))
        >> 34
        ll, ss = spec[0][0], spec[0][1]
        plot(ll, ss)  # Plots the spectrum
    '''
    if self.positions[0] == 'MeanX':
        if distance < 60:
            distance *= 60
    return Extract.spectra_near_position(self.KT, self.SegMap, X, Y,
                                         distance=distance,
                                         ixmap=self.OK,
                                         pixel_shift=self.pixel_shift)
def TestMod(self, filepath, modname, basedir=None):
    '''Tests an archive for a mod.

    Returns 0 if the test didn't complete.
    Returns 3 if the test found inconsistencies.
    Otherwise returns a dict containing the tp2 name (tpname) and the
    path to the main folder (foldpath) of the mod.

    :param basedir: root directory to check against (defaults to self.dir)
    :return: int status code or result dict
    '''
    if basedir is None:
        basedir = self.dir
    if isinstance(modname, str):
        modname = modname.encode('ascii')
    regexlist = [
        rb'((?:[^\r\n\t\f\v /]+?/)*?)(%s)(?:/backup\r?$)?(?=\r?$)(?mi)' % modname
    ]
    res = Extract.Check_Archive(filepath, basedir=basedir, regex=regexlist)
    logging.info('Check_Archive returned {}'.format(res))
    # Int status codes and the result dict are both returned unchanged.
    return res
def last_twelve(data):
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    data = ex.obtain_month_year(data)
    for entry in data:
        entry[1] = months.index(entry[1]) + 1
    data = data.astype(int)
    # Sort by year, then month (np.lexsort treats its last key as primary).
    data = data[np.lexsort((data[:, 1], data[:, 0]))]
    data = np.delete(data, 0, 0)
    two_fifteen = data[-12:]
    x1 = data[:, [0, 1]]
    y1 = data[:, 2]
    # Hold out the last 12 months for prediction.
    x = x1[:-12]
    y = y1[:-12]
    model = make_pipeline(PolynomialFeatures(5),
                          sk.Ridge(alpha=0.001, fit_intercept=False))
    model.fit(x, y)
    y_poly_pred = model.predict(x)
    for i in range(1, 13):
        values = model.predict([[2015, i]])
        x = np.vstack([x, [2015, i]])
        y_poly_pred = np.append(y_poly_pred, values)
        y = np.append(y, values)
    temp = list(range(1, y_poly_pred.size + 1))
    print(two_fifteen)
    print(y[-12:])
    plt.scatter(temp, y)
    plt.plot(temp, y_poly_pred, color='red')
    plt.title("Regression of fires every month each year - prediction of last 12")
    plt.show()
def test_db_connection():
    """Tests the database-connection function."""
    connection = Extract.db_connection()
    connection.close()
    assert str(type(connection)) == "<class 'psycopg2.extensions.connection'>"
def month_year_pred(data):
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    data = ex.obtain_month_year(data)
    for entry in data:
        entry[1] = months.index(entry[1]) + 1
    data = data.astype(int)
    # Sort by year, then month (np.lexsort treats its last key as primary).
    data = data[np.lexsort((data[:, 1], data[:, 0]))]
    data = np.delete(data, 0, 0)
    x = data[:, [0, 1]]
    y = data[:, 2]
    model = make_pipeline(PolynomialFeatures(5),
                          sk.Ridge(alpha=0.001, fit_intercept=False))
    model.fit(x, y)
    y_poly_pred = model.predict(x)
    # Extrapolate the twelve months of 2016.
    for i in range(1, 13):
        year = 2016
        month = i
        value = model.predict([[year, month]])
        x = np.vstack([x, [year, month]])
        y_poly_pred = np.append(y_poly_pred, value)
        y = np.append(y, value)
    temp = np.arange(1, x[:, 0].size + 1)
    plt.scatter(temp, y)
    plt.plot(temp, y_poly_pred, color='red')
    plt.title("Regression of fires every month each year")
    plt.show()
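# np.lexsort sorts by its LAST key first, so passing (months, years) sorts by
# year, then month. A standalone sketch of the ordering used above (the sample
# values are illustrative only):
import numpy as np

years = np.array([1993, 1992, 1992])
months_col = np.array([1, 12, 2])
order = np.lexsort((months_col, years))
print(order)  # [2 1 0] -> rows reordered as (1992, 2), (1992, 12), (1993, 1)
print(list(zip(years[order], months_col[order])))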
def rogue_ssid_detected():
    rogue_detected = Extract.extractSpecific(createFiles.realFile, 'ApRogueDetected').replace('ApRogueDetected', '').split()[-1]
    rogue_mode = Extract.extractSpecific(createFiles.realFile, 'ApRogueMode').replace('ApRogueMode', '').split()[-1]
    rogue_apname_last = Extract.extractSpecific(createFiles.realFile, 'APName').replace('APName', '').split()[-1]
    rogue_ssid = Extract.extractSpecific(createFiles.realFile, 'ApRogueApSsid').split('ApRogueApSsid')[-1]
    rogue_detected_ch = Extract.extractSpecific(createFiles.realFile, 'ApRogueDetectedChannel').split('ApRogueDetectedChannel')[-1]
    rogue_mac_address = Extract.extractSpecific(createFiles.realFile, 'ApRogueApMacAddress').split('ApRogueApMacAddress')[-1]
    rogue_rssi = Extract.extractSpecific(createFiles.realFile, 'ApRSSI').split('ApRSSI')[-1]
    ExportToDB.ssid_rogue_detected(rogue_mode, rogue_ssid, rogue_apname_last,
                                   rogue_detected_ch, rogue_mac_address, rogue_rssi)
def __init__(self, opt, device):
    super(STR, self).__init__()
    self.opt = opt

    # Trans
    self.Trans = Trans.TPS_SpatialTransformerNetwork(
        F=opt.num_fiducial,
        i_size=(opt.imgH, opt.imgW),
        i_r_size=(opt.imgH, opt.imgW),
        i_channel_num=opt.input_channel,
        device=device)

    # Extract
    if self.opt.extract == 'RCNN':
        self.Extract = Extract.RCNN_extractor(opt.input_channel, opt.output_channel)
    elif 'efficientnet' in self.opt.extract:
        self.Extract = Extract.EfficientNet(opt)
    else:
        raise ValueError('invalid extract model name!')
    self.FeatureExtraction_output = opt.output_channel  # (imgH/16 - 1) * 512
    self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1))  # imgH/16 - 1 -> 1

    # Sequence
    self.Seq = nn.Sequential(
        BidirectionalLSTM(self.FeatureExtraction_output, opt.hidden_size, opt.hidden_size),
        BidirectionalLSTM(opt.hidden_size, opt.hidden_size, opt.hidden_size))
    self.Seq_output = opt.hidden_size

    # Pred
    if opt.pred == 'arcface':
        print('using ArcFace Loss')
        self.Pred_bot = Pred_jamo_arcface.Attention(self.Seq_output, opt.hidden_size, opt.bottom_n_cls, device=device)
        self.Pred_mid = Pred_jamo_arcface.Attention_mid(self.Seq_output, opt.hidden_size, opt.middle_n_cls, opt.bottom_n_cls, device=device)
        self.Pred_top = Pred_jamo_arcface.Attention_top(self.Seq_output, opt.hidden_size, opt.top_n_cls, opt.middle_n_cls, opt.bottom_n_cls, device=device)
    else:
        self.Pred_bot = Pred_jamo.Attention(self.Seq_output, opt.hidden_size, opt.bottom_n_cls, device=device)
        self.Pred_mid = Pred_jamo.Attention_mid(self.Seq_output, opt.hidden_size, opt.middle_n_cls, opt.bottom_n_cls, device=device)
        self.Pred_top = Pred_jamo.Attention_top(self.Seq_output, opt.hidden_size, opt.top_n_cls, opt.middle_n_cls, opt.bottom_n_cls, device=device)
def main(dir, *args):
    extract = Extract.Extract()
    extract.EXECUTE = E
    extract.DL = 0
    print(DATE)
    extract.run('EXTRACT', DATE, dir, *args)
    ex_outdirs = copy.copy(extract.OUTDIRS)
    extract.OUTDIRS = []
    L_key = map_tags(extract, ex_outdirs)
    map_outdirs = copy.copy(extract.OUTDIRS)
    extract.OUTDIRS = []
    merge_hdfs(extract, map_outdirs, L_key)
def year_poly_reg(data):
    data = ex.obtain_total_year(data)
    data = np.delete(data, 0, 0)
    x = data[:, 0].reshape((-1, 1))
    y = data[:, 1]
    model = make_pipeline(PolynomialFeatures(5), sk.LinearRegression())
    model.fit(x, y)
    pred_y = model.predict(x)
    plt.scatter(x, y)
    plt.plot(x, pred_y, color='red')
    plt.show()
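# The make_pipeline(PolynomialFeatures(...), <regressor>) pattern used above,
# shown in isolation on synthetic data (the degree and values here are
# illustrative only, not taken from the fire dataset):
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

x = np.arange(10).reshape(-1, 1)   # one feature column, like the years
y = 3 * x.ravel() ** 2 + 5         # a quadratic target
model = make_pipeline(PolynomialFeatures(2), LinearRegression())
model.fit(x, y)                    # expands x to [1, x, x^2], then fits
print(model.predict([[10]]))       # ~[305.]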
def CollideWatchStage(Track1CPA):
    UTC = Track1CPA[7]
    Ltm = Extract.timestamp_to_date(UTC)
    Hour = float(Ltm[11:13])
    if (0 <= Hour < 4) or (12 <= Hour < 16):
        # First officer
        return 1
    if (4 <= Hour < 8) or (16 <= Hour < 20):
        # Second officer
        return 2
    if (8 <= Hour < 12) or (20 <= Hour < 24):
        # Third officer
        return 3
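# A standalone check of the hour-to-watch mapping implemented above. The
# helper mirrors the if-chain so it can run without a Track1CPA record or
# Extract.timestamp_to_date; it is illustrative, not project code:
def watch_for_hour(hour):
    if 0 <= hour < 4 or 12 <= hour < 16:
        return 1  # first officer
    if 4 <= hour < 8 or 16 <= hour < 20:
        return 2  # second officer
    return 3      # third officer

assert [watch_for_hour(h) for h in (0, 5, 9, 13, 17, 21)] == [1, 2, 3, 1, 2, 3]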
def evaluatee(url):
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    rs = soup.prettify()
    r = rs.encode()
    e = Extract(url, "test", 1, rs, 1, rs, rs)
    e.DoEvaluate()

    # Copyright-symbol (UTF-8 bytes for ©) presence flag.
    abc = 1 if b'\xc2\xa9' in r else 0

    # Count anchor tags that mention the domain name
    # (note: c is computed but not used in the feature vector below).
    links = soup.find_all("a")
    c = 0
    for link in links:
        if e.phishScore.DomainName in str(link):
            c += 1

    # Does the page title contain the domain name?
    title = -1
    title_tags = soup.find_all("title")
    if title_tags and e.phishScore.DomainName in title_tags[0].get_text():
        title = 1

    X = [e.phishScore.HTTPSPresent,
         e.phishScore.DomainLength,
         e.phishScore.NonAlphabetical,
         rs.count(e.phishScore.DomainName),
         e.phishScore.OutsideRationInBody,
         abc,
         title]  # e.phishScore.TitleContainDomainName
    X = np.array(X).reshape(1, -1)

    with open("phishing_model.pkl", 'rb') as fh:
        classifier = pickle.load(fh)
    predict = classifier.predict(X)
    if predict == 0:
        print("Phishing Website")
    else:
        print("Legitimate site")
    return predict
def uptime_instance():
    # Uptime instance
    uptime = Extract.extractSpecific(createFiles.realFile, 'UpTimeInstance').replace('UpTimeInstance', '')
    pattern = re.compile(r'(?:[0-9]:?){6}')
    # Filter out the zeroed entries, then pick the last (most recent) element.
    uptime_non_zero = re.findall(pattern, uptime)
    latest_uptime = uptime_non_zero[-1]

    wlc_coe = Extract.extractSpecific(createFiles.realFile, 'UDP: 172.30.232.2:32768-172.30.232.250:162').split('UDP: ')[-1]
    wlc_eng = Extract.extractSpecific(createFiles.realFile, 'UDP: 172.30.253.2:32768-172.30.232.250:162').split('UDP: ')[-1]

    if wlc_eng == '172.31.253.2:32769-172.30.232.250:162':
        ExportToDB.uptime_wlc(ip_address='172.31.253.2 - EnG', date_string=latest_uptime)
    elif wlc_coe == '172.30.232.2:32768-172.30.232.250:162':
        ExportToDB.uptime_wlc(ip_address='172.31.232.2 - CoE', date_string=latest_uptime)
def graph_total_cause(data):
    """Bar chart of the total number of fires per cause.

    :param data: the dataset to aggregate by cause
    :return: None
    """
    data = ex.obtain_total_cause(data)
    x = data[:, 0]
    y = data[:, 1].astype(int)
    plt.xticks(rotation=90)
    plt.bar(x, y)
    plt.savefig('total_causes.png')
    plt.show()
def draw(self):
    if self.subtract_sky:
        sky = self.SM[self.spx_ix].sky_median()
        sky_spec = sky["wave_nm"], sky["spec_adu"]
    else:
        sky_spec = None
    x, y, v = Extract.segmap_to_img(
        self.SM[self.spx_ix].SegMap,
        sky_spec=sky_spec,
        minl=500,
        maxl=700,
        positions=self.positions)
    self.Xs = x
    self.Ys = y
    self.Values = v
    self.draw_selection_circle()
def year_pred_poly(data):
    data = ex.obtain_total_year(data)
    data = np.delete(data, 0, 0)
    x = data[:, 0].reshape((-1, 1))
    y = data[:, 1]
    model = make_pipeline(PolynomialFeatures(5),
                          sk.Ridge(alpha=0.1, fit_intercept=False))
    model.fit(x, y)
    pred_y = model.predict(x)
    # Extrapolate four years past the end of the data.
    for i in range(1, 5):
        year = 2015 + i
        value = model.predict([[year]])
        x = np.vstack([x, [year]])
        pred_y = np.append(pred_y, value)
        y = np.append(y, value)
    plt.scatter(x, y)
    plt.plot(x, pred_y, color='red')
    plt.show()
def spectra_in_annulus(self, X, Y, small=4, large=6):
    '''Returns all spectra in the annulus between radius small and large.

    Note: spectra_* methods return all spectra; spectrum_* methods
    convert spectra into a single spectrum.

    Args:
        X, Y: the X/Y position of the central spectrum
        small, large: the small and large radius of extraction

    Returns:
        List of spectra; see spectra_near_position'''

    if self.positions[0] == 'MeanX':
        if small < 60:
            small *= 60
        if large < 60:
            large *= 60
    return Extract.spectra_in_annulus(self.KT, self.SegMap, X, Y,
                                      small=small, large=large,
                                      ixmap=self.OK,
                                      pixel_shift=self.pixel_shift)
def draw(self, figure_number=1, minl=450, maxl=800, subtract_sky=False,
         outfile=None, cmin=None, cmax=None):
    '''Draws a figure showing the segmentation-map cube.

    Args:
        figure_number: plot figure number
        minl/maxl: minimum/maximum wavelength to sum over
        outfile: the path to the output file

    Returns:
        The (x, y, v) arrays used for the scatter plot

    Side Effects:
        Plots a new figure(figure_number) with the data cube.'''

    sky_spec = None
    if subtract_sky:
        if self.sky_spectrum[0] is None:
            raise Exception("Request to subtract sky; however, no sky has been measured")
        sky_spec = self.sky_spectrum

    x, y, v = Extract.segmap_to_img(self.SegMap, minl=minl, maxl=maxl,
                                    sky_spec=sky_spec, positions=self.positions,
                                    signal_field=self.signal_field)

    fig = pl.figure(figure_number, figsize=(9, 8))
    if self.positions[0] == 'MeanX':
        pl.xlim(-100, 2200)
        pl.ylim(-100, 2200)
    else:
        pl.xlim(-12, 17)
        pl.ylim(-7, 27)

    # Clip the colour values to [cmin, cmax] when limits are given.
    c = v[:]
    if cmin is not None:
        c[c < cmin] = cmin
    if cmax is not None:
        c[c > cmax] = cmax
    pl.scatter(x, y, c=c, s=60, picker=0.5, marker='h')
    return x, y, v
def spectrum_in_annulus(self, X, Y, small=4, large=6, onto=None):
    '''Returns the interpolated spectrum in an annulus around X, Y.

    See spectra_in_annulus.

    Args:
        X, Y: the X/Y position of the central spectrum
        small, large: the small and large radius of extraction
        onto: the wavelength grid to interpolate onto; None to ignore

    Returns:
        ({'wave_nm': wavelength of spectrum,
          'spec_adu': the summed spectrum,
          'all_spec': a matrix of spectra,
          'num_spec': the number of spectra}, spectra)'''

    if self.positions[0] == 'MeanX':
        if small < 60:
            small *= 60
        if large < 60:
            large *= 60
    spectra = self.spectra_in_annulus(X, Y, small, large)
    return Extract.interp_and_sum_spectra(spectra, onto=onto), spectra
def __init__(self, segmap=None, positions=('OnSkyX', 'OnSkyY'),
             signal_field='SpexSpecFit', norm=None):
    '''Loads the segmap and creates the KD tree.

    Args:
        segmap: either a filename or a segmentation-map array'''

    if isinstance(segmap, str):
        print("Loading: %s" % segmap)
        mat = scipy.io.loadmat(segmap)
        self.SegMap = mat['SegmentsInfo']
    else:
        self.SegMap = segmap

    self.norm = norm
    if norm is not None:
        for i in range(len(norm)):
            self.SegMap[i]['SpexSpecCenter'] /= norm[i]
            self.SegMap[i]['SpexSpecFit'] /= norm[i]

    self.positions = positions
    self.signal_field = signal_field
    self.KT, self.OK = Extract.segmap_to_kdtree(self.SegMap,
                                                positions=positions,
                                                signal_field=signal_field)
def ExtractThis(filename, destpath):
    import Extract as extract
    return extract.allNoProgress(filename, destpath)
geog = 'gcsd000a11a_e'
# try:
#     a.intersect([featureClasses[geog], featureClasses['southernontarioboundary']], 'gcs_so')
# except:
#     l.write("gcs_so already exists... Moving on")
featureClasses = rd.featureClasses()

start = time.clock()
l.write('Started join task')

# Iterate through new tables and join to feature classes
for table in sorted(newTables):
    try:
        # Get list of unique columns of table from class attribute
        shortName = table[0:13]
        columns = e.fieldsFromTable(table, tableName[shortName],
                                    columnPrimary[shortName],
                                    columnSecondary[shortName])
        for c in sorted(columns):
            print(c, columns[c])

        # Join table to new fc with name geog_table
        newFcName = 'gcs_so' + '_' + table[:9]
        print('New layer being created: ' + newFcName)
        j.fcToTable(featureClasses['gcs_so'], tables[table], newFcName,
                    columns, geogId[geog],
                    columnPrimary[shortName], columnSecondary[shortName])
        l.write(newFcName + ' joined successfully')
    except:
        l.write('Error during join: ' + traceback.format_exc())

end = time.clock()
elapsed = end - start
l.write('Finished join task')
l.write('Time for task: ' + str(elapsed) + ' seconds')