def get_target_outline(self, key):
    """Let the user click polygon points around a TARGET area on self.frame.

    Draws an instruction banner, routes mouse events to select_target, and
    blocks until at least 3 points are clicked or the selection is
    cancelled.  On success a new DataClass record holding the outline is
    appended to self.target_objects.

    Returns the key code from the first cv2.waitKey call.
    """
    # Instruction banner across the top of the frame.
    cv2.rectangle(self.frame, (0, 0), (500, 50), (0, 0, 0), -1)
    cv2.putText(self.frame, "Select points around the TARGET area.", (5, 14),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.putText(self.frame, "When done, press t to convert to target or any to continue.", (5, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.putText(self.frame, "When less than 3 pts, any key to cancel (q to quit).", (5, 44),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.imshow("frame", self.frame)
    # Change the function bound to mouse to the target-selection handler.
    cv2.setMouseCallback("frame", self.select_target)
    user_input = cv2.waitKey(0)
    # Keep pumping the UI until enough points are clicked or user cancels.
    while len(self.temp_area) < 3:
        cv2.imshow("frame", self.frame)
        if self.cancel_flag:
            self.temp_area = []
            break
        key = cv2.waitKey(0)
    if len(self.temp_area) > 2:
        # Add a new target object built from the clicked outline.
        target_data_temp = DataClass()
        target_data_temp.target_outline.append(self.temp_area)
        target_data_temp.target_entries_entered.append([])
        target_data_temp.target_entries_x.append([])
        target_data_temp.temp_area = []
        self.target_objects.append(target_data_temp)
        self.temp_area = []
    # bug fix: original read `self.cancel_flag is False`, a no-op identity
    # comparison; the flag must be reset with an assignment.
    self.cancel_flag = False
    return user_input
def __init__(self, mytab, maindata): self.mydata = maindata #mytab widgets labelsframe = Frame(mytab) reportsframe = Frame(mytab) button = ttk.Button(mytab, text='Zeh Button', command=lambda: self.printreports()) self.pb2 = ttk.Progressbar(mytab, length=100, mode='determinate') dateframe = Frame(mytab) #Labelframe title = tkFont.Font(size=16, underline=1, weight="bold") reportname = ttk.Label(labelsframe, text="Report", font=title) reportops = ttk.Label(labelsframe, text="Report Options", font=title) reportout = ttk.Label(labelsframe, text="Output", font=title) reportname.grid(column=0, row=0) reportops.grid(column=1, row=0) reportout.grid(column=2, row=0) labelsframe.grid(column=0, row=0, sticky=(N, W), pady=(0, 10)) labelsframe.columnconfigure(0, minsize=100) labelsframe.columnconfigure(1, minsize=150) labelsframe.columnconfigure(2, minsize=300) #reportframe counter = 0 self.reports = {} for myreport in data.getrunnablereports("All"): self.reports[myreport] = rrow.reportrow( reportsframe, myreport, 0, counter, data.getreportoptions(myreport), data.getoutputs(myreport)) counter += 1 reportsframe.grid(column=0, row=1, sticky=(N, W), padx=(10, 0)) #button button.grid(column=0, row=2, sticky=(N, W), padx=(10, 0)) self.pb2.grid(column=0, row=3, sticky=(E, W), padx=(10, 10)) self.update("Describe") #dateframe dateframe.grid(column=0, row=4, sticky=(W), padx=(10, 0)) yearlbl = ttk.Label(dateframe, text="Year", width=10) weeklbl = ttk.Label(dateframe, text="Week", width=10) self.year = Text(dateframe, width=10, height=1) self.week = Text(dateframe, width=10, height=1) self.calendar = textbox.textbox(dateframe, 0, 3, 1, 40) self.calendar.readonly(True) datebutton = ttk.Button(dateframe, text='translate week number', command=lambda: self.transweek()) yearlbl.grid(column=0, row=0, sticky=(W)) weeklbl.grid(column=0, row=1, sticky=(W)) self.year.grid(column=1, row=0, sticky=(W)) self.week.grid(column=1, row=1, sticky=(W)) datebutton.grid(column=0, row=2, sticky=(W))
def get_object_outline(self, key):
    """Let the user click polygon points around an OBJECT to track.

    Draws an instruction banner, routes mouse events to select_object, and
    blocks until at least 3 points are clicked or the selection is
    cancelled.  On success a DataClass record with an HSV hue histogram of
    the selected region is appended to self.track_objects, and the outline
    is drawn onto self.frame.

    Returns the key code from the first cv2.waitKey call.
    """
    # Instruction banner across the top of the frame.
    cv2.rectangle(self.frame, (0, 0), (500, 50), (100, 0, 100), -1)
    cv2.putText(self.frame, "Select points around the OBJECT to track",
                (5, 14), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.putText(self.frame,
                "When done, press o to convert to object or any to continue",
                (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.putText(self.frame, "When less than 3 pts, any key to cancel (q to quit)",
                (5, 44), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.imshow("frame", self.frame)
    # Change the function bound to mouse to the object-selection handler.
    cv2.setMouseCallback("frame", self.select_object)
    user_input = cv2.waitKey(0)
    # Keep an unannotated copy for histogram extraction below.
    orig = self.frame.copy()
    while len(self.temp_object) < 3:
        cv2.imshow("frame", self.frame)
        if self.cancel_flag:
            self.temp_object = []
            break
        key = cv2.waitKey(0)
    if len(self.temp_object) > 2:
        # Build a new tracked-object record from the clicked polygon.
        object_data_temp = DataClass()
        outline = self.temp_object
        self.temp_object = np.array(self.temp_object)
        s = self.temp_object.sum(axis=1)
        tl = self.temp_object[np.argmin(s)]  # point nearest top-left
        br = self.temp_object[np.argmax(s)]  # point nearest bottom-right
        roi = orig[tl[1]:br[1], tl[0]:br[0]]
        roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        # Hue histogram of the ROI, normalised to 0..255, used for tracking.
        object_data_temp.roiHist = cv2.calcHist([roi.astype('float32')], [0],
                                                None, [180], [0, 180])
        object_data_temp.roiHist = cv2.normalize(object_data_temp.roiHist,
                                                 object_data_temp.roiHist,
                                                 0, 255, cv2.NORM_MINMAX)
        object_data_temp.histogram_line = (tl[0], tl[1], br[0], br[1])
        self.track_objects.append(object_data_temp)
        self.temp_object = []
        # Label and outline the new object on the display frame.
        cv2.putText(self.frame, "ob " + str(len(self.track_objects)),
                    (tl[0], tl[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 70, 100), 1)
        current_poly = np.array(outline, np.int32)
        current_poly = current_poly.reshape((-1, 1, 2))
        cv2.polylines(self.frame, [current_poly], True, (0, 70, 100))
    # bug fix: original read `self.cancel_flag is False`, a no-op identity
    # comparison; the flag must be reset with an assignment.
    self.cancel_flag = False
    return user_input
def __init__(self, algorithm_name, c_r, training_type, data_splits, tuning, file_name):
    """Initializes the ML class with a dataset, file_name and specified algorithm.

    algorithm_name -- name of the learning algorithm to construct
    c_r            -- presumably a classification/regression switch — TODO confirm
    training_type  -- forwarded to initialize_algorithm
    data_splits    -- dataset split specification passed to DataClass
    tuning         -- hyper-parameter tuning flag/configuration
    file_name      -- dataset file to load
    """
    # Load the dataset first, then build the algorithm around it.
    self.dataclass = DataClass(file_name, data_splits, c_r)
    self.algorithm = self.initialize_algorithm(algorithm_name, c_r, tuning,
                                               training_type)
def __init__(self, algorithm_name, c_r, tuning, data_splits, display_settings, file_name):
    """Initializes the ML class with a dataset, file_name and specified algorithm.

    algorithm_name   -- name of the learning algorithm to construct
    c_r              -- presumably a classification/regression switch — TODO confirm
    tuning           -- hyper-parameter tuning flag/configuration
    data_splits      -- dataset split specification passed to DataClass
    display_settings -- NOTE(review): accepted but unused here; presumably
                        consumed elsewhere — confirm.
    file_name        -- dataset file to load (also kept on self.file_name)
    """
    self.dataclass = DataClass(file_name, data_splits, c_r)
    self.file_name = file_name
    self.algorithm = self.initialize_algorithm(algorithm_name, c_r, tuning)
def single_stock_data(path, split, batch, unit_len, lstm_para, hmm_para):
    """Run the full LSTM + HMM + linear-regression pipeline for one stock.

    path     -- CSV path shaped like './<dir>/<stock_name>.csv'; the stock
                name is taken from the third path segment
    split    -- train/test split passed to ModelData
    batch    -- DataLoader batch size
    unit_len -- sequence unit length passed to ModelData
    lstm_para -- dict of LSTM hyper-parameters (input_size, hidden_size,
                 batch_first, drop, epochs, learning_rate)
    hmm_para  -- HMM hyper-parameters forwarded to hmm_state

    Side effect: saves the per-stock result DataFrame to
    './<dir>_Result/<stock_name>.npy'.
    """
    stock_name = path.split('/')[2].split('.')[0]
    print("Start mission " + stock_name)
    # Column names used by the model.
    data_col_list = ['Close_y', 'Volume_y', 'Close_x']
    stock_data = DataClass.ModelData(path, split, unit_len, data_col_list)
    # Wrap the training data into batched tensors.
    train_batches = DataLoader(dataset=stock_data.lstm_data,
                               batch_size=batch,
                               pin_memory=True)
    # Build the LSTM model.
    lstm_model = ModelSet.MidLSTM(input_size=lstm_para['input_size'],
                                  hidden_size=lstm_para['hidden_size'],
                                  batch_first=lstm_para['batch_first'],
                                  drop=lstm_para['drop'])
    # Switch to training mode and train the LSTM.
    lstm_model.train()
    lstm_model = train_lstm_model(lstm_model=lstm_model,
                                  train_batches=train_batches,
                                  epochs=lstm_para['epochs'],
                                  learning=lstm_para['learning_rate'])
    # Switch to evaluation mode and generate feature values.
    lstm_model.eval()
    stock_data.feature_generate(lstm_model)
    # Train the HMM on the original data.
    hmm_train, hmm_test = stock_data.hmm_data_origin(['Close_y', 'Volume_y'])
    # Store the HMM state outputs as features.
    hmm_train_out = hmm_state(hmm_para, hmm_train)
    hmm_test_out = hmm_state(hmm_para, hmm_test)
    stock_data.hmm_state_origin(hmm_train_out, hmm_test_out)
    # Build the linear-regression dataset and predict the test split.
    x_train_ori, y_train, x_test_ori, y_test = \
        stock_data.linear_regression_dataset(['price_fea', 'index_price_fea'])
    x_train = product_linear_data(x_train_ori)
    x_test = product_linear_data(x_test_ori)
    y_test_pre = linear_regression(x_train, y_train, x_test)
    # Evaluate model quality.
    stock_evaluate = evaluate(y_test, y_test_pre)
    print(stock_evaluate)
    mid_model_result = pd.DataFrame({
        'stock_name': [stock_name],
        'x_train': x_train_ori,
        'y_train': y_train,
        'x_test': x_test_ori,
        'y_test': y_test,
        # bug fix: was np.arrau (AttributeError at runtime).
        'y_test_predict': np.array(y_test_pre)
    })
    # bug fix: `data_path` is not defined in this scope; the function's
    # `path` parameter holds the same value.
    save_path = './' + path.split('/')[1] + '_Result/' + stock_name + '.npy'
    np.save(save_path, mid_model_result)
    print("Finish mission " + stock_name)
def get_object_outline(self, key):
    """Let the user click polygon points around an OBJECT to track.

    Draws an instruction banner, routes mouse events to select_object, and
    blocks until at least 3 points are clicked or the selection is
    cancelled.  On success a DataClass record with an HSV hue histogram of
    the selected region is appended to self.track_objects, and the outline
    is drawn onto self.frame.

    Returns the key code from the first cv2.waitKey call.
    """
    # Instruction banner across the top of the frame.
    cv2.rectangle(self.frame, (0, 0), (500, 50), (100, 0, 100), -1)
    cv2.putText(self.frame, "Select points around the OBJECT to track",
                (5, 14), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.putText(self.frame,
                "When done, press o to convert to object or any to continue",
                (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.putText(self.frame, "When less than 3 pts, any key to cancel (q to quit)",
                (5, 44), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    cv2.imshow("frame", self.frame)
    # Change the function bound to mouse to the object-selection handler.
    cv2.setMouseCallback("frame", self.select_object)
    user_input = cv2.waitKey(0)
    # Keep an unannotated copy for histogram extraction below.
    orig = self.frame.copy()
    while len(self.temp_object) < 3:
        cv2.imshow("frame", self.frame)
        if self.cancel_flag:
            self.temp_object = []
            break
        key = cv2.waitKey(0)
    if len(self.temp_object) > 2:
        # Build a new tracked-object record from the clicked polygon.
        object_data_temp = DataClass()
        outline = self.temp_object
        self.temp_object = np.array(self.temp_object)
        s = self.temp_object.sum(axis=1)
        tl = self.temp_object[np.argmin(s)]  # point nearest top-left
        br = self.temp_object[np.argmax(s)]  # point nearest bottom-right
        roi = orig[tl[1]:br[1], tl[0]:br[0]]
        roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        # Hue histogram of the ROI, normalised to 0..255, used for tracking.
        object_data_temp.roiHist = cv2.calcHist([roi.astype('float32')], [0],
                                                None, [180], [0, 180])
        object_data_temp.roiHist = cv2.normalize(object_data_temp.roiHist,
                                                 object_data_temp.roiHist,
                                                 0, 255, cv2.NORM_MINMAX)
        object_data_temp.histogram_line = (tl[0], tl[1], br[0], br[1])
        self.track_objects.append(object_data_temp)
        self.temp_object = []
        # Label and outline the new object on the display frame.
        cv2.putText(self.frame, "ob " + str(len(self.track_objects)),
                    (tl[0], tl[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 70, 100), 1)
        current_poly = np.array(outline, np.int32)
        current_poly = current_poly.reshape((-1, 1, 2))
        cv2.polylines(self.frame, [current_poly], True, (0, 70, 100))
    # bug fix: original read `self.cancel_flag is False`, a no-op identity
    # comparison; the flag must be reset with an assignment.
    self.cancel_flag = False
    return user_input
def loadData(self):
    """Load the space-separated dataset named by self.fileName.

    Each line is `feat1 feat2 ... featN label` (label is the last field).
    One DataClass.Data object per line is appended to self.data.
    """
    # bug fix: the file was opened twice (once only to count lines) and
    # neither handle was ever closed; iterate one managed handle instead.
    with open(self.fileName, "r") as infile:
        for strLine in infile:
            strLine = strLine.strip('\n')
            feats = strLine.split(' ')
            label = feats[len(feats) - 1]  # last field is the label
            # bug fix: the original sliced feats[0:len(feats) - 2], which
            # dropped the final feature as well as the label.
            # NOTE(review): if data lines carry a trailing separator the
            # old -2 may have compensated for an empty field — verify
            # against the actual data format.
            feats = feats[0:len(feats) - 1]
            dataObj = DataClass.Data(feats, label)
            self.data.append(dataObj)
def __init__(self, mytab, maindata): self.mydata = maindata #mytab widgets labelsframe = Frame(mytab) reportsframe = Frame(mytab) button = ttk.Button(mytab, text='Zeh Button', command=lambda: self.printreports()) self.pb2 = ttk.Progressbar(mytab, length=100, mode='determinate') dateframe = Frame(mytab) #Labelframe title = tkFont.Font(size=16, underline=1, weight="bold") reportname = ttk.Label(labelsframe, text="Report", font=title) reportops = ttk.Label(labelsframe, text="Report Options", font=title) reportout = ttk.Label(labelsframe, text="Output", font=title) reportname.grid(column=0, row=0) reportops.grid(column=1, row=0) reportout.grid(column=2, row=0) labelsframe.grid(column=0, row=0, sticky=(N,W), pady=(0,10)) labelsframe.columnconfigure(0, minsize = 100) labelsframe.columnconfigure(1, minsize = 150) labelsframe.columnconfigure(2, minsize = 300) #reportframe counter = 0 self.reports = {} for myreport in data.getrunnablereports("All"): self.reports[myreport] = rrow.reportrow(reportsframe, myreport, 0, counter, data.getreportoptions(myreport), data.getoutputs(myreport)) counter += 1 reportsframe.grid(column=0, row=1, sticky=(N,W), padx=(10,0)) #button button.grid(column=0, row=2, sticky=(N,W), padx=(10,0)) self.pb2.grid(column=0, row=3, sticky=(E,W),padx=(10,10)) self.update("Describe") #dateframe dateframe.grid(column=0, row=4, sticky=(W), padx=(10,0)) yearlbl = ttk.Label(dateframe, text="Year", width=10) weeklbl = ttk.Label(dateframe, text="Week", width=10) self.year = Text(dateframe, width=10, height=1) self.week = Text(dateframe, width=10, height=1) self.calendar = textbox.textbox(dateframe, 0, 3, 1, 40) self.calendar.readonly(True) datebutton = ttk.Button(dateframe, text='translate week number', command=lambda: self.transweek()) yearlbl.grid(column=0, row=0, sticky=(W)) weeklbl.grid(column=0, row=1, sticky=(W)) self.year.grid(column=1, row=0, sticky=(W)) self.week.grid(column=1, row=1, sticky=(W)) datebutton.grid(column=0, row=2, sticky=(W))
def run(data, kalmanFilter, flag):
    """Run the Kalman filter over *data* and collect per-point estimates.

    data         -- iterable of measurement objects exposing get_time()
    kalmanFilter -- filter whose 4x1 state vector x is read after each step
    flag         -- truthy: (re)initialise via start() for each point;
                    falsy: step via kalman() using the previous timestamp
    Returns a list of DataClass.data1 estimate records.

    NOTE(review): when flag is falsy, `timestamp` is read before any
    assignment on the first iteration (NameError) — presumably callers
    always pass a truthy flag on the first run; confirm upstream.
    """
    state_estimations = []
    for data_pt in data:
        if flag:
            timestamp = start(data_pt, kalmanFilter, flag)
        else:
            timestamp = kalman(data_pt, kalmanFilter, timestamp)
        # Unpack position and velocity from the 4x1 state vector.
        x = kalmanFilter.x
        p_x, p_y, Vel_x, Vel_y = x[0, 0], x[1, 0], x[2, 0], x[3, 0]
        # bug fix: the original dict literal listed the 'Vel_x' key twice;
        # the duplicate was silently discarded.
        g = {'time': data_pt.get_time(),
             'sensor': 'estimate',
             'x': p_x,
             'y': p_y,
             'Vel_x': Vel_x,
             'Vel_y': Vel_y}
        state_estimation = DataClass.data1(g)
        state_estimations.append(state_estimation)
    return state_estimations
def update(self, eventtype):
    """Show only the report rows runnable for *eventtype*.

    Hides every row first, then re-shows the subset that
    data.getrunnablereports(eventtype) reports as valid.
    """
    # bug fix (portability): dict.iterkeys() is Python-2-only; sorting the
    # dict directly yields the same ordered keys on both Python 2 and 3.
    for key in sorted(self.reports):
        self.reports[key].hide()
    for validreport in data.getrunnablereports(eventtype):
        self.reports[validreport].show()
def load_celebrity_image(path, file_name, picked_names):
    """Load CACD-style celebrity metadata from a MATLAB .mat file.

    path         -- directory containing the .mat file
    file_name    -- 'celebrityImageData*.mat' or 'celebrityData*.mat'
    picked_names -- names whose image records should be collected

    Returns a list of lists of CelebrityImageInfo grouped by
    "<age_label>_<identity>" for image data, a flat list of CelebrityInfo
    for celebrity data, or None when the file name matches neither.
    """
    fname = str(file_name)
    # bug fix: the original tested `if fname.find('.mat'):`, which is truthy
    # even when '.mat' is absent (str.find returns -1); compare explicitly.
    if fname.find('.mat') == -1:
        return
    if fname.find('celebrityImageData') != -1:
        f = scio.loadmat(path + file_name)
        celebrityImageData = f['celebrityImageData']
        # Unpack the MATLAB struct fields (each accessed via [0, 0]).
        ages = celebrityImageData['age'][0, 0]
        identities = celebrityImageData['identity'][0, 0]
        years = celebrityImageData['year'][0, 0]
        ranks = celebrityImageData['rank'][0, 0]
        lfws = celebrityImageData['lfw'][0, 0]
        births = celebrityImageData['birth'][0, 0]
        names = celebrityImageData['name'][0, 0]
        images_age_label_identity_list = []
        age_label_identity_list = []
        # Hoisted: the original rebuilt this list on every iteration.
        name_keys = names[:, 0].tolist()
        for name in picked_names:
            index = name_keys.index([name])
            celebrityImageInfo = DataClass.CelebrityImageInfo(
                age=int(ages[index, 0]),
                identity=int(identities[index, 0]),
                year=int(years[index, 0]),
                rank=int(ranks[index, 0]),
                lfw=int(lfws[index, 0]),
                birth=int(births[index, 0]),
                name=name)
            # Group records by "<age_label>_<identity>".
            age_label_identity = (str(celebrityImageInfo.age_label) + '_' +
                                  str(celebrityImageInfo.identity))
            try:
                pos = age_label_identity_list.index(age_label_identity)
                images_age_label_identity_list[pos].append(celebrityImageInfo)
            except ValueError:
                # First record of this group: open a new bucket.
                age_label_identity_list.append(age_label_identity)
                images_age_label_identity_list.append([celebrityImageInfo])
        return images_age_label_identity_list
    elif fname.find('celebrityData') != -1:
        f = scio.loadmat(path + file_name)
        celebrityData = f['celebrityData']
        celebrityInfo_list = []
        # celebrityData struct fields to CelebrityInfo records.
        names = celebrityData['name'][0, 0]
        identities = celebrityData['identity'][0, 0]
        births = celebrityData['birth'][0, 0]
        ranks = celebrityData['rank'][0, 0]
        lfws = celebrityData['lfw'][0, 0]
        for i in range(len(names)):
            celebrityInfo = DataClass.CelebrityInfo(
                name=names[i, 0][0],
                identity=int(identities[i, 0]),
                birth=int(births[i, 0]),
                rank=int(ranks[i, 0]),
                lfw=int(lfws[i, 0]))
            celebrityInfo_list.append(celebrityInfo)
        return celebrityInfo_list
    else:
        return
        # Reduct found: commit the final attribute and stop.
        # NOTE(review): the head of this function is not visible and the
        # original indentation was lost; this nesting is a best-effort
        # reconstruction — verify against the full source.
        return selected_attr + [select_attr_step], min_classify_num_reduct
    else:
        # Not yet a reduct: keep the chosen attribute and the narrowed
        # instance set, then continue the selection loop.
        selected_attr = selected_attr + [select_attr_step]
        considered_instance = considered_instance_no_reduct


if __name__ == '__main__':
    # data = [['1', '0', '2', '1', '1'],
    #         ['1', '0', '2', '0', '1'],
    #         ['1', '2', '0', '0', '2'],
    #         ['1', '2', '2', '1', '0'],
    #         ['2', '1', '0', '0', '2'],
    #         ['2', '1', '1', '0', '2'],
    #         ['2', '1', '2', '1', '1']]
    # core = get_core(dc)
    # assert core == [1]  # CORE(cd)=1 (the second attr)
    #
    # considered_instance = np.array([True] * dc.len, np.bool)
    # is_reduct, classify_num, considered_instance = check_distinct(dc, core, considered_instance)
    # assert (is_reduct, classify_num, considered_instance) == (False, 1, [False] * 2 + [True] * 5)
    # selected_attr, max_classify_num = attribute_select(dc)
    # assert selected_attr == {1, 3}
    # [case2] attribute selection on the weather sample data set.
    dc = DataClass.DataClass([str] * 5)
    dc.read(r'..\sample\weather.txt', True)
    selected_attr, max_classify_num = attribute_select(dc)
    assert selected_attr == {0, 1, 3}
count = 1 # 连续的空值 while index + count < len(need_alert[j]) and need_alert[j][ index + count] == need_alert[j][index] + count: count += 1 before = after = 0 i = need_alert[j][index] if i > 0: before = data[i - 1][j] if i + count - 1 < len(data) - 1: after = data[i + count - 1 + 1][j] if i == 0: before = after if i + count - 1 == len(data) - 1: after = before d = (after - before) / (count + 1) for c in range(count): data[i + c][j] = before + d * (c + 1) index = index + count return data_class if __name__ == "__main__": data = dc.DataClass([str] + [float] * 12) data.read(r"E:\_Python\DataPreprocessing\sample\fz_micro.txt", False) # delete_handle(data,[i for i in range(1, 13)]) data.parse() mid_interpolation_handle(data, [i for i in range(1, 13)]) print(data.data)
import sys sys.path.insert(1, 'Class') import DataClass as dtc import matplotlib.pyplot as plt if (len(sys.argv) < 2) : print("Give a file") exit() filename = sys.argv.pop(1) d = dtc.DataClass(filename) try : id = d.header.index('Hogwarts House') except ValueError : print ("Come on, theme's harry potter, not some space opera !") exit() # separate the tuples in tables of houses gr = [] hu = [] sl = [] ra = [] for tup in d.data : if (tup[id] == 'Gryffindor') : gr.append(tup) if (tup[id] == 'Hufflepuff') : hu.append(tup) if (tup[id] == 'Slytherin') :
def train():
    """Train the BAAM bone classifier on the projected-bone HDF5 dataset.

    Builds a TF1 graph (exponential-decay learning rate, Adam optimiser,
    softmax head), streams batches from the train/valid HDF5 sets,
    writes TensorBoard summaries, and checkpoints every SAVE_INTERVAL
    steps.  Relies on module-level MAX_STEPS, SAVE_INTERVAL,
    checkpoint_dir, BAAM, accuracy, loss_logits, dataClass and procIm.
    """
    print('\n\n', 'training', '\n\n')
    sess = tf.Session()
    dequeueSize = 100
    # Learning-rate schedule, counted in dequeued examples rather than steps.
    global_step = tf.Variable(0, trainable=False)
    starter_learning_rate = 0.1
    decay_step = 25
    decay_rate = 0.96
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                               global_step * dequeueSize,
                                               decay_step * dequeueSize,
                                               decay_rate,
                                               staircase=True)
    tf.summary.scalar('learning_rate', learning_rate)
    ######################
    # DATASET PARAMETERS #
    ######################
    if os.path.exists(
            '/media/asgharpn/daten2017-03/Bone_Machine_learning/Learning_dataset/projected_augmented_not_squared_yz_test_01_without_2mice_10'
    ):
        print('\nusing full dataset\n')
        dataBaseDir = '/media/asgharpn/daten2017-03/Bone_Machine_learning/Learning_dataset/projected_augmented_not_squared_yz_test_01_without_2mice_10/'
    else:
        raise NameError('Dataset can not be found')
    trainHdf5 = dataBaseDir + 'bone_projected_train_set.hdf5'
    validHdf5 = dataBaseDir + 'bone_projected_valid_set.hdf5'
    cropSize = 733
    batchSize = 50
    stretchLow = 0.1  # stretch channels lower percentile
    stretchHigh = 99.9  # stretch channels upper percentile
    imSize_x = 733
    imSize_z = 161
    numClasses = 4
    numChan = 1
    data = dataClass.Data(trainHdf5, ['data', 'Index'], batchSize)
    dataTest = dataClass.Data(validHdf5, ['data', 'Index'],
                              batchSize * 2)  # larger batch size at test time
    ### define model
    is_training = tf.placeholder(tf.bool, [], name='is_training')  # for batch norm
    # NOTE(review): `input` shadows the Python builtin of the same name.
    input = tf.placeholder('float32',
                           shape=[None, imSize_x, imSize_z, numChan],
                           name='input')  # for batch norm
    print(input.get_shape)
    labels = tf.placeholder('float32', shape=[None, numClasses],
                            name='labels')  # for batch norm
    logits = BAAM(input, is_training)
    predicted_y = tf.nn.softmax(logits, name='softmax')
    acc = accuracy(predicted_y, labels)
    cross_entropy = loss_logits(logits, labels)
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(
        cross_entropy, global_step=global_step)
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
    # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(checkpoint_dir + '/train', sess.graph)
    test_writer = tf.summary.FileWriter(checkpoint_dir + '/test', sess.graph)
    sess.run(tf.global_variables_initializer(), {is_training: True})
    # training loop
    for i in range(MAX_STEPS):
        if i % 50 == 0:
            # Every 50 steps: evaluate a validation batch and record
            # execution stats/summaries.
            batch = dataTest.getBatch()
            processedBatch = procIm.preProcessImages(batch['data'],
                                                     imSize_x, imSize_z,
                                                     cropSize,
                                                     numChan,
                                                     rescale=False,
                                                     stretch=False,
                                                     means=None,
                                                     stds=None,
                                                     stretchLow=stretchLow,
                                                     stretchHigh=stretchHigh,
                                                     jitter=False,
                                                     randTransform=False)
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            summary, cur_test_acc, cur_test_loss = sess.run(
                [merged, acc, cross_entropy],
                feed_dict={
                    is_training: False,
                    input: processedBatch,
                    labels: batch['Index']
                },
                options=run_options,
                run_metadata=run_metadata)
            train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
            test_writer.add_summary(summary, i)
            print('Adding run metadata for', i)
            print('Valid accuracy at step %s: %s, loss: %s' %
                  (i, cur_test_acc, cur_test_loss))
        # One optimisation step on a preprocessed training batch.
        batch = data.getBatch()
        processedBatch = procIm.preProcessImages(batch['data'], imSize_x,
                                                 imSize_z, cropSize, numChan,
                                                 rescale=False,
                                                 stretch=False,
                                                 means=None,
                                                 stds=None,
                                                 stretchLow=stretchLow,
                                                 stretchHigh=stretchHigh,
                                                 jitter=False,
                                                 randTransform=False)
        summary, _, cur_train_acc, cur_train_loss = sess.run(
            [merged, train_step, acc, cross_entropy],
            feed_dict={
                is_training: True,
                input: processedBatch,
                labels: batch['Index']
            })
        train_writer.add_summary(summary, i)
        print('Train accuracy at step %s: %s, loss: %s' %
              (i, cur_train_acc, cur_train_loss))
        if i % SAVE_INTERVAL == 0:
            checkpoint_path = os.path.join(checkpoint_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=i)
    train_writer.close()
    test_writer.close()
def main(): pd.options.mode.chained_assignment = None pd.set_option('display.max_rows', None) pd.set_option('display.max_columns', None) pd.set_option('expand_frame_repr', False) print "Pandas version: {}".format(pd.__version__) print "Fun Fact Finder now online!!" maindata = data.datasets() root = Tk() #removing the Tkinter logo by creating a temp blank icon file ICON = (b'\x00\x00\x01\x00\x01\x00\x10\x10\x00\x00\x01\x00\x08\x00h\x05\x00\x00' b'\x16\x00\x00\x00(\x00\x00\x00\x10\x00\x00\x00 \x00\x00\x00\x01\x00' b'\x08\x00\x00\x00\x00\x00@\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' '\x00\x01\x00\x00\x00\x01') + b'\x00'*1282 + b'\xff'*64 _, ICON_PATH = mkstemp() with open(ICON_PATH, 'wb') as icon_file: icon_file.write(ICON) root.iconbitmap(default=ICON_PATH) root.title("Fun Fact Finder") root.protocol("WM_DELETE_WINDOW",lambda: ask_quit(root, maindata)) root.geometry('800x800+10+10') root.minsize(550,500) root.columnconfigure(0, weight=1) root.rowconfigure(0, weight=1) tkk.Sizegrip(root).grid(column=999, row=999, sticky=(S,E)) mytabs = tkk.Notebook(root) tabevents = Frame(mytabs) mytabs.add(tabevents, text="Choose Events") taboptions = Frame(mytabs) mytabs.add(taboptions, text="Options") mytabs.tab(1, state="disabled") tabudfs = Frame(mytabs) mytabs.add(tabudfs, text="UDFs") mytabs.tab(2, state="disabled") tabreports = Frame(mytabs) mytabs.add(tabreports, text="Reports") mytabs.tab(3, state="disabled") taboutput = Frame(mytabs) mytabs.add(taboutput, text="output") mytabs.tab(4, state="disabled") reportTab = gRTab.ReportsTab(tabreports, maindata) udfTab = gUTab.UDFTab(tabudfs, reportTab, maindata) eventsTab = gETab.EventsTab(tabevents, reportTab, udfTab, maindata) optionsTab = gOTab.OptionsTab(taboptions, maindata) outputTab = gOut.output(taboutput) maindata.setoutput(outputTab) #Adding Menu root.option_add('*tearOff', FALSE) menubar = Menu(root) root['menu'] = menubar menu_file = Menu(menubar) menubar.add_cascade(menu=menu_file, label='File') 
menu_file.add_command(label='Open', command=lambda: gFunc.loadreports(mytabs, tabevents, eventsTab)) mytabs.grid(row=0, column=0, sticky=(N,E,S,W)) root.mainloop()
def __init__(self, mytab, maindata):
    """Build the Options tab: behaviour checkboxes, a groupby-depth menu,
    a location-renaming accuracy slider, and a reference-list editor
    (currently not gridded, see the commented refframe.grid below).

    mytab    -- parent tkinter container for this tab
    maindata -- shared application data object, kept on self.data
    """
    # Tk variables backing the option widgets.
    self.teamOfOne = IntVar()
    self.recombine = IntVar()
    self.dateFirst = IntVar()
    self.Eventcombine = StringVar()
    self.Eventcombine.set(data.getDepthLevels()[1])
    self.recombine.set(1)
    self.dateFirst.set(1)
    self.newref = StringVar()
    self.data = maindata
    # mytab widgets
    checkframe = Frame(mytab)
    refframe = Frame(mytab)
    sliderframe = Frame(mytab)
    # checkframe widgets
    self.CteamOfOne = ttk.Checkbutton(
        checkframe, text="Count people in Teams of 1 as individuals?",
        variable=self.teamOfOne, onvalue=1, offvalue=0,
        command=lambda: self.TeamOneIndividual())
    self.CteamRecombine = ttk.Checkbutton(
        checkframe, text="Recombine team donations?",
        variable=self.recombine, onvalue=1, offvalue=0,
        command=lambda: self.TeamRecombine())
    self.CdateFirst = ttk.Checkbutton(
        checkframe, text="Is day first (DD/MM/YYYY)",
        variable=self.dateFirst, onvalue=1, offvalue=0,
        command=lambda: self.SetDateFirst())
    depthlbl = ttk.Label(checkframe, text="Depth for groupby?")
    foo = data.getDepthLevels()
    self.CEventRecombine = ttk.OptionMenu(checkframe, self.Eventcombine,
                                          foo[1], *foo,
                                          command=self.EventRecombine)
    # self.CEventRecombine = ttk.Checkbutton(checkframe, text="Combine Events?", variable = self.Eventcombine, onvalue = True, offvalue = False, command=lambda: self.EventRecombine())
    # location round error: slider controlling rename accuracy.
    self.sliderlabel = ttk.Label(sliderframe,
                                 text="How accurate to rename locations?: ")
    self.slider = Scale(sliderframe, from_=0, to=50, orient="horizontal")
    self.slider.bind("<ButtonRelease-1>", self.updateValue)
    self.myout = textbox.textbox(sliderframe, 0, 2)
    self.myout.readonly(True)
    # refframe widgets: scrollable reference list with add/delete.
    self.lbox = Listbox(refframe, height=5, selectmode='extended')
    s = Scrollbar(refframe, orient=VERTICAL, command=self.lbox.yview)
    self.refbox = ttk.Entry(refframe, textvariable=self.newref)
    addbutton = ttk.Button(refframe, text="Add",
                           command=lambda: self.addnewref(self.newref.get()))
    delbutton = ttk.Button(refframe, text="Delete",
                           command=lambda: self.delrefs())
    # Geometry: frames first, then their children.
    checkframe.grid(column=0, row=0, sticky=(E, W))
    # refframe.grid(column=0, row=1, sticky=(E,W))
    sliderframe.grid(column=0, row=3, sticky=(E, W))
    self.sliderlabel.grid(column=0, row=0, sticky=(E, W))
    self.slider.grid(column=0, row=1, sticky=(E, W))
    self.lbox.grid(column=0, row=0, rowspan=5, sticky=(N, S, E, W))
    s.grid(column=1, row=0, rowspan=6, sticky=(N, S))
    delbutton.grid(column=0, row=6, sticky=(N, S, E, W))
    self.refbox.grid(column=2, row=0, sticky=(N, S, E, W))
    addbutton.grid(column=2, row=1, sticky=(N, S, E, W))
    self.CteamRecombine.grid(column=0, row=0, sticky=(W))
    self.CteamOfOne.grid(column=0, row=1, sticky=(W))
    # self.CdateFirst.grid(column=0, row=2, sticky=(W))
    depthlbl.grid(column=0, row=3, sticky=(W))
    self.CEventRecombine.grid(column=0, row=4, sticky=(W))
def datExp(option, UI=False, Filter=False):
    """Gather user inputs from the GUI selected by *option* and export
    filtered nuclear-data (.dat) files for each requested nuclide.

    option -- "one" (main GUI), "two" (beta GUI) or "three" (parabola GUI)
    UI     -- forwarded to filterData; suppresses console interaction
    Filter -- NOTE(review): unused in the visible code — confirm callers.
    Returns [elementName, lowerBound, higherBound, wantedSpins,
    temperature, massData, energyLim, indata.decay] for further use.
    """
    # This uses the option from the first GUI to get inputs from the correct
    # GUI. Some of the definitions here are used to maintain full use of
    # Markus' code, such as the definition of higherBound in Beta_GUI.
    tryAgainCounter = 1
    if option == "one":
        from GUI import guioutputs
        elementName = str(guioutputs.Z)
        lowerBound = int(guioutputs.isoLow)
        higherBound = int(guioutputs.isoUp)
        energyLim = int(guioutputs.E)
        exitcount = int(guioutputs.exitcount)
        massData = str(guioutputs.mass)
        wantedSpins = str(guioutputs.J).replace(" ", "")
        energyLim = int(guioutputs.E)  # NOTE(review): duplicate of the assignment above
        elementName = elementName.replace(" ", "")
        elementName = elementName.title()
        elementName = elementName.split(',')
        temperature = 0
        betaVariable = 'NULL'  ## Required parameter of DataClass
    elif option == "two":
        from Beta_GUI import betaoutputs
        elementName = str(betaoutputs.Z)
        lowerBound = int(betaoutputs.A)
        higherBound = int(betaoutputs.A)
        betaVariable = str(betaoutputs.B)
        energyLim = int(betaoutputs.E)
        massData = "YES"
        elementName = elementName.title()
        wantedSpins = str(betaoutputs.J).replace(" ", "")
        '''' perTable = open("ElementList.txt","r")
        periodicTable = perTable.readline()
        periodicTable = periodicTable.split(',')
        for item in periodicTable:
            if item == elementName:
                index = periodicTable.index(item)
        if betaVariable == "B+":
            elementName = periodicTable[index-1] + "," + elementName
        if betaVariable == "B-":
            elementName = elementName + "," + periodicTable[index+1]
        '''
        elementName = elementName.replace(" ", "")
        elementName = elementName.split(',')
        temperature = float(betaoutputs.temp)
        exitcount = 0
    elif option == "three":
        from Parabola_GUI import parabolaoutputs
        elementName = str(parabolaoutputs.Z)
        lowerBound = int(parabolaoutputs.A)
        higherBound = int(parabolaoutputs.A)
        # NOTE(review): energyLim is a float here, so the `type(energyLim)
        # is int` check below never resets tryAgainCounter for this path.
        energyLim = 0.0
        massData = "YES"
        wantedSpins = str(parabolaoutputs.J).replace(" ", "")
        elementName = elementName.replace(" ", "")
        elementName = elementName.split(',')
        temperature = float(parabolaoutputs.T)
        exitcount = 0
        betaVariable = 'NULL'  ## Required parameter of DataClass
    if (type(lowerBound) is int and type(higherBound) is int
            and type(energyLim) is int):
        tryAgainCounter = 0
    ## Create dictionaries of ionization data
    addion.make_ion_dict(temperature)
    # This loop goes through each wanted nuclei in the range of A values and
    # makes the variable to be used (and iterated through) to form b in the
    # a=b expression in data class.
    for element in elementName:
        for i in range(lowerBound, higherBound + 1):
            itervar = str(i) + element
            indata = dc.data('ensdf.' + str(i).zfill(3), itervar, option,
                             betaVariable, energyLim)
            indata.filterData(wantedSpins, UI)
            ## Include Atomic Mass Energy Data
            if option == 'one':
                pass
            else:
                md.addMass(indata)
            ## Include ionization effects
            addion.addIonization(indata)
            ## export .dat file
            indata.export("_Fil.dat", wantedSpins)
    if UI:
        # readinput.message= "Data export complete"
        # NOTE(review): bare `exit` is a no-op expression; exit() was
        # presumably intended — confirm before changing behaviour.
        exit
    # If wanted this will return the user inputs for further use.
    return [
        elementName, lowerBound, higherBound, wantedSpins, temperature,
        massData, energyLim, indata.decay
    ]
def h_theta(x, theta):
    """Logistic-regression hypothesis: sigmoid of theta[0] + theta[1:] . x.

    NOTE(review): relies on a module-level sig() (sigmoid) defined
    elsewhere in this file.
    """
    tot = theta[0]  # bias term
    for i in range(len(x)):
        tot += theta[i + 1] * x[i]
    return (sig(tot))


# ############################################################################ #
if (len(sys.argv) < 2):
    print("Give a file")
    exit()
filename = sys.argv.pop(1)
d = dtc.DataClass(filename)
# ############################################################################ #
testacc = False
# ############################################################################ #
# ids = [7, 8, 10, 11, 12, 13, 14, 17, 18]
# ids = [7, 8, 10, 11, 12, 13, 14, 15, 17, 18]
idx = []
idhh = d.header.index("Hogwarts House")
# One weight vector per Hogwarts house.
# NOTE(review): `ids` only appears in the comments above — as shown,
# len(ids) below raises NameError; confirm the real definition upstream.
theta = {'gr': 0, 'hu': 0, 'sl': 0, 'ra': 0}
theta['gr'] = [0] * (len(ids) + 1)
theta['hu'] = [0] * (len(ids) + 1)
theta['sl'] = [0] * (len(ids) + 1)
theta['ra'] = [0] * (len(ids) + 1)
def __init__(self, algorithm_name, c_r, data_splits, tuning, hidden_layers, hidden_nodes, file_name):
    """Initializes the ML class with a dataset, file_name and specified algorithm.

    algorithm_name -- name of the learning algorithm to construct
    c_r            -- presumably a classification/regression switch — TODO confirm
    data_splits    -- dataset split specification passed to DataClass
    tuning         -- hyper-parameter tuning flag/configuration
    hidden_layers  -- NOTE(review): accepted but unused in this constructor;
    hidden_nodes   -- presumably consumed elsewhere — confirm.
    file_name      -- dataset file to load
    """
    self.dataclass = DataClass(file_name, data_splits, c_r)
    self.algorithm = self.initialize_algorithm(algorithm_name, c_r, tuning)
"""Small CLI: load the file named on the command line into a DataClass
and display its contents."""
import sys

import DataClass as dtc

if (len(sys.argv) < 2):
    print("Give a file")
    exit()
filename = sys.argv.pop(1)
try:
    data = open(filename, "r")
except IOError:
    print("Wrong file duh.")
    exit()
# bug fix: the file handle was never closed; keep it open only for the
# lifetime of the DataClass that reads from it.
try:
    d = dtc.DataClass(data)
    d.init()
    d.disp()
finally:
    data.close()
def main(): pd.options.mode.chained_assignment = None pd.set_option('display.max_rows', None) pd.set_option('display.max_columns', None) pd.set_option('expand_frame_repr', False) print "Pandas version: {}".format(pd.__version__) print "Fun Fact Finder now online!!" maindata = data.datasets() root = Tk() #removing the Tkinter logo by creating a temp blank icon file ICON = ( b'\x00\x00\x01\x00\x01\x00\x10\x10\x00\x00\x01\x00\x08\x00h\x05\x00\x00' b'\x16\x00\x00\x00(\x00\x00\x00\x10\x00\x00\x00 \x00\x00\x00\x01\x00' b'\x08\x00\x00\x00\x00\x00@\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' '\x00\x01\x00\x00\x00\x01') + b'\x00' * 1282 + b'\xff' * 64 _, ICON_PATH = mkstemp() with open(ICON_PATH, 'wb') as icon_file: icon_file.write(ICON) root.iconbitmap(default=ICON_PATH) root.title("Fun Fact Finder") root.protocol("WM_DELETE_WINDOW", lambda: ask_quit(root, maindata)) root.geometry('800x800+10+10') root.minsize(550, 500) root.columnconfigure(0, weight=1) root.rowconfigure(0, weight=1) tkk.Sizegrip(root).grid(column=999, row=999, sticky=(S, E)) mytabs = tkk.Notebook(root) tabevents = Frame(mytabs) mytabs.add(tabevents, text="Choose Events") taboptions = Frame(mytabs) mytabs.add(taboptions, text="Options") mytabs.tab(1, state="disabled") tabudfs = Frame(mytabs) mytabs.add(tabudfs, text="UDFs") mytabs.tab(2, state="disabled") tabreports = Frame(mytabs) mytabs.add(tabreports, text="Reports") mytabs.tab(3, state="disabled") taboutput = Frame(mytabs) mytabs.add(taboutput, text="output") mytabs.tab(4, state="disabled") reportTab = gRTab.ReportsTab(tabreports, maindata) udfTab = gUTab.UDFTab(tabudfs, reportTab, maindata) eventsTab = gETab.EventsTab(tabevents, reportTab, udfTab, maindata) optionsTab = gOTab.OptionsTab(taboptions, maindata) outputTab = gOut.output(taboutput) maindata.setoutput(outputTab) #Adding Menu root.option_add('*tearOff', FALSE) menubar = Menu(root) root['menu'] = menubar menu_file = Menu(menubar) menubar.add_cascade(menu=menu_file, label='File') 
menu_file.add_command( label='Open', command=lambda: gFunc.loadreports(mytabs, tabevents, eventsTab)) mytabs.grid(row=0, column=0, sticky=(N, E, S, W)) root.mainloop()