def __send_file(self, line, client, handler):
    """Sends the file stored under the given bucket and file name."""
    response = ""
    bucket_name, file_name = Parser().args(line)
    data = handler.down(bucket_name, file_name)
    if data:
        size = len(data)
        header = f"{bucket_name} {file_name} {size}"
        sent = False
        while not sent:  # resend the header until a confirmation arrives
            client.send(header.encode())
            sent = client.recv(self.BUFFER_SIZE)
        print(f"# Sending {file_name}")
        with BytesIO(data) as buffer:  # requires: from io import BytesIO
            while True:
                bytes_read = buffer.read(self.BUFFER_SIZE)
                if not bytes_read:
                    break
                try:
                    client.sendall(bytes_read)
                except OSError:  # catch socket errors instead of a bare except
                    print("# Error while sending the file. Please retry.")
                    return response
        print("# Sent")
        client.recv(self.BUFFER_SIZE)  # wait for the final acknowledgement
        response = "Success!"
    else:
        client.send("not found".encode())
        response = f"Not found: {bucket_name}/{file_name}"
    return response
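# The sender above ships a "<bucket> <file> <size>" header, waits for an
# acknowledgement, streams the payload in BUFFER_SIZE chunks, then waits for a
# final acknowledgement. The receiving side is not part of this section; below
# is a minimal sketch of what it could look like, assuming a plain TCP socket
# and the same buffer size ("receive_file", "sock", and the b"ok" acks are
# illustrative names, not the project's actual API).
def receive_file(sock, buffer_size=1024):
    header = sock.recv(buffer_size).decode()
    if header == "not found":
        return None
    bucket_name, file_name, size = header.rsplit(" ", 2)
    sock.send(b"ok")  # confirm the header so the sender's resend loop exits
    remaining = int(size)
    chunks = []
    while remaining > 0:
        chunk = sock.recv(min(buffer_size, remaining))
        if not chunk:
            break  # connection closed before the full payload arrived
        chunks.append(chunk)
        remaining -= len(chunk)
    sock.send(b"ok")  # the final acknowledgement the sender blocks on
    return b"".join(chunks)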
def __init__(self, directory):
    this = path.relpath(__file__)  # this file's path
    root = path.split(this)[0]     # drop <filename>.py, keeping the directory
    root = path.join(root, directory)
    if not path.isdir(root):
        print(f"# '{root}' is not found. Creating...")
        os.mkdir(root)
    self.root = root
    self.parser = Parser()
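# A side note on the check above: os.mkdir raises if an intermediate directory
# is missing, and the isdir/mkdir pair is racy when another process creates the
# directory in between. If either matters, the stdlib offers a one-line,
# race-free alternative:
os.makedirs(root, exist_ok=True)  # creates parents too; no error if it exists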
def __init__(self, parent, log_file):
    super().__init__(parent)
    self.log_file = log_file
    self.columns = 2
    self.column_names = ['timestamp', 'text']
    self.parser = Parser(self.log_file)
    self.thread = QThread()
    self.create_parser()
    self.create_gui()
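# create_parser is not shown here. Given the QThread constructed above, the
# usual PyQt pattern is to move the worker object onto the thread and wire its
# signals before starting. A hypothetical sketch (it assumes Parser is a QObject
# exposing a run slot and a finished signal, which this section does not confirm):
def create_parser(self):
    self.parser.moveToThread(self.thread)           # run the parser off the GUI thread
    self.thread.started.connect(self.parser.run)    # start work when the thread spins up
    self.parser.finished.connect(self.thread.quit)  # stop the thread when parsing is done
    self.thread.start()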
def __response(self, line, client):
    """Manages incoming lines."""
    parser = Parser()
    response = ""
    if not parser.parse(line):
        response = f"Command corrupted: {line}"
    else:
        handler = CommandHandler(self.__root_directory)
        instruction = parser.instruction(line)
        if instruction == "up":
            response = self.__receive_file(client, handler)
        elif instruction == "down":
            response = self.__send_file(line, client, handler)
        else:
            response = handler.execute(line)
    return response
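# Parser itself does not appear in this section. From the call sites above and
# in __send_file (parse, instruction, args), a minimal interface it could
# satisfy is sketched below; the command set and the line format
# "<instruction> <bucket> <file>" are assumptions, not the project's confirmed
# protocol.
class Parser:
    COMMANDS = {"up", "down", "list", "delete"}  # assumed instruction set

    def parse(self, line):
        # A line is well-formed when it begins with a known instruction.
        parts = line.split()
        return bool(parts) and parts[0] in self.COMMANDS

    def instruction(self, line):
        return line.split()[0]

    def args(self, line):
        # e.g. "down <bucket> <file>" -> ("<bucket>", "<file>")
        return tuple(line.split()[1:])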
import sys

import matplotlib.pyplot as plt
from keras import backend as K
from keras.layers import Dense, Input
from keras.models import Model
from numpy import array

def weighted_mean_squared_error(y_true, y_pred):
    difference = y_pred - y_true
    weights = array([20, 20, 20, 20, 1, 1, 1])
    return K.mean(K.square(difference * weights), axis=-1)

inputLayer = Input(shape=(21,))
hiddenLayer1 = Dense(11)(inputLayer)
outputLayer = Dense(7)(hiddenLayer1)
model = Model(inputs=inputLayer, outputs=outputLayer)
# Note: compiled with the built-in MSE; weighted_mean_squared_error above is unused here.
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])

p = Parser()
dataFileTrain = sys.argv[1]
dataFileTest = sys.argv[2]
inputDataTrain = array(p.Parse(dataFileTrain))
print(inputDataTrain.shape)
outputDataTrain = array(p.ParseSpine(dataFileTrain))
print(outputDataTrain.shape)

history = model.fit(inputDataTrain, outputDataTrain, batch_size=32, epochs=2000)
print(model.outputs)
for n in K.get_session().graph.as_graph_def().node:
    print(n.name)

# summarize history for loss
plt.plot(history.history['loss'])
plt.title('model loss')
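# Since Keras accepts any callable as a loss, switching the compile call to the
# custom weighted loss defined above is a one-line change (a suggestion, not a
# fix applied to the script):
model.compile(optimizer='adam', loss=weighted_mean_squared_error, metrics=['accuracy'])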
args = [
    [('--lr',), {'type': float, 'default': 0.001, 'help': 'Learning rate'}],
    [('--lrs',), {'type': int, 'default': [30, 60, 90], 'nargs': '+', 'help': 'Learning rate schedule'}],
    [('--lrd',), {'type': float, 'default': 0.1, 'help': 'Learning rate decay'}],
    [('--l2',), {'type': float, 'default': 0.0, 'help': 'L2 regularization'}],
    [('-d',), {'type': float, 'default': 0.0, 'help': 'Dropout probability'}],
    [('--dataset',), {'type': str, 'default': 'mnist', 'help': 'Dataset to use'}],
    [('--root',), {'type': str, 'default': '/mnt/DATA/TorchData', 'help': 'Location of the dataset'}],
    [('--save_path', '-s'), {'type': str, 'default': '/mnt/DATA/ProjectsResults/contestai', 'help': 'Results path'}],
    [('--batch_size', '-bs'), {'type': int, 'default': 64, 'help': 'Batch size'}],
    [('--epochs', '-e'), {'type': int, 'default': 120, 'help': 'Number of epochs'}],
    [('--log_period', '-lp'), {'type': int, 'default': 20, 'help': 'Logging period in number of epochs'}],
    # [('--optimizer', '-opt'), {'type': str, 'default': 'adam', 'help': 'Optimizer to use'}],
    [('--pretrained', '-pr'), {'type': int, 'default': True, 'help': 'Use the pretrained model?'}]
]

argparser = Parser("Beantech challenge")
argparser.add_arguments(args)
opt = argparser.get_dictionary()

dirname = build_dirname(opt, ('lr', 'batch_size'))
savepath = make_save_directory(opt, dirname)

vis = Visdom(port=8098)
vm = VisualManager(vis, 'contestai')

W = 1280
H = 180
path_training_ok = '/mnt/DATA/beantech_contestAI/Dataset2/campioni OK'
path_training_ko = '/mnt/DATA/beantech_contestAI/Dataset2/campioni KO'
path_validation_ok = '/mnt/DATA/beantech_contestAI/Dataset1/campioni OK'
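# Parser, add_arguments, and get_dictionary are project helpers that this
# section does not define. Judging from the [flags, kwargs] pairs above, they
# most likely wrap argparse; a minimal sketch of such a wrapper, written as an
# assumption rather than the project's actual implementation:
import argparse

class Parser:
    def __init__(self, description):
        self._parser = argparse.ArgumentParser(description=description)

    def add_arguments(self, args):
        # Each entry is [flags, kwargs], e.g. [('--lr',), {'type': float, ...}].
        for flags, kwargs in args:
            self._parser.add_argument(*flags, **kwargs)

    def get_dictionary(self):
        return vars(self._parser.parse_args())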
def __init__(self):
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # http_bind_address
    self.BUFFER_SIZE = 1024
    self.parser = Parser()
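# Only the socket construction is shown above; a server built on it would still
# need to bind, listen, and accept. A hypothetical serving loop under those
# assumptions (host, port, and the single recv per connection are illustrative,
# not the project's confirmed behavior):
def serve(self, host="0.0.0.0", port=8080):
    self.socket.bind((host, port))
    self.socket.listen()
    while True:
        client, address = self.socket.accept()
        print(f"# Connection from {address}")
        line = client.recv(self.BUFFER_SIZE).decode()
        print(self.__response(line, client))  # dispatch as in __response above
        client.close()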
def post(self):
    try:
        # Parse the HTTP body
        parser = Parser(['roomId'])
        args = parser.get_args()

        # Read the whole log data
        room_id = args['roomId']
        reader = MongoReader(room_id)
        topics, p_contents = reader.read_topic_n_content()
        print(topics)
        print(p_contents)

        # TODO (Yeji): array variables that will hold the per-topic summaries &
        # keywords --> these are the two variables that go into the request;
        # see the example data below
        contents = []
        keywords = []

        # Abstract summary, keywords
        summary = {}
        keyword = {}
        # POS-tagged stop words shared by the summarizer and the keyword
        # extractor (the original declared the same literal twice).
        stop_words = [('있', 'VV'), ('웃', 'VV'), ('와우', 'IC'), ('시작', 'NNG'), ('협조', 'NNG'),
                      ('하', 'VV'), ('되', 'VV'), ('이', 'VCP'), ('것', 'NNB'), ('들', 'XSN'),
                      ('그', 'MM'), ('수', 'NNB'), ('이', 'NP'), ('보', 'VX'), ('않', 'VX'),
                      ('없', 'VA'), ('나', 'NP'), ('주', 'VV'), ('아니', 'VCN'), ('등', 'NNB'),
                      ('같', 'VA'), ('우리', 'NP'), ('때', 'NNG'), ('년', 'NNB'), ('가', 'VV'),
                      ('한', 'MM'), ('지', 'VX'), ('대하', 'VV'), ('오', 'VV'), ('그렇', 'VA'),
                      ('위하', 'VV'), ('그것', 'NP'), ('두', 'VV'), ('그러나', 'MAJ'), ('못하', 'VX'),
                      ('그런', 'MM'), ('또', 'MAG'), ('더', 'MAG'), ('그리고', 'MAJ'), ('중', 'NNB'),
                      ('씨', 'NNB'), ('지금', 'NNG'), ('그러', 'VV'), ('속', 'NNG'), ('데', 'NNB'),
                      ('안', 'MAG'), ('어떤', 'MM'), ('내', 'NP'), ('다시', 'MAG'), ('이런', 'MM'),
                      ('번', 'NNB'), ('나', 'VX'), ('어떻', 'VA'), ('개', 'NNB'), ('이렇', 'VA'),
                      ('점', 'NNG'), ('좀', 'MAG'), ('잘', 'MAG'), ('이빨', 'NNG')]
        summary_stop_word = set(stop_words)
        keyword_stop_word = set(stop_words)

        # Abstract summary and keywords from the whole data
        for topic in topics:
            if topic not in p_contents:
                continue
            text = p_contents[topic]
            print(' -*-*- WHOLE-TEXT -*-*- ')
            print(text)
            summary_temp = Summary(text, summary_stop_word).run()
            keyword_temp = Keyword(text, keyword_stop_word).run()
            print(' -*-*- SEMI-RESULT -*-*- ')
            print(f"summary_temp => {summary_temp}")
            print(f"keyword_temp => {keyword_temp}")
            summary[topic] = summary_temp
            keyword[topic] = keyword_temp

            # TODO (Yeji): per-topic summary
            contents.append({"topic": topic, "content": summary_temp})

            # TODO (Yeji): per-topic keywords
            # Compute the total, divide each value by the sum, multiply by 100
            # for a percentage, then truncate to an integer.
            total = sum(keyword_temp.values())
            for key, val in keyword_temp.items():
                keywords.append({
                    "keyword": f"{key[0][0]}",
                    "value": int((val / total) * 100)
                })

        total_to_convert = sum(word_val["value"] for word_val in keywords)
        for word_val in keywords:
            word_val['value'] = int(word_val['value'] * (7 / total_to_convert) + 8)

        print('==== Conference Log Summary ====')
        print(contents)
        print(keywords)

        return {
            'status': 200,
            'data': json.dumps({
                "keywords": keywords,
                "contents": contents
            })
        }
    except Exception as e:
        print(e)
        print(traceback.format_exc())
        return {'status': 500, 'message': f'{e}'}
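# The last rescaling loop maps each keyword's raw percentage into roughly the
# 8-15 range (value * 7 / total + 8), e.g. for sizing tags in a word cloud.
# A quick worked example with made-up numbers:
keywords = [{"keyword": "회의", "value": 50},
            {"keyword": "일정", "value": 30},
            {"keyword": "예산", "value": 20}]
total_to_convert = sum(k["value"] for k in keywords)           # 100
for k in keywords:
    k["value"] = int(k["value"] * (7 / total_to_convert) + 8)  # 11, 10, 9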
def __init__(self, rank, file_list, output_dir, params):
    super(ParserMapper, self).__init__(rank, file_list, output_dir, params)
    self.parser = Parser()