def post(self):
    """Handle an uploaded contest-summary file.

    Reads the posted ``summary`` payload and the ``isteam`` flag from the
    request, parses the payload with the matching parser, persists a
    ``Summary`` entity, and redirects to the archive page.  Redirects back
    to the upload form when no file was provided.
    """
    ifile = self.request.get("summary")
    check = self.request.get("isteam")
    if not ifile:
        logging.error("no file selected")
        self.redirect("/addsummary")
        return
    # The two branches were identical except for the parser used, so pick
    # the parser once and share the rest of the flow.
    parser = self.team_process if check == "team" else self.process
    bests, contestdate = parser(ifile)
    s = Summary(summary=ifile, bests=bests, name=contestdate)
    s.put()
    self.redirect("/archive")
def run_decode():
    """Summarize client-supplied text with the pretrained model.

    Expects a JSON body with a ``source`` string, runs the summarization
    model on it, best-effort persists the (article, abstract) pair, and
    returns a JSON string with ``sent_no`` and ``final``.  On failure a
    JSON string with a ``message`` key is returned instead of ``None``.
    """
    logging.debug('decode your input by our pretrained model')
    try:
        # GET request with String from frontend directly
        source = request.get_json()['source']
    except Exception:
        # Was a bare ``except:`` — narrowed, and logged with traceback.
        logging.exception('Fail to catch the data from client.')
        return json.dumps({'message': 'Fail to catch the data from client.'})
    logging.debug('input: {}'.format(source))
    try:
        logging.debug('using the pretrained model.')
        sentNums, summary = summarizationModel.decode.run_(source)
    except Exception as e:
        logging.error(e)
        # Previously fell through and implicitly returned None to the client.
        return json.dumps({'message': 'Fail to decode the input.'})
    logging.debug('The number of sentences is {}'.format(sentNums))
    logging.debug('The abstract is that {}'.format(summary))
    results = {'sent_no': sentNums, 'final': summary}
    try:
        # Persistence is best-effort: a storage failure must not block the
        # response, so the exception is logged and swallowed deliberately.
        article = Content(text=source)
        abstract = Summary(text=summary)
        pair = Article(article=article.id, abstract=abstract.id)
        article.save()
        abstract.save()
        pair.save()
    except Exception as e:
        logging.error(e)
    return json.dumps(results)
def get_summary(self, tr_list):
    """Build a ``Summary`` from the scraped key/value table rows.

    ``tr_list`` is converted to a field mapping by
    ``get_map_from_tr_list``; most fields are required (a missing key
    raises ``KeyError``), while ``Market Cap`` and ``Beta`` are optional
    and default to ``None``.
    """
    # Renamed from ``map`` — that name shadowed the builtin.
    fields = self.get_map_from_tr_list(tr_list)
    return Summary(
        fields['Previous Close'],
        fields['Open'],
        fields['Bid'],
        fields['Ask'],
        fields["Day's Range"],
        fields['52 Week Range'],
        fields['Volume'],
        fields['Avg. Volume'],
        # Market cap is rendered as a currency string (e.g. "1.2T") and
        # must be normalized to a number; it may be absent.
        get_number_from_currency(fields.get('Market Cap', None)),
        fields.get('Beta', None),
        fields['PE Ratio (TTM)'],
        fields['EPS (TTM)'],
        fields['Earnings Date'],
        fields['Forward Dividend & Yield'],
        fields['Ex-Dividend Date'],
        fields['1y Target Est'],
    )
def train():
    """
    Train the Summary sentence-selection network with BCE loss,
    checkpointing the model state once per epoch.
    :return: None
    """
    logging.info("Start Training!")
    corpus_type = 'train'
    summary = Summary(args.batch_size, args.max_len)
    criterion = nn.BCELoss()
    summary.train()

    start_epoch = 0
    if args.model_name:
        # Resume from a checkpoint.  The checkpoint stores a state_dict
        # (see torch.save below), so load it into the freshly constructed
        # model — the original assigned the raw dict to ``summary``, which
        # broke both the model and the optimizer.
        checkpoint = torch.load(args.load_model)
        summary.load_state_dict(checkpoint['model'])
        start_epoch = checkpoint['epochs'] + 1

    # Build the optimizer after any restore so it tracks the live parameters.
    optimizer = torch.optim.Adam(summary.parameters(), lr=args.lr)

    for epoch in range(start_epoch, args.epoch):
        epoch_loss = 0.0
        batch_num = 0
        for batch in bachify_data(corpus_type):
            batch_df, batch_label, _, _ = batch
            batch_df = torch.tensor(batch_df)
            batch_label = torch.tensor(batch_label)

            binary_output = summary(batch_df)
            loss = criterion(binary_output, batch_label)

            # Clear stale gradients before backprop — without this the
            # gradients accumulate across every batch.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # ``.item()`` detaches the scalar; accumulating the tensor
            # would retain the whole autograd graph for the epoch.
            epoch_loss += loss.item()
            batch_num += 1

        logging.info("Epoch {}: Total loss is {}, Avg loss is {}".format(
            epoch, epoch_loss, epoch_loss / batch_num))

        # Checkpoint: create the *parent* directory, then save the file.
        # The original makedirs'd the .tar path itself, so torch.save
        # then failed on a path that was a directory.
        if not os.path.exists(args.save_path):
            os.makedirs(args.save_path)
        model_name = "{}_epoch_model.tar".format(epoch)
        checkpoint_path = os.path.join(args.save_path, model_name)
        torch.save({
            'model': summary.state_dict(),
            'loss': epoch_loss / batch_num,
            "epochs": epoch
        }, checkpoint_path)

    logging.info("Finish Training!")