def insert_one(table, columns, values, options, count):
    """Insert a single row and maintain the running row counter.

    Performs the insert, bumps ``count``, logs progress every 100 rows,
    and commits according to ``options.commit_every``.  Returns the
    updated count; the caller must feed it back in on the next call.
    """
    insert(table, columns, values, **db_args(options))
    count += 1

    # Periodic progress feedback for long-running loads.
    if count % 100 == 0:
        info('processed {} ...'.format(count))

    # commit_every < 2 means "commit after every row"; otherwise commit
    # only on multiples of commit_every.
    due = options.commit_every < 2 or count % options.commit_every == 0
    if due:
        if options.commit_every > 100:
            # Large batches commit rarely, so the commit is worth logging.
            info('committing ...')
        commit(**db_args(options))

    return count
def process_IN_MODIFY(self, event):
    '''
    Handles a file modification event, typically when one or more
    event records have been appended to the accounting file.

    The new records are sent to the accounting database here.
    '''
    # Only react to changes to the file we are actually watching.
    if event.name == os.path.basename(self.acctfile):
        if self.dryrun:
            self.logger.info("Would normally send records")
        else:
            if self.recs is None:
                # Segfaults if file doesn't exist -- No exception thrown
                if os.path.isfile(self.acctfile):
                    self.recs = pylsf.lsb_geteventrec(self.acctfile)
                else:
                    # Report to log
                    fmt = "Couldn't open acct file: %s"
                    strerr = "No such file or directory"
                    self.logger.error(fmt % strerr)
                    return
            # Push any pending records and carry the counters forward.
            insertc, errorc, heartbeat = common.insert(self.logger,
                                                       common.LOCALTAB,
                                                       self.recs,
                                                       self.connection,
                                                       self.insertc,
                                                       self.errorc,
                                                       self.heartbeat,
                                                       self.heartbeatdelta)
            self.insertc = insertc
            self.errorc = errorc
            self.heartbeat = heartbeat
def save_data(result_dic): sql = ''' INSERT INTO shanghai_index ( `code_id`, `name`, `current_value`, `yesterday_end`, `today_begin`, `today_highest`, `today_lowest`, `deal_count`, `compare_to_yesterday`, `amplitude`, `deal_money` ) VALUES ( '{code_id}', '{name}', '{current_value}', '{yesterday_end}', '{today_begin}', '{today_highest}', '{today_lowest}', '{deal_count}', '{compare_to_yesterday}', '{amplitude}', '{deal_money}' ); '''.format(name=result_dic['name'], code_id=result_dic['id'], current_value=result_dic['current_value'], yesterday_end=result_dic['yesterday_end'], today_begin=result_dic['today_begin'], today_highest=result_dic['today_highest'], today_lowest=result_dic['today_lowest'], amplitude=result_dic['amplitude'], deal_count=result_dic['deal_count'], deal_money=result_dic['deal_money'], compare_to_yesterday=result_dic['compare_to_yesterday']) print("==================save success!") print(sql) insert(sql)
def process_IN_CREATE(self, event):
    '''
    Handles a file creation event, which happens on the second and last
    stage of a logrotation.  A new lsb_geteventrec instance is created
    (i.e. the new accounting file with the original accounting name is
    opened again).
    '''
    # Don't wm.close() here, as it would close all watches.  Don't
    # wm.del_watch() either because I don't think you'd be able to add
    # the watch again without restarting the notifier loop (and I don't
    # know if it's possible to restart the notifier loop either).  And
    # don't wm.rm_watch() because it makes pyinotify complain.  In fact,
    # leave the watches alone and just make sure you don't have any watch
    # looking at files directly, as opposed to looking at directories.
    if event.name == os.path.basename(self.acctfile):
        # Shouldn't be necessary but read any possibly unhandled event
        # records from the old file, in case new entries have been
        # appended to it, it's been renamed and the new file has been
        # created atomically.
        # It's possible to do it this way because seems to be targeting
        # inodes, not file names.
        if self.dryrun:
            self.logger.info("Would normally send records")
        elif self.recs is not None:
            # Flush pending records and carry the counters forward.
            insertc, errorc, heartbeat = common.insert(self.logger,
                                                       common.LOCALTAB,
                                                       self.recs,
                                                       self.connection,
                                                       self.insertc,
                                                       self.errorc,
                                                       self.heartbeat,
                                                       self.heartbeatdelta)
            self.insertc = insertc
            self.errorc = errorc
            self.heartbeat = heartbeat
        self.logger.info('Created %s' % event.name)
        if os.path.isfile(self.acctfile):
            # Previous file implicitly closed here when the
            # lsb_geteventrec is deallocated
            self.recs = pylsf.lsb_geteventrec(self.acctfile)
        else:
            # Report to log
            strerr = "No such file or directory"
            self.logger.error("Couldn't open acct file: %s" % strerr)
def insert_relation(id_, document_id, ann, options, **kwargs):
    """Store one relation annotation as a row in the relations table.

    The endpoint ids are lifted out of the annotation body (to reduce
    redundancy in the stored JSON) and written as dedicated columns;
    the remainder of the annotation is serialized into the data column.
    """
    source_id = ann['body'].pop('from')  # reduce redundancy in JSON
    target_id = ann['body'].pop('to')

    columns = ['id', 'document_id', 'from_id', 'to_id']
    values = [id_, document_id, source_id, target_id]

    if options.denormalize:
        # see denormalize()
        columns += ['__from_data_body_id', '__to_data_body_id']
        values += [ann.pop('__from_data_body_id'),
                   ann.pop('__to_data_body_id')]

    columns.append('data')
    values.append(json.dumps(ann))

    return insert('relations', columns, values, **kwargs)
def process_IN_MODIFY(self, event):
    '''
    Handles a file modification event in currently-read files as well as
    in newly-created ones, typically when one or more event records have
    been appended to the accounting file.

    The new records are sent to the database here.
    '''
    # Issue: not sure how to deal with the currently-read file if it gets
    # overwritten.  Two options:
    # 1. The file pointer should be reset because it's a new file
    # altogether.
    # 2. The file pointer shouldn't be reset because it's the same data
    # which has been overwritten in the process of being added new lines
    # (e.g. vi).
    # Let's go for the second option, which the following does without
    # even needing any CREATE event.

    def _send_records(filename):
        # Parse any new records from filename, push them to the DB and
        # carry the running counters forward on self.  The original code
        # repeated this sequence three times with bare open()/close(),
        # leaking the handle if common.parse() raised.
        with open(self.acctdir + '/' + filename) as f:
            recs = common.parse(f, self)
            insertc, errorc, heartbeat = \
                common.insert(self.logger, common.CETAB, recs,
                              self.connection, self.insertc, self.errorc,
                              self.heartbeat, self.heartbeatdelta)
        self.insertc = insertc
        self.errorc = errorc
        self.heartbeat = heartbeat

    try:
        l = latest(self.acctdir)
        if l == self.acctfile:
            # There's no new accounting file
            _send_records(l)
        else:
            # There's a new accounting file (or we weren't reading any)
            # Finish reading the current one if we were reading one
            if self.acctfile is not None:
                _send_records(self.acctfile)
            # Read the new one
            self.acctfile = l
            self.offset = 0
            self.logger.info("Will now be watching %s" % self.acctfile)
            _send_records(self.acctfile)
    except common.AcctError:
        # As raised by latest
        # No need to fuss if a file we're not interested in gets changed
        pass
def insert_span(id_, document_id, ann, options, **kwargs):
    """Store one span annotation as a row in the mentions table.

    The annotation is serialized wholesale into the data column;
    ``options`` is accepted for signature parity with the other
    insert_* helpers but is not consulted here.
    """
    row = (id_, document_id, json.dumps(ann))
    return insert('mentions', ('id', 'document_id', 'data'), row, **kwargs)
def save_data(code_id, name, type, useful=0): sql = "INSERT INTO stock_info (`code_id`, `name`, `type`, `useful`) VALUES ({code_id}, {name}, {type}, {useful});".format( code_id=code_id, name=name, type=type, useful=useful) print(sql) insert(sql)