Example #1
 def update(self, sql):
     cur = self.conn.cursor()
     try:
         cur.execute(sql)
         self.conn.commit()
         print('%s:sql=%s' % (get_time(), sql))
     except Exception as e:
         print('%s:sql=%s' % (get_time(), sql))
         print(e)
     finally:
         self.close_cur(cur)
Example #2
    def tick(self, max_time=None, *args, **kws):
        '''
        first poll process for packets, then network messages, then actor updates

        note: max_time takes a "best effort" approach.  It does not guarantee that processing will
        always finish on time (duration less than the max_time specified).
        However, it does ensure that at least one IPC and one network message are polled
        per iteration, to avoid "starvation".

        :Parameters:
            - `max_time`: processing time limit in seconds, so that event processing does not take too long;
              not all messages are guaranteed to be processed under this limit
        
        :Return:
            - true: if all messages ready for processing were completed
            - false: otherwise (i.e.: processing took more than max_time)
        '''
        cut_off_time = None
        if max_time:
            cut_off_time = util.get_time() + max_time
        # server manager need to monitor sub-groups
        if self.is_main_process:
            for group, (p, _id, switch) in self.groups.items():    
                if not processing.is_alive(p):
                    raise GroupFailed('Group "%s" failed' % group, group)

        # always poll at least one ipc message here
        has_more = True
        while has_more:
            has_more = self.ipc_transport.poll(self.packet_handler)
            if cut_off_time and util.get_time() > cut_off_time:
                break
        
        # always poll at least one network message here
        if self.transport:
            has_more = True
            while has_more:
                has_more = self.transport.poll(self.packet_handler)
                if cut_off_time and util.get_time() > cut_off_time:
                    break

#        self.log(logging.DEBUG, 'process "%s" queue length: %s' % (processing.get_pid(processing.current_process()), self.queue_length))
        
        # process all messages first
        new_max_time = None
        if cut_off_time:
            new_max_time = cut_off_time - util.get_time()
        # process these messages given the newly calculated max time
        ret = messaging.MessageManager.tick(self, max_time=new_max_time, **kws)
        # then update all actors, highest _SYNC_PRIORITY first
        for actor in sorted(self.objectIDMap.values(),
                            key=lambda x: x._SYNC_PRIORITY, reverse=True):
            actor.update(*args, **kws)
        return ret
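The "best effort" contract in the docstring implies a fixed-interval driver loop around tick(); a minimal sketch of such a loop, assuming util.get_time() returns seconds as a float (the same pattern appears near the end of this listing, in Example #38):

interval = 0.05  # desired seconds per iteration (an assumption)
while not should_quit:
    start = util.get_time()
    manager.tick(max_time=interval)
    # sleep off whatever is left of this iteration's budget
    time_to_sleep = interval - (util.get_time() - start)
    if time_to_sleep > 0.0:
        time.sleep(time_to_sleep)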
Example #3
 def insert(self, sql):
     id = 0
     cur = self.conn.cursor()
     try:
         cur.execute(sql)
         id = int(self.conn.insert_id())
         self.conn.commit()
         print('%s:sql=%s' % (get_time(), sql))
     except Exception as e:
         print('%s:sql=%s' % (get_time(), sql))
         print(e)
     finally:
         self.close_cur(cur)
     return id
Example #4
def aggregate(fn):
    hit = defaultdict(int)
    miss = defaultdict(int)
    rep = defaultdict(int)
    with open(fn) as f:  # close the log file deterministically
        for line in f:
            if line.startswith('[RHC]'):
                hit[get_time(line)] += 1
            elif line.startswith('[RNH]'):
                miss[get_time(line)] += 1
            elif line.startswith('[RCM]'):
                rep[get_time(line)] += 1
    return hit, miss, rep
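A hedged usage sketch: the three counters share time-bucket keys, so a per-bucket hit ratio falls out directly (the log path and the meaning of the [RHC]/[RNH] tags are assumptions):

hit, miss, rep = aggregate('cache.log')  # hypothetical log file
for t in sorted(hit):
    total = hit[t] + miss[t]
    if total:
        print('%s hit ratio: %.2f' % (t, hit[t] / float(total)))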
Example #6
def add_question(cursor, question):
    question["submission_time"] = util.get_time()
    cursor.execute(
        """
                    INSERT INTO question (submission_time, view_number, title, message)
                    VALUES (%(submission_time)s, 0, %(title)s, %(message)s)
                    """, question)
Example #7
 def follow_users_from_handle(self, handle, limit, max):
     try:
         lst = self.getAllFollowers(handle,max)
     except tweepy.RateLimitError:
         print("sleeping on rate limit")
         self.emit(SIGNAL('post_follow(QString)'), "bad")
         self.sleep(15 * 60)
         return self.follow_users_from_handle(handle, limit, max)  # retry after backing off
     count = 0
     msg = str(len(lst))
     print(len(lst))
     self.emit(SIGNAL('setup_prog(QString)'), msg)
     for user in lst:
         if count == limit:
             self.sleep(26500)
             count = 0
         b = self.check_follow(self.me.id, user.id)
         t = util.get_time()
         if b is True:
             cursor = self.db.cursor()
             cursor.execute('''INSERT INTO followed_users(id, screen_name) VALUES(?,?)''',
                            (user.id, user.screen_name))
             self.db.commit()
             try:
                 self.api.create_friendship(user.id)
                 count += 1  # count this follow toward the limit check above
                 message = f"{t} following user: {user.screen_name}"
                 self.emit(SIGNAL('post_follow(QString)'), message)
                 self.sleep(random.randint(1, 720))
             except TweepError:
                 pass
         else:
             message = f"{t} friendship already exists: {user.screen_name}"
             self.emit(SIGNAL('post_follow(QString)'), message)
             self.sleep(5)
     return
Example #8
def edit_comment(comment_id):
    try:
        user_id = data_manager.get_userid_by_username(session['username'])
    except KeyError:
        return redirect('/')
    if request.method == 'GET' and user_id == data_manager.get_foreign_key_by_id(
            data_manager.comment_db, 'users_id', comment_id)[0]['users_id']:
        comment = data_manager.get_line_data_by_id(data_manager.comment_db,
                                                   comment_id)
        return render_template('add_edit_comment_head.html',
                               comment_id=comment_id,
                               comment=comment,
                               user_id=user_id)

    elif request.method == 'POST' and user_id == data_manager.get_foreign_key_by_id(
            data_manager.comment_db, 'users_id', comment_id)[0]['users_id']:
        data_manager.update_comment_message_submt_editedc_by_id(
            comment_id, request.form['comment'], util.get_time())
    answer_id = data_manager.get_foreign_key_by_id(data_manager.comment_db,
                                                   'answer_id',
                                                   comment_id)[0]['answer_id']
    if answer_id:
        question_id = data_manager.get_foreign_key_by_id(
            data_manager.answer_db, 'question_id', answer_id)[0]['question_id']
    else:
        question_id = data_manager.get_foreign_key_by_id(
            data_manager.comment_db, 'question_id',
            comment_id)[0]['question_id']
    if not user_id == data_manager.get_foreign_key_by_id(
            data_manager.comment_db, 'users_id', comment_id)[0]['users_id']:
        flash('Invalid user')
    return redirect('/question/{}'.format(question_id))
Example #9
 def start_record(self):
     """
     Begins dumping the configured stream to disk.
     """
     
     with self.__lock:
         # don't allow the user to start the record process twice
         if self.rtpdump.isalive():
             msg = "rtpdump process was already running"
             raise ProcessAlreadyRunningError(msg)
         
         # name of the file we're dumping to
         dump_file = os.path.join(self.dump_dir,
                         util.generate_file_name(self.video_basename))
         
         # start the process
         self.rtpdump.start(dump_file, self.dump_address, self.dump_port)
         
         if not util.block_until(self.rtpdump.isalive, self.max_block_time):
             msg = "rtpdump process failed to start within allotted time"
             raise ProcessOperationTimeoutError(msg)
         
         # set state variables to correspond to new file if process started
         self._commit_time = None
         self._start_time = util.get_time()
         self._dump_file = dump_file
Example #10
 def test_batch_generator(self):
     """
     Test to check if the batch_generator function chooses a context word
     in the skip_window for each center word
     """
     vocab_size = 4208
     self.dr.process_data(vocab_size)
     data_index = 0
     skip_window = randint(1, 50)
     num_skips = max(int(skip_window/2), 2)
     batch_size = num_skips*3
     new_index, batch, label = self.dr.batch_generator(batch_size,
                                                       num_skips,
                                                       skip_window,
                                                       data_index)
     batch = list(batch)
     for i, word in enumerate(self.dr.data[0:new_index]):
         while word in batch and skip_window <= i:
             index = batch.index(word)
             context = label[index][0]
             before = self.dr.data[i-skip_window:i]
             after = self.dr.data[i+1:i+skip_window+1]
             self.assertTrue(context in before or context in after)
             batch[index] = -1
     print("\nBuilding bacth time = {}".format(get_time(self.dr.batch_generator,
                                                        [batch_size,
                                                         data_index])))
Example #11
    def follow_users_from_retweeters(self, link, limit):
        id_tweet = link.split("/")[-1]
        count = 0
        try:
            retweeters = self.api.retweeters(id_tweet)
        except tweepy.RateLimitError:
            print("sleeping on rate limit")
            self.emit(SIGNAL('post_follow(QString)'), "bad")
            self.sleep(15 * 60)
            return self.follow_users_from_retweeters(link, limit)  # retry after backing off
        msg = str(len(retweeters))
        self.emit(SIGNAL('setup_prog(QString)'), msg)
        for u in retweeters:
            user = self.api.get_user(u)
            if count == limit:
                time.sleep(26500)
                count = 0
            t = util.get_time()
            b = self.check_follow(self.me.id, u)
            if b is True:
                message = f"{t} following user: {user.screen_name}"
                cursor = self.db.cursor()
                cursor.execute('''INSERT INTO followed_users(id, screen_name) VALUES(?,?)''', (user.id, user.screen_name))
                self.db.commit()
                self.api.create_friendship(u)
                count += 1  # count this follow toward the limit check above

                self.emit(SIGNAL('post_follow(QString)'), message)
                self.sleep(random.randint(1, 720))
            else:
                message = f"{t} friendship already exists: {user.screen_name}"
                self.emit(SIGNAL('post_follow(QString)'), message)
                self.sleep(5)
        return
Example #12
def horiz_time_const(d, params):
    '''
    TO DO:
    '''

    fig = plt.figure()
    fig.set_tight_layout(True)
    ax = fig.add_subplot(111)
    pf.AxisFormat()
    pf.TufteAxis(ax, ['bottom', 'left'], [3, 3])

    time = util.get_time(d)
    inds = np.where(np.logical_and(time >= 450, time <= 600))[0]

    time_ = time[inds] - 500

    for t in range(d['ntrial']):
        h1 = util.get_cell_data(d['tr'][t], 'h1')
        h2 = util.get_cell_data(d['tr'][t], 'h2')

        h1dat = d['tr'][t]['r'][h1[0]]['x'][inds]
        h2dat = d['tr'][t]['r'][h2[0]]['x'][inds]

        ax.plot(time_, h1dat, label='H1')  # only use 1st response
        ax.plot(time_, h2dat, label='H2')  # only use 1st response

    #ax[1].set_ylabel('response')
    ax.set_xlabel('time (ms)')
    ax.legend(fontsize=22, loc='lower right')

    savedir = util.get_save_dirname(params, check_randomized=True)
    fig.savefig(savedir + 'h_time_const' + str(t) + '.eps', edgecolor='none')
    plt.show(block=params['block_plots'])
Example #13
def edit_answer(cursor, answer):
    answer["submission_time"] = util.get_time()
    cursor.execute(
        """
                    UPDATE answer
                    SET submission_time=%(submission_time)s, message=%(message)s
                    WHERE id=%(id)s""", answer)
Example #15
def get_img_list(wf):
    app_id = wf.stored_data('app_id')
    if app_id:
        # wf.add_item(title='jifdjsi', subtitle='jifdsji')
        # wf.send_feedback()
        img_list = leancloud.get_list(wf)
        if len(img_list) > 0:
            for img in img_list:
                url = img[u'url']
                delete = img[u'delete']
                arg = u"url:%s delete:%s" % (url, delete)
                created_at = util.get_time(img[u'createdAt'])
                wf.add_item(
                    url,  # title
                    # subtitle
                    u'Created at: %s' % created_at,
                    arg=arg,
                    quicklookurl=url,
                    valid=True)

        else:
            wf.add_item(title='Nothing found yet',
                        subtitle='The upload list is only recorded once app_id and app_key are set')
        wf.send_feedback()
    else:
        wf.add_item(title='Authentication failed: set the Leancloud app_id and app_key first',
                    subtitle='Press Enter to configure',
                    arg='set:',
                    valid=True)
        wf.send_feedback()
Example #16
    def _get_image(self, size, action):
        """
        Fetches the large image for lightboxing for the given photo id. Returns
        the image raw data.
        """
        id = self._get_id_from_path(action)
        try:
            id = int(id)
            p = Photo.get_by_id(id)
        except Exception:
            p = None

        if p is None:
            fc = util.FileContainer(os.path.join(S.IMPORT_DIR, id),
                                    S.IMPORT_DIR)
            fc.time = util.get_time(fc)["time"]
            p = Photo.from_file_container(fc)

        if p is None:
            Logger.warning("could not find photo for %s" % id)
            image_path = S.BROKEN_IMG_PATH
        else:
            rel_thumb_path = p.get_or_create_thumb(size)
            image_path = os.path.join(S.THUMBNAIL_DIR, rel_thumb_path)

        # open in binary mode so the raw JPEG bytes come back intact
        with open(image_path, "rb") as f:
            raw_image = f.read()
        return self.construct_response(raw_image,
                                       self._route_types.JPEG_CONTENT_TYPE)
Example #17
 def getCompletedList(self):
     params = {
         "CSRF": self.CSRF,
         "StartTime": util.get_7_day_ago(),
         "EndTime": util.get_time()
     }
     return self.request("https://api.uyiban.com/officeTask/client/index/completedList", params=params,
                         cookies=self.COOKIES)
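The request brackets a seven-day window with util.get_7_day_ago() and util.get_time(); a minimal sketch of what those helpers might look like, assuming the API expects "YYYY-MM-DD HH:MM:SS" strings (the format is an assumption):

from datetime import datetime, timedelta

def get_time():
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')

def get_7_day_ago():
    week_ago = datetime.now() - timedelta(days=7)
    return week_ago.strftime('%Y-%m-%d %H:%M:%S')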
Example #18
    def preview(self):
        """
        Presents a preview of the files to be imported, giving the user an
        opportunity to view and change dates for images, highlighting images
        which may already be in the system, and the like.
        """
        rel_import_dir = os.path.relpath(
            self._env.get("PATH_INFO", "").lstrip("/"), "import/preview")
        import_dir = os.path.realpath(
            os.path.join(S.IMPORT_DIR, rel_import_dir))
        file_listing = []
        import_identifier = hashlib.sha1()
        hashes = []
        session_file_struct = {}
        for base_dir, _, files in os.walk(import_dir):
            for f in files:
                if not util.is_image_file(f):
                    continue
                fc = util.FileContainer(os.path.join(import_dir, f),
                                        S.IMPORT_DIR)
                ts = util.get_time(fc, allow_date_from_path=False)
                if ts["time"] != None:
                    fc.time = time.strftime("%Y-%m-%d %H:%M:%S", ts["time"])
                hashes.append(fc.hash)
                import_identifier.update(fc.hash)
                file_listing.append(fc)
                session_file_struct[fc.hash] = {
                    "file_data": fc.__dict__(),
                    "conflicts": None
                }
            break
        file_listing = sorted(file_listing, key=itemgetter('name'))
        conflicts = Photo.get_by_hash(hashes)

        for conflict_hash in conflicts.keys():
            conflicts_for_json = [c.id for c in conflicts[conflict_hash]]
            session_file_struct[conflict_hash][
                "conflicts"] = conflicts_for_json
            session_file_struct[conflict_hash]["file_data"]["marked"] = True
            Logger.debug(session_file_struct)

        session_id = import_identifier.hexdigest()
        session_data = {
            "file_listing": session_file_struct,
            "rel_dir": rel_import_dir,
            "session_id": session_id
        }
        with open(os.path.join("/tmp", "%s.txt" % session_id), "w+") as f:
            f.write(json.dumps(session_data))

        return self.construct_response(
            Template.render(
                "import/preview.html", {
                    "files": file_listing,
                    "import_id": session_id,
                    "import_dir": rel_import_dir,
                    "conflicts": conflicts
                }), self._route_types.HTML_CONTENT_TYPE)
Example #20
 def show_progress(self, opt, it, loss):
     time_elapsed = util.get_time(time.time() - self.time_start)
     print("it {0}/{1}, lr:{3}, loss:{4}, time:{2}".format(
         util.cyan("{}".format(it + 1)),
         opt.to_it,
         util.green("{0}:{1:02d}:{2:05.2f}".format(*time_elapsed)),
         util.yellow("{:.2e}".format(opt.lr_pmo)),
         util.red("{:.4e}".format(loss.all)),
     ))
Example #21
            def read_var(self, offset_on=False, read_bytearray=None):

                if offset_on is True:
                    offset = self.get_offset()
                else:
                    offset = 0

                # buffer holding the raw bytes to decode
                _bytearray_read = read_bytearray

                if self._variable_type == "BOOL":
                    return snap7.util.get_bool(_bytearray_read, offset,
                                               self.get_bit_offset())

                if self._variable_type == "REAL":
                    return snap7.util.get_real(_bytearray_read, offset)

                if self._variable_type == "BYTE":
                    return util.get_byte(_bytearray_read, offset)

                if self._variable_type == "WORD":
                    return util.get_word(_bytearray_read, offset)

                if self._variable_type == "DWORD":
                    return snap7.util.get_dword(_bytearray_read, offset)

                if self._variable_type == "INT":
                    return snap7.util.get_int(_bytearray_read, offset)

                if self._variable_type == "DINT":
                    return util.get_dint(_bytearray_read, offset)

                if self._variable_type == "S5TIME":
                    return util.get_s5time(_bytearray_read, offset)

                if self._variable_type == "TIME":
                    return util.get_time(_bytearray_read, offset)

                if self._variable_type == "DATE":
                    return util.get_date(_bytearray_read, offset)

                if self._variable_type == "TIME_OF_DAY":
                    return util.get_time_of_day(_bytearray_read, offset)

                if self._variable_type == "CHAR":
                    return util.get_char(_bytearray_read, offset)

                if self._variable_type[:6:] == "STRING":
                    if self._variable_type[7:-1:]:
                        var = snap7.util.get_string(_bytearray_read, offset,
                                                    self.get_size())
                    else:
                        var = 'under construction'
                    return var
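A hedged call-site sketch: the raw bytes would typically come from a PLC read such as python-snap7's Client.db_read; the address, DB number, offset, size, and the configured 'variable' instance are all assumptions:

import snap7

client = snap7.client.Client()
client.connect('192.168.0.10', 0, 1)  # PLC address, rack, slot (assumed)
raw = client.db_read(1, 0, 8)         # read 8 bytes from DB 1 at offset 0 (assumed)
value = variable.read_var(read_bytearray=raw)  # decode per the configured type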
Example #22
    def write_error(self, error_info: str, collector_addr: str):
        """
        Append an error entry to a log file named by date and collector address.
        :param error_info: the error message
        :param collector_addr: address of the data collector
        :return: None
        """
        file_dir = "./log/error_log"
        file_name = util.get_time()[0:10] + "-" + collector_addr + "-error.csv"
        file_path = file_dir + "/" + file_name

        if not os.path.exists(file_dir):
            os.makedirs(file_dir)

        # the with-block closes the file even if the write raises
        with open(file_path, "a+", newline="") as file:
            csv_write = csv.writer(file)
            data_row = [util.get_time(), error_info]
            csv_write.writerow(data_row)
Example #23
 def find_matches(self, results):
     starting_time = util.get_time()
     for idx, rec in enumerate(results):
         tinder_api.like(rec['_id'])
         print('Liked ' + rec['name'])
         event_emitter.emit_like(rec['_id'])
         if idx % 5 == 0:
             matches = tinder_api.get_updates(starting_time)
             print(matches)
         self.socketSleep(2)
Example #24
    def write_wind_speed(self, wind_speed: float, collector_addr):
        """
        Write a wind-speed reading to a CSV file.
        :param wind_speed: the wind speed value
        :param collector_addr: address of the data collector
        :return: None
        """
        file_dir = "./log/wind_speed"
        file_name = util.get_time()[0:10] + "-" + collector_addr + "-wind_speed.csv"
        file_path = file_dir + "/" + file_name

        if not os.path.exists(file_dir):
            os.makedirs(file_dir)

        # the with-block closes the file even if the write raises
        with open(file_path, "a+", newline="") as file:
            csv_write = csv.writer(file)
            data_row = [util.get_time(), str(wind_speed)]
            csv_write.writerow(data_row)
Example #25
 def show_progress(self, opt, ep, loss):
     [lr] = self.sched.get_lr()
     time_elapsed = util.get_time(time.time() - self.time_start)
     print("ep {0}/{1}, lr:{3}, loss:{4}, time:{2}"
         .format(util.cyan("{}".format(ep+1)),
                 opt.to_epoch,
                 util.green("{0}:{1:02d}:{2:05.2f}".format(*time_elapsed)),
                 util.yellow("{:.2e}".format(lr)),
                 util.red("{:.4e}".format(loss.all)),
     ))
Example #26
 def excute(self, sql):
     cur = self.conn.cursor()
     try:
         print('%s:sql=%s' % (get_time(), sql))
         cur.execute(sql)
         self.conn.commit()
     except Exception as e:
         print(traceback.format_exc())
     finally:
         self.close_cur(cur)
Example #27
	def get_items(self):
		for story in self.stories:
			# type detection: 1 original, 2 repost, 3 comment, 4 conversation, 5 video, 6 music, 9 mood, 12 like
			self.type = 1
			try:
				isquotation = story.find_element_by_class_name('replyBox')
				self.type = 2
			except:
				pass
			try:
				isComm = story.find_element_by_class_name('feedComm')
				self.type = 3
			except:
				pass
			self.cid = story.get_attribute("id")
			self.tid = story.get_attribute("rel")
			try:
				self.author = story.find_element_by_class_name('userName').find_element_by_tag_name('a').get_attribute('title')
			except:
				print('Something inexplicable happened?')
				print('----------------------------------------------------------------------------------')
				util.add_log('something inexplicable happened ' + self.cid)
				continue
			self.time, self.qtime = util.get_time(story)

			self.content = util.analyse_content_html(self, story)

			# print the parsed fields
			print('CID:', self.cid)
			print('TID:', self.tid)
			if self.type == 1:
				print('type: original')
			elif self.type == 2:
				print('type: repost')
			else:
				print('type: comment')
			print('author:', self.author)
			print('content:', self.content)
			print('time:', self.time)
			# mood module
			self.mood = util.get_mood(self, story)
			# video module
			util.analyze_video(self, story)
			if self.type != 1:  # repost/comment: fetch the quoted author, content and time
				self.qauthor, self.qcontent = util.get_quotation_html(self, story)
				if self.qtime != "":
					print('original time:', self.qtime)
			else:
				self.qauthor = ""
				self.qcontent = ""
			util.get_image(self, story)
			self.location, self.longitude, self.latitude = util.get_loc(story)  # geolocation
			util.sql_insert(self)
			print('----------------------------------------------------------------------------------')
Example #28
def register_user(cursor, username, plain_text_password):
    password = util.hash_password(plain_text_password)
    current_date = util.get_time()
    cursor.execute(("""
                    INSERT INTO users
                    (username, password, reputation, registration_date)
                    VALUES (%(username)s, %(password)s, 0, %(registration_date)s)
                   """),
                   dict(username=username,
                        password=password,
                        registration_date=current_date))
Example #29
def post_answer(cursor, message, question_id):
    submission_time = util.get_time()
    cursor.execute(
        """
                    INSERT INTO answer (submission_time, question_id, message)
                    VALUES(%(submission_time)s, %(question_id)s, %(message)s)
                    """, {
            'submission_time': submission_time,
            'question_id': question_id,
            'message': message
        })
Example #30
 def test_build_vocab(self):
     """
     Test to check that build_vocab returns dictionaries
     of the requested vocabulary size, given a list of words.
     """
     vocab_size = 500
     _, dic, revert_dic = self.dr.build_vocab(self.words, vocab_size)
     print("\nBuilding vocab time = {}\n".format(
         get_time(self.dr.build_vocab, vocab_size)))
     self.assertTrue(len(dic) == vocab_size)
     self.assertTrue(len(revert_dic) == vocab_size)
Example #31
def run_optimization():
    # Define the optimizer object
    autoencBO = BayesianOptimization(target, mut_settings)

    max_iter = 1000
    #report_every = 5
    init_points = 30

    print("%s Starting optimization with %d initial points, %d iterations." %
          (util.get_time(), init_points, max_iter))
    # Higher kappa -> higher exploration (lower exploitation)
    autoencBO.maximize(init_points=init_points,
                       n_iter=max_iter,
                       acq="ucb",
                       kappa=8,
                       **gp_params)

    # Save resulting data
    autoencBO.points_to_csv(SAVE_PATH)
    print("%s Results written to %s" % (util.get_time(), SAVE_PATH))
Example #32
 def update_company_search(self, table_name, main_classi, id):
     cur = self.conn.cursor()
     try:
         sql = 'update %s set main_classi=main_classi+\"%s\",updated_at= \"%s\" where id=%d;' % (
             table_name, main_classi, get_time(), id)
         cur.execute(sql)
         self.conn.commit()
     except Exception as e:
         print("Mysql Error %s" % (str(e)))
     finally:
         self.close_cur(cur)
Example #33
def handle_update():
    statuses = get_statuses()
    previous_statuses = get_latest_statuses(statuses.keys())
    diffs = find_diffs(previous_statuses, statuses)
    log.debug(f'Changes: {diffs}')

    if diffs:
        send_updates(diffs)

    time = get_time()
    for location, status in statuses.items():
        save(location, time, status)
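find_diffs is not shown in this listing; a minimal sketch of the shape it plausibly has, assuming statuses are dicts keyed by location (purely illustrative, not the project's actual implementation):

def find_diffs(previous, current):
    # report each location whose status changed since the last poll
    return {loc: (previous.get(loc), status)
            for loc, status in current.items()
            if previous.get(loc) != status}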
Example #34
def add_comment(question_id=None, answer_id=None, comment=None, comment_id=-1):
    try:
        user_id = data_manager.get_userid_by_username(session['username'])
    except KeyError:
        return redirect('/')
    if request.method == 'GET':
        if request.path.startswith("/q"):
            question_title = data_manager.get_line_data_by_id(
                data_manager.question_db, question_id)[0]['title']
            return render_template('add_edit_comment_head.html',
                                   question_title=question_title,
                                   comment=comment,
                                   comment_id=comment_id,
                                   user_id=user_id)
        else:
            answer_message = data_manager.get_line_data_by_id(
                data_manager.answer_db, answer_id)[0]['message']
            return render_template('add_edit_comment_head.html',
                                   answer_id=answer_id,
                                   answer_message=answer_message,
                                   comment=comment,
                                   comment_id=comment_id,
                                   user_id=user_id)

    elif request.method == 'POST':
        user_id = data_manager.get_userid_by_username(session.get('username'))
        if request.path.startswith("/q"):
            data_manager.add_comment_to_table(data_manager.comment_db,
                                              'question_id', question_id,
                                              request.form['comment'],
                                              util.get_time(), 0, user_id)
        else:
            question_id = data_manager.get_foreign_key_by_id(
                data_manager.answer_db, 'question_id',
                answer_id)[0]['question_id']
            data_manager.add_comment_to_table(data_manager.comment_db,
                                              'answer_id', answer_id,
                                              request.form['comment'],
                                              util.get_time(), 0, user_id)
        return redirect('/question/{}'.format(question_id))
Example #35
 def calc_points(self):
   if self.date is None or self.variable is None:
     return
   data = datastr.read(self.variable, self.date)
   del self.points[0:len(self.points)]
   min_val, max_val = self.variable.get_bounds()
   for item in data:
     scale_y = (int(item[2]) - min_val) / (max_val - min_val)
     y = self.height * scale_y + self.y
     str_time = util.get_time(item[3])
     scale_x = util.get_time_float(str_time)
     x = self.width * scale_x * self.zoom + self.x - self.offset * self.zoom
     self.points.append(x)
     self.points.append(y)
Example #36
 def _get_status(self):
     """
     Returns a tuple of the current commit time, the amount of time
     elapsed since the current recording started, and whether the
     rtpdump process is currently recording.
     
     Not thread-safe, intended for internal use only.
     """
     
     elapsed_time = None
     if self._start_time is not None:
         elapsed_time = util.get_time() - self._start_time
     
     is_recording = self.rtpdump.isalive()
     
     # we ensure the types of all this outgoing data
     return self._commit_time, elapsed_time, is_recording
Example #37
	def default(self):
		"""
		Fetches a list of directories, files, or some combination thereof which
		need to be imported. The specific path is assumed from the PATH_INFO
		provided by self._env
		"""
		rel_import_dir = os.path.relpath(self._env.get("PATH_INFO", "").lstrip("/"), "import")
		dir_to_show = os.path.join(S.IMPORT_DIR, rel_import_dir)
		file_listing = []
		dir_listing = []
		get_details = True
		total_files = 0
		for base_dir, dirs, files in os.walk(dir_to_show):
			if get_details:
				for d in dirs:
					dir_listing.append({
						"rel_path": os.path.relpath(os.path.join(base_dir, d), S.IMPORT_DIR),
						"friendly_name": d
					})
				dir_listing = sorted(dir_listing, key=itemgetter("friendly_name"))
			for f in files:
				if not util.is_image_file(f):
					continue
				total_files += 1
				if not get_details:
					continue
				fc = util.FileContainer(os.path.join(base_dir, f), base_dir)
				time_resp = util.get_time(fc, allow_date_from_path=False)
				if time_resp["time"] != None:
					fc.time = time.strftime(util.EXIF_DATE_FORMAT, time_resp["time"])
				file_listing.append(fc)
			get_details = False

		file_listing = sorted(file_listing, key=itemgetter('name'))

		return self.construct_response(
			Template.render("import/listing.html", {
				"dirs": dir_listing,
				"files": file_listing,
				"total_files": total_files,
				"current_dir": rel_import_dir
			})
			, self._route_types.HTML_CONTENT_TYPE
		)
Example #38
    if manager.packet_types:
        assert set(manager.packet_types) == set(packet_types)
    else:
        manager.packet_types = packet_types
    manager._ipc_connect(server_addr, _should_quit)
    manager.log(logging.INFO, 'current process "%s" is bound to address: "%s"' % (processing.get_pid(processing.current_process()), manager.ipc_transport._connection.fileno()))
    try:
        default_actor = default_actor_class()
    except Exception as e:
        raise DefaultActorFailed('Default actor class "%s" failed to initialize. ("%s")' % (default_actor_class, e))
    else:
        manager.register_actor(default_actor)
    # manager is now seen as a child
    assert not manager.is_main_process
    while not manager._should_quit.value:
        start = util.get_time()
        try:
            manager.tick(max_time=max_tick_time)
        except Exception as e:
            manager.log(logging.ERROR, 'process "%s" failed with: %s' % (processing.get_pid(processing.current_process()), e))
            raise
        # we want to sleep the difference between the time it took to process and the interval desired
        _time_to_sleep = interval - (util.get_time() - start)
        if _time_to_sleep > 0.0:
            time.sleep(_time_to_sleep)
    return False

class ActorManager(messaging.MessageManager):
    '''provides actor, IPC and network functionality'''
    PYSAGE_MAIN_GROUP = '__MAIN_GROUP__'
    def init(self):
Example #39
    def update(self, link_id, count_id):
        time_now = int(time.time())
        questionUrl = 'http://www.zhihu.com/question/' + link_id

        content = get_content(questionUrl,count_id)
        if content == "FAIL":
            sql = "UPDATE QUESTION SET LAST_VISIT = %s WHERE LINK_ID = %s"
            self.cursor.execute(sql,(time_now,link_id))
            return

        soup = BeautifulSoup(content)

        # There are 3 numbers in this format
        # Focus, Last Activated and Review
        numbers = soup.findAll('div',attrs={'class':'zg-gray-normal'})

        if len(numbers) != 3:
            print "LINK_ID:" + link_id + "Does not have 3 numbers"
            return
        focus    = numbers[0]
        activate = numbers[1]
        review   = numbers[2]
        # Find out how many people focus this question.
        m = re.search(r'<strong>(.*?)</strong>', str(focus))
        if m is None:
            focus_amount = '0'
        else:
            focus_amount = m.group(1)
        # Find out when is this question last activated
        m = re.search(r'>(.*?)<', str(activate))
        if m is None:
            activate_time = u'Unknown'
        else:
            activate_time = get_time(m.group(1))
        # Find out how many people reviewed this question
        m = re.search(r'<strong>(.*?)</strong>', str(review))
        if m is None:
            review_amount = '0'
        else:
            review_amount = m.group(1)

        # Find out how many people answered this question.
        answer_amount = soup.find('h3',attrs={'id':'zh-question-answer-num'})
        if answer_amount is not None:
            answer_amount = answer_amount.get_text().replace(u' 个回答','')
        else:
            answer_amount = soup.find('div',attrs={'class':'zm-item-answer'})
            if answer_amount is not None:
                answer_amount = u'1'
            else:
                answer_amount = u'0'

        # Find out the top answer's vote amount.
        top_answer = soup.findAll('span',attrs={'class':'count'})
        if top_answer == []:
            top_answer_votes = 0
        else:
            top_answer_votes = 0
            for t in top_answer:
                t = t.get_text()
                t = t.replace('K','000')
                t = int(t)
                if t > top_answer_votes:
                    top_answer_votes = t

        # Find out the first commend date.
        comment_dates = soup.findAll('a',class_="answer-date-link")
        if comment_dates == []:
            first_comment_time = 0
        else:
            times = map(get_time, comment_dates)
            first_comment_time = min(times)

        # print it to check if everything is good.
        if count_id % 1 == 0:
            print str(count_id) + " , " + self.getName() + " Update QUESTION set FOCUS = " + focus_amount + " , ANSWER = " + answer_amount + ", LAST_VISIT = " + str(time_now) + ", TOP_ANSWER_NUMBER = " + str(top_answer_votes) + " where LINK_ID = " + link_id
        #print str(count_id) + " , " + self.getName() + " Update QUESTION set FOCUS = " + focus_amount + " , ANSWER = " + answer_amount + ", LAST_VISIT = " + str(time_now) + ", TOP_ANSWER_NUMBER = " + str(top_answer_votes) + " where LINK_ID = " + link_id
        
        # Update this question
        sql = "UPDATE QUESTION SET FOCUS = %s , ANSWER = %s, LAST_VISIT = %s, TOP_ANSWER_NUMBER = %s , ACTIVATE = %s, REVIEW = %s , FIRST_COMMENT = %s WHERE LINK_ID = %s"
        self.cursor.execute(sql,(focus_amount,answer_amount,time_now,top_answer_votes,activate_time, review_amount, first_comment_time, link_id))

        # Find out the topics related to this question
        topics = soup.findAll('a',attrs={'class':'zm-item-tag'})
        sql_str = "INSERT IGNORE INTO TOPIC (NAME, LAST_VISIT, LINK_ID, ADD_TIME, PRIORITY) VALUES (%s, %s, %s, %s, %s)"
        topicList = []
        for topic in topics:
            topicName = topic.get_text().replace('\n','')
            topicUrl = topic.get('href').replace('/topic/','')
            #sql_str = sql_str + "('" + topicName + "',0," + topicUrl + "," + str(time_now) + "),"
            topicList = topicList + [(topicName, 0, topicUrl, time_now, 0)]
        
        self.cursor.executemany(sql_str,topicList)