Code Example #1
File: pcs.py  Project: BuddhismZhang/bcloud
def list_trash(cookie, tokens, path="/", page=1, num=100):
    """获取回收站的信息.

    path - 目录的绝对路径, 默认是根目录
    page - 页码, 默认是第一页
    num - 每页有多少个文件, 默认是100个.
    回收站里面的文件会被保存10天, 10天后会自动被清空.
    回收站里面的文件不占用用户的存储空间.
    """
    url = "".join(
        [
            const.PAN_API_URL,
            "recycle/list?channel=chunlei&clienttype=0&web=1",
            "&num=",
            str(num),
            "&t=",
            util.timestamp(),
            "&dir=",
            encoder.encode_uri_component(path),
            "&t=",
            util.latency(),
            "&order=time&desc=1",
            "&_=",
            util.timestamp(),
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
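Note: nearly all of the bcloud snippets in this collection concatenate the return values of util.timestamp() and util.latency() directly into query strings, which implies both helpers return strings. As a rough orientation only, here is a minimal sketch of what such helpers might look like; the names mirror the calls above, but the exact implementation (milliseconds vs. seconds, the latency format) is an assumption, not the project's actual code.

import random
import time


def timestamp():
    # Assumption: current Unix time in milliseconds, returned as a string so it can
    # be concatenated straight into a URL, e.g. '&t=' + timestamp().
    return str(int(time.time() * 1000))


def latency():
    # Assumption: a small pseudo-random value standing in for a measured page
    # latency, also returned as a string for URL concatenation.
    return '0.' + str(random.randint(100, 999))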
Code Example #2
def list_trash(cookie, tokens, path='/', page=1, num=100):
    '''Get the contents of the recycle bin.

    path - absolute path of the directory; defaults to the root directory
    page - page number; defaults to the first page
    num - number of files per page; defaults to 100.
    Files in the recycle bin are kept for 10 days and are cleared automatically after that.
    Files in the recycle bin do not count against the user's storage quota.
    '''
    url = ''.join([
        const.PAN_API_URL,
        'recycle/list?channel=chunlei&clienttype=0&web=1',
        '&num=',
        str(num),
        '&t=',
        util.timestamp(),
        '&dir=',
        encoder.encode_uri_component(path),
        '&t=',
        util.latency(),
        '&order=time&desc=1',
        '&_=',
        util.timestamp(),
        '&bdstoken=',
        tokens['bdstoken'],
    ])
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #3
File: models.py  Project: jmhobbs/simple-mail-feeder
	def new_from_url ( url ):
		try:
			d = feedparser.parse( url )
			if not d.has_key( 'status' ):
				raise Exception( 'Error fetching content. Bad URL?' )
			if d.status != 200 and d.status != 301 and d.status != 302:
				raise Exception( d.debug_message)
			if not d.feed.has_key( 'title' ):
				raise Exception( "Content does not appear to be an RSS feed." )
			
			if d.has_key( 'etag' ):
				etag = d.etag
			else:
				etag = ''
			
			if d.has_key( 'modified' ):
				modified = d['modified']
			else:
				modified = datetime.now().timetuple()
			
			feed = Feed()
			feed.url = d.href
			feed.title = d.feed.title
			feed.link = d.feed.link
			try:
				feed.description = d.feed.description
			except AttributeError, e:
				pass
			feed.etag = etag
			feed.modified = util.timestamp( modified )
			feed.added = util.timestamp()
			
			feed.insert()
			
			return feed
Code Example #4
File: pcs.py  Project: lubing521/bcloud
def list_share_path(cookie, tokens, uk, path, share_id, page):
    '''List the files in a directory shared by a user.

    uk       - user key
    path     - the shared directory
    share_id - ID of the shared file
    '''
    url = ''.join([
        const.PAN_URL,
        'share/list?channel=chunlei&clienttype=0&web=1&num=100',
        '&t=', util.timestamp(),
        '&page=', str(page),
        '&dir=', encoder.encode_uri_component(path),
        '&t=', util.latency(),
        '&shareid=', share_id,
        '&order=time&desc=1',
        '&uk=', uk,
        '&_=', util.timestamp(),
        '&bdstoken=', tokens['bdstoken'],
        ])
    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
        'Referer': const.SHARE_REFERER,
        })
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #5
def main_loop():
    try:
        with sr.Microphone() as source:
            with Halo(text='Awaiting voice input.'):
                audio = r.listen(source)

            with Halo(text='Processing audio..'):
                result = r.recognize_google(audio)

            timestamp(result + '\n')()

            result = process(result, commands)

            if result == '':
                ready()
                return

            kb.write(result + ' ')

            ready()
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
Code Example #6
File: pcs.py  Project: blueyi/bcloud
def list_trash(cookie, tokens, path='/', page=1, num=100):
    '''Get the contents of the recycle bin.

    path - absolute path of the directory; defaults to the root directory
    page - page number; defaults to the first page
    num - number of files per page; defaults to 100.
    Files in the recycle bin are kept for 10 days and are cleared automatically after that.
    Files in the recycle bin do not count against the user's storage quota.
    '''
    url = ''.join([
        const.PAN_API_URL,
        'recycle/list?channel=chunlei&clienttype=0&web=1',
        '&num=', str(num),
        '&t=', util.timestamp(),
        '&dir=', encoder.encode_uri_component(path),
        '&t=', util.latency(),
        '&order=time&desc=1',
        '&_=', util.timestamp(),
        '&bdstoken=', tokens['bdstoken'],
        ])
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #7
File: pcs.py  Project: blueyi/bcloud
def list_share(cookie, tokens, path='/', page=1, num=100):
    '''Get information about the files the user has already shared.

    path - which directory to query; defaults to the root directory.
    page - page number; defaults to the first page.
    num - number of shared files fetched per request; defaults to 100.
    '''
    url = ''.join([
        const.PAN_URL,
        'share/record?channel=chunlei&clienttype=0&web=1',
        '&num=', str(num),
        '&t=', util.timestamp(),
        '&page=', str(page),
        '&dir=', encoder.encode_uri_component(path),
        '&t=', util.latency(),
        '&order=time&desc=1',
        '&_=', util.timestamp(),
        '&bdstoken=', tokens['bdstoken'],
        ])
    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
        'Referer': const.SHARE_REFERER,
        })
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #8
    def run(self):
        ''' Gather information about the host; based on the result of execution it
            emits the two signals SNMPScannerThread.work_done(DeviceInfo)
            and SNMPScannerThread.recived_message(str)
        '''
        devinfo = None

        msg = self.tr('{}|info|host: {} |msg: {}').format(timestamp(),
            self._host, self.tr('processing...'))

        self.recived_message.emit(msg)
        try:
            client = SNMPClient(self._host, community=self._community)
            devinfo = get_deviceinfo(client)

        except SNMPError as err:
            msg = self.tr('{}|error|host: {} |msg: {}').format(timestamp(), self._host, err)
            self.recived_message.emit(msg)
            return

        if devinfo:
            msg = self.tr('{}|info|host: {} |msg: {}').format(timestamp(),
                self._host, self.tr('complete'))

            self.recived_message.emit(msg)
            self.work_done.emit(devinfo)
Code Example #9
def list_share_path(cookie, tokens, uk, path, share_id, page):
    '''List the files in a directory shared by a user.

    uk       - user key
    path     - the shared directory
    share_id - ID of the shared file
    '''
    url = ''.join([
        const.PAN_URL,
        'share/list?channel=chunlei&clienttype=0&web=1&num=100',
        '&t=', util.timestamp(),
        '&page=', str(page),
        '&dir=', encoder.encode_uri_component(path),
        '&t=', util.latency(),
        '&shareid=', share_id,
        '&order=time&desc=1',
        '&uk=', uk,
        '&_=', util.timestamp(),
        '&bdstoken=', tokens['bdstoken'],
        ])
    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
        'Referer': const.SHARE_REFERER,
        })
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #10
async def add_bridge_link(db, c1, c2, cause=None, bidirectional=True):
    logging.info("Bridging %s and %s (bidirectional: %s)", repr(c1), repr(c2),
                 bidirectional)
    links[c1].add(c2)
    if bidirectional: links[c2].add(c1)
    await db.execute(
        "INSERT INTO links VALUES (?, ?, ?, ?, ?, ?) ON CONFLICT DO NOTHING",
        (c1[0], c1[1], c2[0], c2[1], util.timestamp(), cause))
    if bidirectional:
        await db.execute(
            "INSERT INTO links VALUES (?, ?, ?, ?, ?, ?) ON CONFLICT DO NOTHING",
            (c2[0], c2[1], c1[0], c1[1], util.timestamp(), cause))
    await db.commit()
Code Example #11
File: feeder.py  Project: jmhobbs/simple-mail-feeder
def run ( config_path ):
	config = ConfigParser.RawConfigParser()
	config.read( config_path )
	conn = sqlite3.connect( config.get( 'Database', 'path' ) )
	cursor = conn.cursor()
	while True:
		try:
			res = cursor.execute( 'SELECT [id], [url], [modified], [etag], [interval] FROM [feeds] WHERE [checked] + [interval] < ?', ( util.timestamp(), ) )
			for row in res:
				try:
					d = feedparser.parse( row[1], etag=row[3], modified=row[2] )
					if not d.has_key( 'status' ):
						raise Exception( 'Error fetching content. Bad URL?' )
					if d.status != 200 and d.status != 301 and d.status != 302 and d.status != 304:
						raise Exception( d.debug_message)
					if not d.feed.has_key( 'title' ):
						raise Exception( "Content does not appear to be an RSS feed." )
				except Exception, e:
					conn.execute( "INSERT INTO [log] ( [logged], [level], [message] ) VALUES ( ?, ?, ? )", ( util.timestamp(), 'ERROR', 'Error fetching feed #' + str( row[0] ) + ": " + str( e ) ) )
					conn.execute( "UPDATE [feeds] SET [checked] = ? WHERE [id] = ?", ( util.timestamp() - int( row[4] / 2 ), row[0] ) )
					continue
				
				try:
					if d.status == 304:
						conn.execute( "UPDATE [feeds] SET [checked] = ? WHERE [id] = ?", ( util.timestamp(), row[0] ) )
					else:
						count = 0
						for entry in d.entries:
							result = conn.execute( "SELECT COUNT(*) FROM messages WHERE [feed_id] = ? AND [uuid] = ?", ( row[0], entry.id ) )
							if 0 != result[0][0]:
								break
							
							conn.execute( "INSERT INTO messages ( [feed_id], [fetched], [posted], [title], [link], [uuid], [content] ) VALUES ( ?, ?, ?, ?, ?, ?, ? )", ( row[0], util.timestamp(), util.timestamp( entry.date_parsed ), entry.title, entry.link, entry.id, entry.content) )

						if d.has_key( 'etag' ):
							etag = d.etag
						else:
							etag = ''
						
						if d.has_key( 'modified' ):
							modified = d['modified']
						else:
							modified = datetime.now().timetuple()

						conn.execute( "UPDATE [feeds] SET [checked] = ?, [modified] = ?, [etag] = ? WHERE [id] = ?", ( util.timestamp(), modified, etag, row[0] ) )
						conn.execute( "INSERT INTO [log] ( [logged], [level], [message] ) VALUES ( ?, ?, ? )", ( util.timestamp(), 'DEBUG', 'Updated feed #' + str( row[0] ) + " with " + count + " new entries." ) )
				except Exception, e:
					conn.execute( "INSERT INTO [log] ( [logged], [level], [message] ) VALUES ( ?, ?, ? )", ( util.timestamp(), 'ERROR', 'Error parsing feed #' + str( row[0] ) + ": " + str( e ) ) )

			time.sleep( 30 ) # Arbitrary...
Code Example #12
File: Channel.py  Project: pirogoeth/echinus
 def __init__(self, name = None):
     """ create the channel """
     
     self.name = name
     self.time_c = timestamp()
     self.topic = ''
     self.users = []
Code Example #13
File: array.py  Project: enki/lazyboy
 def extend(self, iterable):
     """Append multiple records to this array."""
     now = timestamp()
     cfmap = {self.key.column_family: [Column(value, "", now)
                                       for value in iterable]}
     self._get_cas().batch_insert(self.key.keyspace, self.key.key, cfmap,
                                  self.consistency)
Code Example #14
def pre_transform(l, r, ctx):
    print("PRE TRANSFORM")

    gather_information(r, ctx)
    pick_package_manager(r, ctx)

    ctx.update({"TRANSFORM_START_TIME": util.timestamp()})
Code Example #15
File: auth.py  Project: blueyi/bcloud
def get_bduss(cookie, token, username, password):
    '''Get the most important login cookie; with it you have the final access authorization.

    token - the token value obtained from get_token().
    cookie - the BAIDUID cookie.
    username - the user name
    password - the plain-text password

    @return a list containing the authorization cookies needed to log in to *.baidu.com.
    '''
    url = const.PASSPORT_URL + '?login'
    data = ''.join([
        'staticpage=http%3A%2F%2Fwww.baidu.com%2Fcache%2Fuser%2Fhtml%2Fv3Jump.html',
        '&charset=utf-8',
        '&token=', token,
        '&tpl=mn&apiver=v3',
        '&tt=', util.timestamp(),
        '&codestring=&safeflg=0&u=https%3A%2F%2Fpassport.baidu.com%2F',
        '&isPhone=false&quick_user=0',
        #'&loginmerge=true&logintype=basicLogin',
        '&usernamelogin=1&spligin=rate',
        '&username='******'&password='******'&verifycode=&mem_pass=on',
        '&ppui_logintime=', get_ppui_logintime(),
        '&callback=parent.bd__pcbs__cb',
        ])
    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
        'Content-type': const.CONTENT_FORM,
        }, data=data.encode())
    return req.headers.get_all('Set-Cookie')
Code Example #16
 def thermal_detection_start(self):
     self.cameraThread.start_recording()
     self.data = {
         "__type__": "thermalVideoRecording",
         "recordingDateTime": util.datetimestamp(),
         "recordingTime": util.timestamp()
         }
Code Example #17
def get_category(cookie, tokens, category, page=1):
    '''Get information about all the files in a category, e.g. music or images.

    Current categories:
      Video     - 1
      Music     - 2
      Images    - 3
      Documents - 4
      Apps      - 5
      Other     - 6
      BT seeds  - 7
    '''
    timestamp = util.timestamp()
    url = ''.join([
        const.PAN_API_URL,
        'categorylist?channel=chunlei&clienttype=0&web=1',
        '&category=',
        str(category),
        '&pri=-1&num=100',
        '&t=',
        timestamp,
        '&page=',
        str(page),
        '&order=time&desc=1',
        '&_=',
        timestamp,
        '&bdstoken=',
        cookie.get('STOKEN').value,
    ])
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #18
def list_dir(cookie, tokens, path, page=1, num=100):
    '''Get information about all the files in a directory (at most 100 entries).'''
    timestamp = util.timestamp()
    url = ''.join([
        const.PAN_API_URL,
        'list?channel=chunlei&clienttype=0&web=1',
        '&num=',
        str(num),
        '&t=',
        timestamp,
        '&page=',
        str(page),
        '&dir=',
        encoder.encode_uri_component(path),
        '&t=',
        util.latency(),
        '&order=time&desc=1',
        '&_=',
        timestamp,
        '&bdstoken=',
        tokens['bdstoken'],
    ])
    req = net.urlopen(url,
                      headers={
                          'Content-type':
                          const.CONTENT_FORM_UTF8,
                          'Cookie':
                          cookie.sub_output('BAIDUID', 'BDUSS', 'PANWEB',
                                            'cflag'),
                      })
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #19
File: pcs.py  Project: BuddhismZhang/bcloud
def list_share(cookie, tokens, uk, page=1):
    """获取用户已经共享的所有文件的信息

    uk   - user key
    page - 页数, 默认为第一页.
    num  - 一次性获取的共享文件的数量, 默认为100个.
    """
    num = 100
    start = 100 * (page - 1)
    url = "".join(
        [
            const.PAN_URL,
            "pcloud/feed/getsharelist?",
            "&t=",
            util.timestamp(),
            "&categor=0&auth_type=1&request_location=share_home",
            "&start=",
            str(start),
            "&limit=",
            str(num),
            "&query_uk=",
            str(uk),
            "&channel=chunlei&clienttype=0&web=1",
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output(), "Referer": const.SHARE_REFERER})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #20
File: pcs.py  Project: BuddhismZhang/bcloud
def cloud_delete_task(cookie, tokens, task_id):
    """删除一个离线下载任务, 不管这个任务是否已完成下载.

    同时还会把它从下载列表中删除.
    """
    url = "".join(
        [
            const.PAN_URL,
            "rest/2.0/services/cloud_dl",
            "?bdstoken=",
            tokens["bdstoken"],
            "&task_id=",
            str(task_id),
            "&method=delete_task&app_id=250528",
            "&t=",
            util.timestamp(),
            "&channel=chunlei&clienttype=0&web=1",
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #21
File: auth.py  Project: huangjiang2014/bcloud
def check_login(cookie, token, username):
    """进行登录验证, 主要是在服务器上验证这个帐户的状态.

    如果帐户不存在, 或者帐户异常, 就不需要再进行最后一步的登录操作了.
    这一步有可能需要输入验证码.
    @return 返回errInfo.no, 如果为0, 表示一切正常, 可以登录.
    """
    url = "".join(
        [
            const.PASSPORT_URL,
            "?logincheck",
            "&token=",
            token,
            "&tpl=mm&apiver=v3",
            "&tt=",
            util.timestamp(),
            "&username="******"&isphone=false",
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        return json.loads(req.data.decode())
    else:
        return None
Code Example #22
File: plot_sweep.py  Project: chopper6/CRN_simulator
def tri(x, y, z, x_key, y_key, z_key, params, write_params_on_img=False):

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    surf = ax.plot_trisurf(x, y, z, cmap=cm.plasma)
    fig.colorbar(surf)

    ax.set_xlabel(x_key)
    ax.set_ylabel(y_key)
    ax.set_zlabel(z_key)

    if write_params_on_img:
        ax = plt.gca()
        cut = 80
        #ax.text(0,-.2,-1,'PARAMS' + str(params))
        plt.title('Parameters: ' + str(params)[:cut] + '\n' +
                  str(params)[cut:2 * cut] + '\n' +
                  str(params)[2 * cut:3 * cut] + '\n' + str(params)[3 * cut:],
                  fontsize=6)

    fig.tight_layout()
    plt.grid(alpha=.2)

    if params['save_fig']:
        title = params['out_dir'] + util.timestamp(
        ) + '_' + x_key + '_' + y_key + '_' + z_key + '.png'
        plt.savefig(title)
    else:
        plt.show()
    plt.clf()
    plt.cla()
Code Example #23
File: pcs.py  Project: BuddhismZhang/bcloud
def cloud_cancel_task(cookie, tokens, task_id):
    """取消离线下载任务.
    
    task_id - 之前建立离线下载任务时的task id, 也可以从cloud_list_task()里
              获取.
    """
    url = "".join(
        [
            const.PAN_URL,
            "rest/2.0/services/cloud_dl",
            "?bdstoken=",
            tokens["bdstoken"],
            "&task_id=",
            str(task_id),
            "&method=cancel_task&app_id=250528",
            "&t=",
            util.timestamp(),
            "&channel=chunlei&clienttype=0&web=1",
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #24
File: pcs.py  Project: BuddhismZhang/bcloud
def cloud_list_task(cookie, tokens, start=0):
    """获取当前离线下载的任务信息
    
    start - 从哪个任务开始, 从0开始计数, 会获取这50条任务信息
    """
    url = "".join(
        [
            const.PAN_URL,
            "rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1",
            "&bdstoken=",
            tokens["bdstoken"],
            "&need_task_info=1&status=255",
            "&start=",
            str(start),
            "&limit=50&method=list_task&app_id=250528",
            "&t=",
            util.timestamp(),
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #25
File: api.py  Project: farberbrodsky/clims
def login_or_cached(auth_data, cache_path=sess_file):
    """Tries to use a cached session, if it's from the last hour"""
    time = timestamp()
    try:
        with open(cache_path, "rb") as f:
            pickled_data = pickle.load(f)
            if pickled_data["username"] == auth_data["username"] and \
                pickled_data["id"] == auth_data["id"] and \
                    pickled_data["password"] == auth_data["password"] and \
                    pickled_data["time"] > (time - 3600):
                # From the last hour, and is of the same parameters
                session = requests.Session()
                session.cookies.update(pickled_data["cookies"])
                return session
    except FileNotFoundError:
        pass
    # Save a new session to the cache
    new_session = login(auth_data)
    with open(cache_path, "wb") as f:
        data = {
            "username": auth_data["username"],
            "id": auth_data["id"],
            "password": auth_data["password"],
            "time": time,
            "cookies": new_session.cookies
        }
        pickle.dump(data, f)
    return new_session
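The cache check pickled_data["time"] > (time - 3600) only works if timestamp() here returns Unix time in seconds. A hypothetical call site might look like the following; the auth_data keys simply mirror what login_or_cached() reads, the values are placeholders, and login() is assumed to return a requests.Session:

# Hypothetical usage; all values below are placeholders.
auth_data = {"username": "alice", "id": "12345", "password": "hunter2"}
session = login_or_cached(auth_data, cache_path="clims_session.pickle")
response = session.get("https://example.com/api/courses")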
Code Example #26
File: generators.py  Project: dpsommer/pf-log-gen
 def _audit_entry(
         self,
         role=None,
         event=None,
         user=None,
         client=None,
         protocol=None,
         grant_type="",
         status=None,
         adapter_id=None,
         description=""):
     response_time = util.Mock.response_time()
     time.sleep(response_time / 1000)
     timestamp = util.timestamp()
     # FIXME: this function is ugly
     return "%s| tid:%s| %s| %s| %s| %s| %s| %s| %s| %s| %s| %s| %s| %s| %s\r\n" % (
         timestamp,
         self.tid,
         event if event is not None else self.event,
         user if user is not None else self.user,
         self.ip,
         "",
         client if client is not None else self.client,
         protocol if protocol is not None else self.protocol,
         grant_type,
         self.host,
         role if role is not None else self.role,
         status if status is not None else self.status,
         adapter_id if adapter_id is not None else self.adapter_id,
         description,
         response_time
     )
Code Example #27
def list_share(cookie, tokens, uk, page=1):
    '''Get information about all the files the user has shared.

    uk   - user key
    page - page number; defaults to the first page.
    num  - number of shared files fetched per request; defaults to 100.
    '''
    num = 100
    start = 100 * (page - 1)
    url = ''.join([
        const.PAN_URL,
        'pcloud/feed/getsharelist?',
        '&t=',
        util.timestamp(),
        '&categor=0&auth_type=1&request_location=share_home',
        '&start=',
        str(start),
        '&limit=',
        str(num),
        '&query_uk=',
        str(uk),
        '&channel=chunlei&clienttype=0&web=1',
        '&bdstoken=',
        tokens['bdstoken'],
    ])
    req = net.urlopen(url,
                      headers={
                          'Cookie': cookie.header_output(),
                          'Referer': const.SHARE_REFERER,
                      })
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #28
File: pcs.py  Project: BuddhismZhang/bcloud
def cloud_query_task(cookie, tokens, task_ids):
    """查询离线下载任务的信息, 比如进度, 是否完成下载等.

    最好先用cloud_list_task() 来获取当前所有的任务, 然后调用这个函数来获取
    某项任务的详细信息.

    task_ids - 一个list, 里面至少要有一个task_id, task_id 是一个字符串
    """
    url = "".join(
        [
            const.PAN_URL,
            "rest/2.0/services/cloud_dl?method=query_task&app_id=250528",
            "&bdstoken=",
            tokens["bdstoken"],
            "&task_ids=",
            ",".join(task_ids),
            "&t=",
            util.timestamp(),
            "&channel=chunlei&clienttype=0&web=1",
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #29
def restore_trash(cookie, tokens, fidlist):
    '''Restore files/directories from the recycle bin.

    fidlist - list of fs_id values of the files/directories to restore.
    '''
    url = ''.join([
        const.PAN_API_URL,
        'recycle/restore?channel=chunlei&clienttype=0&web=1',
        '&t=',
        util.timestamp(),
        '&bdstoken=',
        tokens['bdstoken'],
    ])
    data = 'fidlist=' + encoder.encode_uri_component(json.dumps(fidlist))
    req = net.urlopen(url,
                      headers={
                          'Cookie': cookie.header_output(),
                          'Content-type': const.CONTENT_FORM_UTF8,
                      },
                      data=data.encode())
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #30
def cloud_query_task(cookie, tokens, task_ids):
    '''Query information about offline download tasks, e.g. progress and whether a download has finished.

    It is best to call cloud_list_task() first to get all the current tasks, and
    then call this function to get the details of a particular task.

    task_ids - a list containing at least one task_id; each task_id is a string
    '''
    url = ''.join([
        const.PAN_URL,
        'rest/2.0/services/cloud_dl?method=query_task&app_id=250528',
        '&bdstoken=',
        tokens['bdstoken'],
        '&task_ids=',
        ','.join(task_ids),
        '&t=',
        util.timestamp(),
        '&channel=chunlei&clienttype=0&web=1',
    ])
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #31
File: db.py  Project: Yas3r/svwa-1
def insert_post(forum_id, thread_id, message, user_id, first_post=False, timestamp=None):
    if timestamp is None:
        timestamp = str(util.timestamp())
    query_db('INSERT INTO posts (author, thread, message, time, first_post) VALUES\
            (' + str(user_id) + ',' + str(thread_id) + ',"' + message + '",' + timestamp +',' + str(b2i(first_post)) + ')')
    query_db('UPDATE forums SET post_count = post_count + 1 WHERE id = ' + str(forum_id))
    query_db('UPDATE threads SET post_count = post_count + 1 WHERE id = ' + str(thread_id))
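This function builds its SQL by concatenating user-supplied values into the query string. Given the project name (svwa), the injection is likely intentional for a deliberately vulnerable application; for reference, a parameterized version of the same insert could look like the sketch below, assuming a query_db variant that forwards a parameter tuple to sqlite3's execute():

# Sketch only; assumes query_db(sql, args) calls cursor.execute(sql, args).
query_db('INSERT INTO posts (author, thread, message, time, first_post) '
         'VALUES (?, ?, ?, ?, ?)',
         (user_id, thread_id, message, timestamp, b2i(first_post)))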
Code Example #32
File: pcs.py  Project: lubing521/bcloud
def list_share(cookie, tokens, uk, page=1):
    '''Get information about all the files the user has shared.

    uk   - user key
    page - page number; defaults to the first page.
    num  - number of shared files fetched per request; defaults to 100.
    '''
    num = 100
    start = 100 * (page - 1)
    url = ''.join([
        const.PAN_URL,
        'pcloud/feed/getsharelist?',
        '&t=', util.timestamp(),
        '&categor=0&auth_type=1&request_location=share_home',
        '&start=', str(start),
        '&limit=', str(num),
        '&query_uk=', str(uk),
        '&channel=chunlei&clienttype=0&web=1',
        '&bdstoken=', tokens['bdstoken'],
        ])
    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
        'Referer': const.SHARE_REFERER,
        })
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #33
File: auth.py  Project: huangjiang2014/bcloud
def refresh_sigin_vcode(cookie, token, vcodetype):
    """刷新验证码.

    vcodetype - 在调用check_login()时返回的vcodetype.
    """
    url = "".join(
        [
            const.PASSPORT_BASE,
            "v2/?reggetcodestr",
            "&token=",
            token,
            "&tpl=netdisk&apiver=v3",
            "&tt=",
            util.timestamp(),
            "&fr=ligin",
            "&vcodetype=",
            vcodetype,
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        try:
            return json.loads(req.data.decode("gb18030"))
        except ValueError as e:
            print(e)
    return None
Code Example #34
File: pcs.py  Project: blueyi/bcloud
def get_category(cookie, tokens, category, page=1):
    '''Get information about all the files in a category, e.g. music or images.

    Current categories:
      Video     - 1
      Music     - 2
      Images    - 3
      Documents - 4
      Apps      - 5
      Other     - 6
      BT seeds  - 7
    '''
    timestamp = util.timestamp()
    url = ''.join([
        const.PAN_API_URL,
        'categorylist?channel=chunlei&clienttype=0&web=1',
        '&category=', str(category),
        '&pri=-1&num=100',
        '&t=', timestamp,
        '&page=', str(page),
        '&order=time&desc=1',
        '&_=', timestamp,
        '&bdstoken=', cookie.get('STOKEN').value,
        ])
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #35
File: pcs.py  Project: BuddhismZhang/bcloud
def cloud_query_sinfo(cookie, tokens, source_path):
    """获取网盘中种子的信息, 比如里面的文件名, 文件大小等.

    source_path - BT种子的绝对路径.
    """
    url = "".join(
        [
            const.PAN_URL,
            "rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1",
            "&method=query_sinfo&app_id=250528",
            "&bdstoken=",
            tokens["bdstoken"],
            "&source_path=",
            encoder.encode_uri_component(source_path),
            "&type=2",
            "&t=",
            util.timestamp(),
        ]
    )
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #36
File: auth.py  Project: BuddhismZhang/bcloud
def get_bduss(cookie, token, username, password, verifycode='', codeString=''):
    '''Get the most important login cookie; with it you have the final access authorization.

    token      - the token value obtained from get_token().
    cookie     - the BAIDUID cookie.
    username   - the user name
    password   - the plain-text password
    verifycode - the four-character captcha entered by the user; may be empty
    codeString - the codeString used when fetching the captcha image; may be empty

    @return (status, info), where status indicates the result:
      0 - OK; info holds the auth cookies
     -1 - unknown error
      4 - wrong password
    257 - a captcha is required; info holds (vcodetype, codeString)
    '''
    url = const.PASSPORT_URL + '?login'
    data = ''.join([
        'staticpage=http%3A%2F%2Fwww.baidu.com%2Fcache%2Fuser%2Fhtml%2Fv3Jump.html',
        '&charset=utf-8',
        '&token=', token,
        '&tpl=mn&apiver=v3',
        '&tt=', util.timestamp(),
        '&codestring=', codeString,
        '&safeflg=0&u=https%3A%2F%2Fpassport.baidu.com%2F',
        '&isPhone=false&quick_user=0',
        '&loginmerge=true&logintype=basicLogin&logLoginType=pc_loginBasic',
        '&username='******'&password='******'&verifycode=', verifycode,
        '&mem_pass=on',
        '&ppui_logintime=', get_ppui_logintime(),
        '&callback=parent.bd__pcbs__cb',
        ])

    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
        'Content-type': const.CONTENT_FORM,
        'Accept': const.ACCEPT_HTML,
        }, data=data.encode())
    if req:
        auth_cookie = req.headers.get_all('Set-Cookie')
        if auth_cookie:
            return (0, auth_cookie)
        resp_content = req.data.decode()
        match = re.findall('"(err_no[^"]+)"', resp_content)
        if len(match) != 1:
            return (-1, None)
        query = dict(urllib.parse.parse_qsl(match[0]))
        err_no = int(query.get('err_no', '-1'))
        if err_no != 257:
            return (err_no, None)
        vcodetype = query.get('vcodetype', '')
        codeString = query.get('codeString', '')
        if vcodetype and codeString:
            return (257, (vcodetype, codeString))
        return (-1, None)
    else:
        return (-1, None)
Code Example #37
def pre_deploy(c, local, context):
    # TODO: async pre-deploy
    log.info("PRE DEPLOY")
    context.update({"DEPLOY_START_TIME": util.timestamp()})
    check_local_git_repo(local, context)
    check_deployment(c, context)
    check_versions(c, context)
    check_dependencies(c, context)
Code Example #38
    def write(self):
        '''
        Write the personal ledger to file
        '''
        self.ledger['timestamp'] = timestamp()
        output_f = open(self.fp, 'w')
        output_f.write(json.dumps(self.ledger, sort_keys=True))
        output_f.close()
Code Example #39
def main():
    global FEATUREPREFIX

    parser = get_argparser()
    args = parser.parse_args()

    FEATUREPREFIX = args.featureprefix

    util.DPRINT = args.dprint
    trainingdata.STOPWORDS = trainingdata.load_stopwords(args.bitextfn)

    print("## RUNNING EXPERIMENT on {0} with features {1}".format(
        os.path.basename(args.bitextfn), "doc2vec"))

    triple_sentences = trainingdata.load_bitext(args.bitextfn, args.alignfn)
    tl_sentences = trainingdata.get_target_language_sentences(triple_sentences)
    sl_sentences = [s for (s,t,a) in triple_sentences]
    tagged_sentences = [list(zip(ss, ts))
                        for ss,ts in zip(sl_sentences, tl_sentences)]
    trainingdata.set_examples(sl_sentences, tagged_sentences)

    source_annotated = annotated_corpus.load_corpus(args.annotatedfn)
    trainingdata.set_sl_annotated(source_annotated)

    language_pair = args.bitextfn.split(".")[1]
    print(language_pair)
    top_words = list_focus_words.load_top_words(language_pair)

    ## default is 1e-4.
    THETOL = 1e-4
    classifier_pairs = []
    classifier = MLPClassifier(solver='lbfgs', alpha=THETOL,
                               hidden_layer_sizes=(20,20))
    classifier_pairs.append(("mlp-20-20", classifier))

    classifier = LogisticRegression(C=1, penalty='l1', tol=THETOL)
    classifier_pairs.append(("maxent-l1-c1", classifier))

    classifier = LogisticRegression(C=1, penalty='l2', tol=THETOL)
    classifier_pairs.append(("maxent-l2-c1", classifier))

    classifier = LinearSVC(C=1, penalty='l2', tol=THETOL)
    classifier_pairs.append(("linearsvc-l2-c1", classifier))

    classifier = RandomForestClassifier()
    classifier_pairs.append(("random-forest-default", classifier))

    classifier = KNeighborsClassifier()
    classifier_pairs.append(("k-neighbors-default", classifier))

    stamp = util.timestamp() + "-" + language_pair
    featureset_name = "doc2vec"

    for (clname, classifier) in classifier_pairs:
        casename = "{0}-{1}-regular".format(clname, featureset_name)
        do_a_case(classifier, top_words, False, casename, stamp)
        casename = "{0}-{1}-nonnull".format(clname, featureset_name)
        do_a_case(classifier, top_words, True, casename, stamp)
Code Example #40
def process(text, commands):
	keys = tuple(commands)
	text = text.lower()

	for key in keys:
		if text.startswith(key):
			rest = text.replace(key, '').strip()
			timestamp(f'Recognized <{key}>\n')()  

			processed = commands[key](rest)

			# walrus
			if processed:
				text = processed
			else:
				text = rest

	return prettify(text)
Code Example #41
File: db.py  Project: Yas3r/svwa-1
def insert_thread(forum_id, topic, message, user_id):
    timestamp = str(util.timestamp())
    query_db('INSERT INTO threads (author, forum, title, time, post_count)\
                VALUES (' + str(user_id) + ',' + str(forum_id) + ',"' + topic + '",' + timestamp + ',0)')
    thread_id = g.cursor.lastrowid
    app.logger.debug("Inserting thread #" + str(thread_id))
    query_db('UPDATE forums SET thread_count = thread_count + 1 WHERE id = ' + str(forum_id))
    insert_post(forum_id, thread_id, message, user_id, first_post=True, timestamp=timestamp)
    return thread_id
Code Example #42
File: pcs.py  Project: BuddhismZhang/bcloud
def cloud_add_bt_task(cookie, tokens, source_url, save_path, selected_idx, file_sha1="", vcode="", vcode_input=""):
    """新建一个BT类的离线下载任务, 包括magent磁链.

    source_path  - BT种子所在的绝对路径
    save_path    - 下载的文件要存放到的目录
    selected_idx - BT种子中, 包含若干个文件, 这里, 来指定要下载哪些文件,
                   从1开始计数.
    file_sha1    - BT种子的sha1值, 如果是magent的话, 这个sha1值可以为空
    vcode        - 验证码的vcode
    vcode_input  - 用户输入的四位验证码
    """
    url = "".join(
        [
            const.PAN_URL,
            "rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1",
            "&bdstoken=",
            tokens["bdstoken"],
        ]
    )
    type_ = "2"
    url_type = "source_path"
    if source_url.startswith("magnet:"):
        type_ = "4"
        url_type = "source_url"
    if not save_path.endswith("/"):
        save_path = save_path + "/"
    data = [
        "method=add_task&app_id=250528",
        "&file_sha1=",
        file_sha1,
        "&save_path=",
        encoder.encode_uri_component(save_path),
        "&selected_idx=",
        ",".join(str(i) for i in selected_idx),
        "&task_from=1",
        "&t=",
        util.timestamp(),
        "&",
        url_type,
        "=",
        encoder.encode_uri_component(source_url),
        "&type=",
        type_,
    ]
    if vcode:
        data.append("&input=")
        data.append(vcode_input)
        data.append("&vcode=")
        data.append(vcode)
    data = "".join(data)
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()}, data=data.encode())
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #43
def squawk_to_json(squawk):
    return {
        'recipient': squawk['recipient'],
        'thread_members': squawk['thread_members'],
        'sender': squawk['sender'],
        'date': timestamp(squawk['date']),
        '_id': str(squawk['_id']),
        'listened': squawk['listened'],
        'thread_identifier': squawk.get('thread_identifier', "")
    }
Code Example #44
File: maze.py  Project: nkruglikov/maze
 def save(self, filename=util.timestamp()+".maze"):
     """ Saves Maze to pickle dump. """
     data = (self.size_x, self.size_y, self.walls,
             self.objects, self.__cells)
     try:
         with open(filename, "wb") as fh:
             pickle.dump(data, fh, 3)
     except (EnvironmentError, pickle.PicklingError) as err:
         print("[!] Error while "
                 "saving maze to {0}: {1}".format(filename, err))
Code Example #45
 def start_recording(self):
     print("Starting recording.")
     self.recording = True
     self.startTime = time.time()
     self.recordingFolder = "./ir_videos/"+str(int(time.time()*1000))+".h264"
     self.camera.start_recording(self.recordingFolder)
     self.data = {
         "recordingDateTime": util.datetimestamp(),
         "recordingTime": util.timestamp()
         }
Code Example #46
File: logger.py  Project: jgrahamc/gaga
def log(m):
    t = util.timestamp()
    d = '%s: %s\r' % (t, m)
    SER.send(d)
    try:
        f = open( log_file, 'a' )
        f.write(d)
        f.close()
    except:
        return
Code Example #47
File: logger.py  Project: wal99d/gaga
def log(m):
    t = util.timestamp()
    d = '%s: %s\r' % (t, m)
    SER.send(d)
    try:
        f = open(log_file, 'a')
        f.write(d)
        f.close()
    except:
        return
Code Example #48
def drawG_by_dendos(G,localD, globalD, params):
	# pass a subgraph induced by union seed community local + global 
	plt.figure(figsize=(10,6))
	node_size, nalpha, ealpha = 200, .5, .2
	
	pos = get_layout(params,G)

	nodes = list(G.nodes())

	#for node in nodes:
	#	assert(node in np.array(localD).flatten() or node in np.array(globalD).flatten())

	labels = {n:'' for n in G.nodes()}
	colors, bs, rs = [],{n:0 for n in G.nodes()},{n:0 for n in G.nodes()}
	for i in rng(localD):
		for n in localD[i]:
			if n in nodes:
				bs[n] = (len(localD)-i)/len(localD)
				labels[n] += 'L'
	for i in rng(globalD):
		for n in globalD[i]:
			if n in nodes:
				rs[n] = (len(globalD)-i)/len(globalD)
				if labels[n] not in ['G','LG']:
					labels[n] += 'G'
	labels[params['seeds'][0]] = 'Seed'

	alphas, sizes = [],[]
	for i in rng(nodes):
		node = nodes[i]
		if node == params['seeds'][0]:
			colors += [(0,0,1)]
			alphas += [1]
			sizes += [1000]
		else:
			colors += [(rs[node],bs[node],0)]
			alphas += [max(math.pow(bs[node],.3),.05)]
			sizes += [max(rs[node],.05)*1000]

	nx.draw_networkx_nodes(G, pos, nodelist=nodes, node_color=colors, node_size=node_size, alpha=nalpha)
	#labels = {n:labels[n] for n in G.nodes()} 
	labels = {n:n for n in G.nodes()} 
	nx.draw_networkx_labels(G, pos, labels, font_size=8, font_color='black')

	plt.title("Comparison of Hierarchical Communities", fontsize=26)
	elist = sorted(list(G.edges()))
	nx.draw_networkx_edges(G, pos, arrows=True, edgelist=elist, alpha=ealpha) 

	if params['save_fig']:
		tstamp = timestamp()
		plt.savefig(params['output_path']+'/'+str(tstamp)+'_dendos.png')
	else:
		plt.show()
	plt.clf()
	plt.close()
Code Example #49
def cloud_add_bt_task(cookie,
                      tokens,
                      source_url,
                      save_path,
                      selected_idx,
                      file_sha1='',
                      vcode='',
                      vcode_input=''):
    '''Create a BT offline download task, including magnet links.

    source_url   - absolute path of the BT torrent, or a magnet link
    save_path    - directory where the downloaded files will be stored
    selected_idx - a torrent contains several files; this specifies which of them
                   to download, counted from 1.
    file_sha1    - sha1 of the torrent; may be empty for a magnet link
    vcode        - the vcode of the captcha
    vcode_input  - the four-character captcha entered by the user
    '''
    url = ''.join([
        const.PAN_URL,
        'rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1',
        '&bdstoken=',
        tokens['bdstoken'],
    ])
    type_ = '2'
    url_type = 'source_path'
    if source_url.startswith('magnet:'):
        type_ = '4'
        url_type = 'source_url'
    if not save_path.endswith('/'):
        save_path = save_path + '/'
    data = [
        'method=add_task&app_id=250528', '&file_sha1=', file_sha1,
        '&save_path=',
        encoder.encode_uri_component(save_path), '&selected_idx=',
        ','.join(str(i) for i in selected_idx), '&task_from=1', '&t=',
        util.timestamp(), '&', url_type, '=',
        encoder.encode_uri_component(source_url), '&type=', type_
    ]
    if vcode:
        data.append('&input=')
        data.append(vcode_input)
        data.append('&vcode=')
        data.append(vcode)
    data = ''.join(data)
    req = net.urlopen(url,
                      headers={
                          'Cookie': cookie.header_output(),
                      },
                      data=data.encode())
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Code Example #50
    def extend(self, iterable):
        """Append multiple records to this array."""
        now = timestamp()
        columns = [Column(value, "", now) for value in iterable]
        mutations = [Mutation(column_or_supercolumn=ColumnOrSuperColumn(column=col))
                     for col in columns]

        mutation_map = {key.key:
                        {key.column_family: mutations}}
                                                            
        self._get_cas().batch_mutate(mutation_map, self.consistency)
Code Example #51
    def add(self, blockname, locations):
        '''
        Add a block to the giant ledger
        '''
        assert type(blockname) is str, 'Filename must be a string'
        self.ledger[blockname] = {
            'timestamp': timestamp(),
            'locations': locations,
            'valid': True
        }
        self.write()
Code Example #52
File: topwords.py  Project: alexrudnick/chipa
def main():
    parser = get_argparser()
    args = parser.parse_args()

    if not args.usetarget:
        trainingdata.STOPWORDS = trainingdata.load_stopwords(args.bitextfn)

    triple_sentences = trainingdata.load_bitext(args.bitextfn, args.alignfn)

    if args.usetarget:
        ## Flip directionality -- we want the top words out of the target text.
        new_triple_sentences = [(t, s, a) for (s, t, a) in triple_sentences]
        triple_sentences = new_triple_sentences

    sl_sentences = [s for (s,t,a) in triple_sentences]
    top_words = trainingdata.get_top_words(sl_sentences)

    with open("topwords.txt", "w") as topwordsout:
        for (i, (word, count)) in enumerate(top_words):
            print("{0} & {1} & {2} \\\\".format(1+i, word, count),
                  file=topwordsout)

    if args.usetarget:
        ## Bail out -- just getting target text top words.
        return

    tl_sentences = trainingdata.get_target_language_sentences(triple_sentences)
    tagged_sentences = [list(zip(ss, ts))
                        for ss,ts in zip(sl_sentences, tl_sentences)]
    trainingdata.set_examples(sl_sentences, tagged_sentences)
    source_annotated = annotated_corpus.load_corpus(args.annotatedfn)
    trainingdata.set_sl_annotated(source_annotated)

    stamp = util.timestamp()
    langs = args.bitextfn.split(".")[1]
    translations_fn = "results/{0}-{1}-translations".format(stamp, langs)
    entropy_fn = "results/{0}-{1}-entropy".format(stamp, langs)

    with open(translations_fn, "w") as topwordsout, \
         open(entropy_fn, "w") as entropyout:
         for (i, (word, count)) in enumerate(top_words):
            training = trainingdata.trainingdata_for(word, nonnull=False)
            labels = [label for (feat,label) in training]
            counts = Counter(labels)
            translations_l = []
            for label, count in counts.most_common(5):
                if label == UNTRANSLATED:
                    label = "NULL"
                translations_l.append("{0}".format(label))
            translations = ", ".join(translations_l)
            print("{0} & {1}".format(word, translations), file=topwordsout)

            bits = entropy(labels)
            print("%30s%30.2f" % (word, bits), file=entropyout)
Code Example #53
    def send_Action(self, *args):
        if not self.peer.sock: return 'break'

        text = self.send_Text.get('1.0', END)
        text = text.strip()

        self.send_Text.delete('1.0', END)

        if not text: return 'break'

        # /me is a "social command", so it's exempt from command processing
        if text[0] == '/' and not text.startswith('/me '):
            if text == '/bye':
                self.send_command('BYE ')
            elif text.startswith('/nick'):
                name = text[6:]
                if len(name):
                    self.send_command('NICK', data=name)
                    self.nick = name
                    self.append_text('You are now known as {}\n'.format(name))
            elif text == '/text':
                self.send_command('TEXT')
                self.send_mode = Mode.text
                self.send_header_once()
            elif text == '/json':
                self.send_command('JSON')
                self.send_mode = Mode.json
                self.send_header_once()
            elif text == '/msgpack':
                self.send_command('MPCK')
                self.send_mode = Mode.msgpack
                self.send_header_once()
            else:
                self.append_text('Unrecognized command: {}\n'.format(text))
        else:
            self.append_text('{} {}: {}\n'.format(timestamp(), self.nick,
                                                  text))

            msg = self.send_proto.message(text,
                                          to=self.peer.name,
                                          From=str(self.address or ''))

            if _test_fragment:
                t = len(msg)
                h = t // 2

                self.peer.sendall(msg[:h])
                time.sleep(0.1)
                self.peer.sendall(msg[h:])
            else:
                self.peer.sendall(msg)

        # Prevent default handler from adding a newline to the input textbox
        return 'break'
Code Example #54
    def stop_clock(self):
        cache.cleanup()
        self.stopwatch_service.stop()

        duration = self.stopwatch_service.duration()
        self.analytics_service.add_metric("duration", ("%s" % duration))
        self.analytics_service.submit()

        logger.allspark("")
        logger.allspark(">>> allspark took %s and finished at %s >>>" %
                        (duration, util.timestamp()))
        logger.allspark("")
Code Example #55
 def _apply(self, func, *arg) -> int:
     """apply the given function with the given args
     functions expected:
         * brightness_writer
         * brightness_increment
         * brightness_drecrement
     """
     result = -1
     if self._validate_time():
         self.last_change = util.timestamp()
         result = func(*arg)
     return result
Code Example #56
def send(phone, text):
    logger.log('sms.send(%s...,"%s")' % (phone[0:4], text))
    if can():
        r = at.cmd('AT+CMGS="%s"' % phone)
        if r == '\r\n> ':
            t = util.timestamp()
            r = at.raw('%s: %s\x1A' % (t, text), 30)
        else:
            r = at.raw('\x1B')
            logger.log('Failed to get SMS prompt')
    else:
        logger.log('No GSM access for SMS send')
Code Example #57
def post_deploy(c, context):
    # TODO: async post-deploy
    log.info("POST DEPLOY")

    # TODO: only restart services whose config has changed, set in context?

    # TODO: clear redis view-cache?
    restart_service(c, "redis")
    restart_service(c, "nginx")
    restart_service(c, "pubpublica")

    context.update({"DEPLOY_END_TIME": util.timestamp()})
Code Example #58
File: jointrecording.py  Project: tlack/dobot-tcn
def init():
    print(f'camera sources: {CAMERAS}')
    
    dev = bot.start(PORT)
    pose = bot.pose()
    print(f'initial pose: {pose}')
    if len(pose.keys()) == 0 or len(bot.movable_joints()) == 0:
        print(f'no joints active! cannot continue.')
        sys.exit(1)

    exp = promptexperiment()
    path = os.path.join(DATA, TOOL, f'{exp}-{timestamp()}')
    os.makedirs(path, exist_ok=True)
    print(f'path: {path}')

    for i in range(N_EXAMPLES):
        rs1 = bot.pose()
        print(f'rs1: {rs1}')

        ts = timestamp()
        test_slug = f"{i}-{ts}"
        jf = os.path.join(path, f"{test_slug}-joints.json")
        startcams(test_slug)
        time.sleep(PRE_DELAY)

        goal = {x: tweak(x, rs1[x]) for x in rs1}
        print(f'goal: {goal}')
        # todo check validity
        bot.move_wait(goal)

        rs2 = bot.pose()
        print(f'rs2: {rs2}')

        rsj = json.dumps({"from": rs1, "to": rs2, "goal": goal})
        open(jf, 'w').write(rsj)

        time.sleep(POST_DELAY)

        stopcams()
        vids = collectcams(test_slug)
        for cam, data in vids.items():
            vf = os.path.join(path, f"{test_slug}-{cam}.h264")
            open(vf, 'wb').write(data)
            if USE_REVIEW_TOOL:
                if i == 0 or random.random() > REVIEW_FREQ:
                    os.system(REVIEW_TOOL_COMMAND.format(vf))

        bot.move_wait(rs1)
        time.sleep(EXAMPLE_DELAY)
        print(f"saved to path: {path}")

    bot.close()
Code Example #59
    def _resetOptions(self, **kw):
        #
        # Get all the options first
        #
        self.method = kw.get("method", "corkscrew")
        self.rate = kw.get("rate", "linear")
        self.frames = kw.get("frames", 20)
        self.minimize = kw.get("minimize", False)
        self.steps = kw.get("steps", 60)

        #
        # Set up minimization apparatus if needed
        #
        if self.minimize and self._mi is None:
            from MMMD import MMTKinter
            from util import timestamp
            timestamp("Setting up MMTK universe")
            self._mi = MMTKinter.MMTKinter([self.mol],
                                           nogui=True,
                                           ljOptions=10.0,
                                           esOptions=10.0)
            timestamp("Finished setting up MMTK universe")