Exemplo n.º 1
0
def lambda_handler(event, context):
    """Refresh ClamAV definitions: pull from S3, run freshclam, push back."""
    s3_resource = boto3.resource("s3")
    s3_client = boto3.client("s3")

    print("Script starting at %s\n" % (get_timestamp()))
    pending = clamav.update_defs_from_s3(
        s3_client, AV_DEFINITION_S3_BUCKET, AV_DEFINITION_S3_PREFIX)

    defs_bucket = s3_resource.Bucket(AV_DEFINITION_S3_BUCKET)
    for entry in pending.values():
        remote_path = entry["s3_path"]
        target_path = entry["local_path"]
        print("Downloading definition file %s from s3://%s" %
              (target_path, remote_path))
        defs_bucket.download_file(remote_path, target_path)
        print("Downloading definition file %s complete!" % (target_path))

    clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
    # If main.cvd gets updated (very rare), freshclam must be forced to
    # re-download the compressed version to keep file sizes down; the
    # existence of main.cud is the signal that this has happened.
    main_cud = os.path.join(AV_DEFINITION_PATH, "main.cud")
    main_cvd = os.path.join(AV_DEFINITION_PATH, "main.cvd")
    if os.path.exists(main_cud):
        os.remove(main_cud)
        if os.path.exists(main_cvd):
            os.remove(main_cvd)
        clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
    clamav.upload_defs_to_s3(s3_client, AV_DEFINITION_S3_BUCKET,
                             AV_DEFINITION_S3_PREFIX, AV_DEFINITION_PATH)
    print("Script finished at %s\n" % get_timestamp())
Exemplo n.º 2
0
 def timeouthandler(self):
     """Handle a poll timeout on this connection.

     If the connection is still queued in ``connections_waiting`` it is
     removed and answered with ``RESPONSE_TIMEOUT``; otherwise a
     ``NOT_FOUND`` response is queued.  ``self.timeout`` is cleared in
     both cases.
     """
     if self in connections_waiting:
         connections_waiting.remove(self)
         # Only message-polling commands are expected in the wait queue.
         if not self.command in ["get_message", "scope_message"]:
             print ">>> failed, wrong connection type in queue"
         self.out_buffer += self.RESPONSE_TIMEOUT % get_timestamp()
     else:
         self.out_buffer += NOT_FOUND % (get_timestamp(), 0, '')
     self.timeout = 0
Exemplo n.º 3
0
 def timeouthandler(self):
     """Handle a poll timeout on this connection.

     If the connection is still queued in ``connections_waiting`` it is
     removed and answered with ``RESPONSE_TIMEOUT``; otherwise a
     ``NOT_FOUND`` response is queued.  ``self.timeout`` is cleared in
     both cases.
     """
     if self in connections_waiting:
         connections_waiting.remove(self)
         # Only message-polling commands are expected in the wait queue.
         if not self.command in ["get_message", "scope_message"]:
             print ">>> failed, wrong connection type in queue"
         self.out_buffer += self.RESPONSE_TIMEOUT % get_timestamp()
     else:
         self.out_buffer += NOT_FOUND % (get_timestamp(), 0, '')
     self.timeout = 0
Exemplo n.º 4
0
def lambda_handler(event, context):
    """Scan the S3 object named in *event* and record/publish the result."""
    s3_resource = boto3.resource("s3", endpoint_url=S3_ENDPOINT)
    s3_client = boto3.client("s3", endpoint_url=S3_ENDPOINT)
    sns_client = boto3.client("sns", endpoint_url=SNS_ENDPOINT)

    start_clamd(s3_resource, s3_client)

    # Event source is configurable; defaults to plain S3 notifications.
    event_source = os.getenv("EVENT_SOURCE", "S3")

    started_at = get_timestamp()
    logging.debug("Script starting at %s\n" % (started_at))
    s3_object = event_object(event, event_source=event_source)

    if str_to_bool(AV_PROCESS_ORIGINAL_VERSION_ONLY):
        verify_s3_object_version(s3_resource, s3_object)

    # Announce the scan start when a start-scan topic is configured.
    if AV_SCAN_START_SNS_ARN not in (None, ""):
        scan_started_at = get_timestamp()
        sns_start_scan(sns_client, s3_object, AV_SCAN_START_SNS_ARN,
                       scan_started_at)

    # Work inside a throwaway directory on the EFS scan volume so the
    # downloaded object is always cleaned up.
    with tempfile.TemporaryDirectory(prefix=EFS_SCAN_FILE_PATH) as scratch:
        file_path = get_local_path(s3_object, scratch)
        create_dir(os.path.dirname(file_path))
        s3_object.download_file(file_path)

        scan_result, scan_signature = clamav.scan_file(file_path)
        logging.info(
            "Scan of s3://%s resulted in %s\n" %
            (os.path.join(s3_object.bucket_name, s3_object.key), scan_result))

        result_time = get_timestamp()
        # Set the properties on the object with the scan results
        if "AV_UPDATE_METADATA" in os.environ:
            set_av_metadata(s3_object, scan_result, scan_signature,
                            result_time)
        set_av_tags(s3_client, s3_object, scan_result, scan_signature,
                    result_time)

        # Publish the scan results
        if AV_STATUS_SNS_ARN not in (None, ""):
            sns_scan_results(sns_client, s3_object, AV_STATUS_SNS_ARN,
                             scan_result, scan_signature, result_time)

    finished_at = get_timestamp()
    logging.debug("Script finished at %s\n" % finished_at)
Exemplo n.º 5
0
def lambda_handler(event, context):
    """Download the triggering S3 object, scan it, and publish results.

    A disk-full error during download is reported as a failed scan
    instead of crashing the handler; infected objects are optionally
    deleted afterwards.
    """
    import errno  # local import: only needed for the disk-full check

    s3 = boto3.resource("s3")
    sns_client = boto3.client("sns")

    # Get some environment variables
    EVENT_SOURCE = os.getenv("EVENT_SOURCE", "S3")

    start_time = get_timestamp()
    print("Script starting at %s\n" % (start_time))
    s3_object = event_object(event, event_source=EVENT_SOURCE)

    if str_to_bool(AV_PROCESS_ORIGINAL_VERSION_ONLY):
        verify_s3_object_version(s3, s3_object)

    # Publish the start time of the scan
    if AV_SCAN_START_SNS_ARN not in [None, ""]:
        start_scan_time = get_timestamp()
        sns_start_scan(sns_client, s3_object, AV_SCAN_START_SNS_ARN,
                       start_scan_time)

    file_path = get_local_path(s3_object, "/tmp")
    create_dir(os.path.dirname(file_path))
    try:
        s3_object.download_file(file_path)
    except OSError as e:
        remove_file(file_path)
        # errno.ENOSPC (28): /tmp filled up — report a failed scan
        # rather than letting the exception kill the handler.
        if e.errno == errno.ENOSPC:
            print("Ran out of disk space. Scan failed")
            publish_results(s3_object, AV_STATUS_FAILED,
                            "File too large to scan")
            return
        else:
            raise

    download_clamav_databases()

    scan_result, scan_signature = clamav.scan_file(file_path)
    print("Scan of s3://%s resulted in %s\n" %
          (os.path.join(s3_object.bucket_name, s3_object.key), scan_result))

    publish_results(s3_object, scan_result, scan_signature)

    # Delete downloaded file to free up room on re-usable lambda function container
    remove_file(file_path)
    if str_to_bool(
            AV_DELETE_INFECTED_FILES) and scan_result == AV_STATUS_INFECTED:
        delete_s3_object(s3_object)
    stop_scan_time = get_timestamp()
    print("Script finished at %s\n" % stop_scan_time)
Exemplo n.º 6
0
def publish_results(s3_object, scan_result, scan_signature):
    """Record the scan outcome on the object, notify SNS, emit metrics."""
    result_time = get_timestamp()
    sns_client = boto3.client("sns")
    s3_client = boto3.client("s3")
    env_name = os.getenv("ENV", "")

    # Mirroring the result into object metadata is opt-in via env var.
    if "AV_UPDATE_METADATA" in os.environ:
        set_av_metadata(s3_object, scan_result, scan_signature, result_time)
    set_av_tags(s3_client, s3_object, scan_result, scan_signature, result_time)

    # Notify subscribers when a results topic is configured.
    if AV_STATUS_SNS_ARN not in (None, ""):
        sns_scan_results(sns_client, s3_object, AV_STATUS_SNS_ARN,
                         scan_result, scan_signature, result_time)

    metrics.send(env=env_name,
                 bucket=s3_object.bucket_name,
                 key=s3_object.key,
                 status=scan_result)
Exemplo n.º 7
0
Arquivo: post.py Projeto: blippy/pypms
def create_insertion(jcode):
    """Build an ordered invoice-insertion record for job code *jcode*.

    All standard keys start at 0.0; date, billing period, job code and
    a timestamped comment are then filled in.
    """
    record = ordereddict.OrderedDict((key, 0.0) for key in get_keys())
    record['InvDate'] = datetime.date.today().strftime('%d/%m/%Y')
    record['InvBillingPeriod'] = period.mmmmyyyy()
    record['InvJobCode'] = str(jcode)
    record['InvComments'] = "PMS " + common.get_timestamp()
    return record
Exemplo n.º 8
0
def thread(task):
    """Upload *task* via the client and persist its timing and status."""
    result = client.upload_file(task)
    result.finish_time = get_timestamp()
    result.spend_time = result.finish_time - result.start_time
    # Status 1 is the success value; anything else is reported as a failure.
    if result.status != 1:
        print_error(os.path.basename(result.filepath) + ' 上传失败')
    client.save_task(result)
    def test_sns_start_scan(self):
        """sns_start_scan publishes the scan-start message to SNS.

        Both the SNS client and the S3 resource's underlying client are
        stubbed; the stubbed head_object supplies the version id that
        must appear in the published message.
        """
        sns_stubber = Stubber(self.sns_client)
        s3_stubber_resource = Stubber(self.s3.meta.client)

        sns_arn = "some_arn"
        version_id = "version-id"
        timestamp = get_timestamp()
        # Expected message body: the publish call must serialise exactly this.
        message = {
            "bucket": self.s3_bucket_name,
            "key": self.s3_key_name,
            "version": version_id,
            AV_SCAN_START_METADATA: True,
            AV_TIMESTAMP_METADATA: timestamp,
        }
        publish_response = {"MessageId": "message_id"}
        publish_expected_params = {
            "TargetArn": sns_arn,
            "Message": json.dumps({"default": json.dumps(message)}),
            "MessageStructure": "json",
        }
        sns_stubber.add_response("publish", publish_response, publish_expected_params)

        # head_object is called to resolve the object's version id.
        head_object_response = {"VersionId": version_id}
        head_object_expected_params = {
            "Bucket": self.s3_bucket_name,
            "Key": self.s3_key_name,
        }
        s3_stubber_resource.add_response(
            "head_object", head_object_response, head_object_expected_params
        )
        with sns_stubber, s3_stubber_resource:
            s3_obj = self.s3.Object(self.s3_bucket_name, self.s3_key_name)
            sns_start_scan(self.sns_client, s3_obj, sns_arn, timestamp)
Exemplo n.º 10
0
def sns_scan_results(sns_client, s3_object, sns_arn, scan_result,
                     scan_signature, timestamp):
    """Publish the scan outcome for *s3_object* to SNS topic *sns_arn*.

    CLEAN and INFECTED results are suppressed when the corresponding
    AV_STATUS_SNS_PUBLISH_* flag is off.

    Fix: the message now embeds the *timestamp* argument instead of a
    fresh get_timestamp() call — the parameter was previously unused, so
    the published time could differ from the one recorded on the object
    (the sibling unit test builds its expected message from the passed
    timestamp).
    """
    # Don't publish if scan_result is CLEAN and CLEAN results should not be published
    if scan_result == AV_STATUS_CLEAN and not str_to_bool(
            AV_STATUS_SNS_PUBLISH_CLEAN):
        return
    # Don't publish if scan_result is INFECTED and INFECTED results should not be published
    if scan_result == AV_STATUS_INFECTED and not str_to_bool(
            AV_STATUS_SNS_PUBLISH_INFECTED):
        return
    message = {
        "bucket": s3_object.bucket_name,
        "key": s3_object.key,
        "version": s3_object.version_id,
        AV_SIGNATURE_METADATA: scan_signature,
        AV_STATUS_METADATA: scan_result,
        AV_TIMESTAMP_METADATA: timestamp,
    }
    sns_client.publish(
        TargetArn=sns_arn,
        Message=json.dumps({"default": json.dumps(message)}),
        MessageStructure="json",
        MessageAttributes={
            AV_STATUS_METADATA: {
                "DataType": "String",
                "StringValue": scan_result
            },
            AV_SIGNATURE_METADATA: {
                "DataType": "String",
                "StringValue": scan_signature,
            },
        },
    )
Exemplo n.º 11
0
 def __init__(self, drive_id, root_path, chunk_size=10485760):
     """Initialise an upload task.

     drive_id:   target drive identifier
     root_path:  remote root path for the upload
     chunk_size: upload part size in bytes (default 10485760 = 10 MiB)
     """
     # Status/timing bookkeeping; start_time is set immediately,
     # finish/spend times are filled in after the upload completes.
     self.status = 0
     self.create_time = 0
     self.start_time = common.get_timestamp()
     self.finish_time = 0
     self.spend_time = 0
     self.drive_id = drive_id
     self.root_path = root_path
     self.chunk_size = chunk_size
     # Local file identity; populated later by the upload pipeline.
     self.filepath = None
     self.realpath = None
     self.filename = None
     self.hash = None
     self.proof_code = None
     # Multipart-upload state.
     self.part_info_list = []
     self.part_upload_url_list = []
     self.upload_id = 0
     self.file_id = 0
     self.part_number = 0
     self.filesize = 0
     # Auth header uses the globally stored access token.
     self.headers = {
         'authorization': DATA['access_token'],
         'content-type': 'application/json;charset=UTF-8'
     }
     self.id = None
Exemplo n.º 12
0
 def return_service_list(self, serviceList):
     """Queue an HTTP response listing the available scope services.

     Each service name is UTF-8 encoded into a SERVICE_ITEM fragment;
     the joined markup is wrapped in SERVICE_LIST and sent through the
     RESPONSE_SERVICELIST template (timestamp, content length, body).
     """
     content = SERVICE_LIST % "".join(
         [SERVICE_ITEM % service.encode('utf-8')
         for service in serviceList])
     self.out_buffer += self.RESPONSE_SERVICELIST % (
         get_timestamp(),
         len(content),
         content)
Exemplo n.º 13
0
def thread(task):
    """Worker entry point: upload one task and persist its outcome.

    Waits for any in-progress token refresh before starting (the lock is
    presumably held elsewhere while DATA['access_token'] is refreshed —
    confirm against the refresh code).
    """
    # The lock is used purely as a barrier: acquire-then-release with no
    # protected work. The context manager form makes that explicit and
    # guarantees release even if interrupted.
    with LOCK_TOKEN_REFRESH:
        pass
    drive = client.upload_file(task)
    drive.finish_time = get_timestamp()
    drive.spend_time = drive.finish_time - drive.start_time
    # Status 1 is the success value; anything else is reported as a failure.
    if drive.status != 1:
        print_error(os.path.basename(drive.filepath) + ' 上传失败')
    client.save_task(drive)
Exemplo n.º 14
0
 def test_web_sock_13_high_load(self):
     """Upgrade this connection to the high-load WebSocket-13 test handler.

     Non-websocket requests are answered with BAD_REQUEST.
     """
     if self.headers.get("Upgrade") == "websocket":
         # Hand the raw socket over to the handler and stop managing it here.
         self.del_channel()
         self.timeout = 0
         TestWebSocket13HighLoad(self.socket, self.headers, self.in_buffer,
                                 self.path)
     else:
         self.out_buffer += BAD_REQUEST % get_timestamp()
         self.timeout = 0
Exemplo n.º 15
0
    def quote(symbol, flags=None):
        """Return an invalidated StockQuote stub for *symbol*.

        *flags* is accepted for interface compatibility but unused; the
        quote carries only a completion timestamp and valid=False.
        """
        stub = common.StockQuote(symbol)
        stub.complete_time = common.get_timestamp()
        stub.valid = False
        return stub
Exemplo n.º 16
0
 def get_stp_version(self):
     """Answer with the scope STP version as a text/plain HTTP response."""
     content = scope.get_STP_version()
     self.out_buffer += RESPONSE_OK_CONTENT % (
         get_timestamp(),
         '',
         "text/plain",
         len(content),
         content)
     self.timeout = 0
Exemplo n.º 17
0
 def stp_1_channel(self):
     """Upgrade this connection to an STP/1 WebSocket channel.

     Non-websocket requests are answered with BAD_REQUEST.
     """
     if self.headers.get("Upgrade") == "websocket":
         # Hand the raw socket over to the STP handler; stop managing it here.
         self.del_channel()
         self.timeout = 0
         STPWebSocket(self.socket, self.headers, self.in_buffer, self.path,
                      self.context, scope.get_scope_connection())
     else:
         self.out_buffer += BAD_REQUEST % get_timestamp()
         self.timeout = 0
Exemplo n.º 18
0
def main(home_dir, experiments_dir, subsystem_dir, telemetry_script_dir):
    """
    Home directory: Where config info for experiments, etc., is
    Experiments directory: Where experiment implementations are
    Both should be given as absolute directories
    """
    time_str = get_timestamp()

    if not check_file_exists(home_dir, 'config.json'):
        print('Dashboard config (config.json) is missing in {}'.format(home_dir))
        return 1
    dash_config = read_json(home_dir, 'config.json')

    # Expand tildes up front so later path joins never see '~'.
    for field in ('tmp_data_dir', 'setup_dir', 'backup_dir'):
        dash_config[field] = os.path.expanduser(dash_config[field])

    tmp_root = dash_config['tmp_data_dir']
    tmp_data_dir = os.path.join(tmp_root, 'benchmarks_' + time_str)
    data_archive = os.path.join(tmp_root,
                                'benchmarks_' + time_str + '_data.tar.gz')
    setup_dir = dash_config['setup_dir']
    backup_archive = os.path.join(dash_config['backup_dir'],
                                  'dashboard_' + time_str + '.tar.gz')
    for needed_dir in (tmp_data_dir, os.path.dirname(backup_archive),
                       setup_dir):
        idemp_mkdir(needed_dir)

    info = DashboardInfo(home_dir)

    # Back up the previous dashboard files if any exist.
    if os.path.exists(home_dir):
        subprocess.call(['tar', '-zcf', backup_archive, home_dir])

    # These directories carry state between runs; every other dashboard
    # directory is wiped and recreated.
    persistent_dirs = {info.exp_data,
                       info.exp_configs,
                       info.subsys_configs,
                       info.subsys_output}
    for dashboard_dir in info.all_experiment_dirs() + info.all_subsystem_dirs():
        if dashboard_dir not in persistent_dirs:
            subprocess.call(['rm', '-rf', dashboard_dir])
        idemp_mkdir(dashboard_dir)

    randomize_exps = dash_config.get('randomize', True)
    telemetry_rate = dash_config.get('telemetry_rate', 15)
    run_cpu_telemetry = dash_config.get('run_cpu_telemetry', False)
    run_gpu_telemetry = dash_config.get('run_gpu_telemetry', False)
    run_all_experiments(info, experiments_dir, setup_dir,
                        tmp_data_dir, data_archive,
                        time_str, telemetry_script_dir,
                        run_cpu_telemetry=run_cpu_telemetry,
                        run_gpu_telemetry=run_gpu_telemetry,
                        telemetry_interval=telemetry_rate,
                        randomize=randomize_exps)

    run_all_subsystems(info, subsystem_dir, time_str)
Exemplo n.º 19
0
    def quote(symbol, flags=None):
        """Fetch a quote for *symbol* from etnet and parse the JSON reply.

        Returns a StockQuote; valid=True only when the JSON payload was
        found and parsed.  *flags* is accepted for interface
        compatibility but unused.

        Bug fix: the original referenced ``quote.complete_time`` before
        ``quote`` was assigned when the JSON payload was missing, which
        raises UnboundLocalError (``quote`` is a local name because it
        is assigned later in the function).  The StockQuote is now
        created before the early-return branch.
        """
        data = common.download_webpage(JsonReader._etnet_url % symbol)

        quote = common.StockQuote(symbol)

        # strip JSON from reply
        r = re.search(r'\[(.*)\]', data, re.S | re.U)
        if not r:
            # No JSON found: return the quote with only a completion time
            # (valid keeps whatever StockQuote's default is).
            quote.complete_time = common.get_timestamp()
            return quote

        d = json.loads(r.groups()[0])

        quote.quote = d['ask']
        quote.low = d['low']
        quote.high = d['high']
        quote.volume = d['sharestraded']
        quote.opening = None
        quote.close_yest = d['prvClose']

        # format as isotime string
        # NOTE(review): partition(' ') returns (head, sep, tail); index [1]
        # is the ' ' separator itself, so quote_time ends with 'T '.
        # Index [2] (the time part) looks intended — confirm the upstream
        # SEHKTime format before changing.
        SEHKTime = d['SEHKTime'].partition(' ')
        quote.quote_time = SEHKTime[0] + 'T' + SEHKTime[1]

        quote.complete_time = common.get_timestamp()
        quote.valid = True
        return quote
Exemplo n.º 20
0
    def test_sns_scan_results(self):
        """sns_scan_results publishes a CLEAN result with full attributes.

        Stubs SNS publish (matching the exact JSON message and message
        attributes) and the S3 head_object call that resolves the
        object's version id.
        """
        sns_stubber = Stubber(self.sns_client)
        s3_stubber_resource = Stubber(self.s3.meta.client)

        sns_arn = "some_arn"
        version_id = "version-id"
        scan_result = "CLEAN"
        scan_signature = AV_SIGNATURE_OK
        timestamp = get_timestamp()
        # Expected message body embedded in the publish call.
        message = {
            "bucket": self.s3_bucket_name,
            "key": self.s3_key_name,
            "version": version_id,
            AV_SIGNATURE_METADATA: scan_signature,
            AV_STATUS_METADATA: scan_result,
            AV_TIMESTAMP_METADATA: timestamp,
        }
        publish_response = {"MessageId": "message_id"}
        publish_expected_params = {
            "TargetArn": sns_arn,
            "Message": json.dumps({"default": json.dumps(message)}),
            "MessageAttributes": {
                "av-status": {
                    "DataType": "String",
                    "StringValue": scan_result
                },
                "av-signature": {
                    "DataType": "String",
                    "StringValue": scan_signature
                },
                "bucket": {
                    "DataType": "String",
                    "StringValue": self.s3_bucket_name
                },
                "key": {
                    "DataType": "String",
                    "StringValue": self.s3_key_name
                },
            },
            "MessageStructure": "json",
        }
        sns_stubber.add_response("publish", publish_response,
                                 publish_expected_params)

        # head_object resolves the version id used in the message.
        head_object_response = {"VersionId": version_id}
        head_object_expected_params = {
            "Bucket": self.s3_bucket_name,
            "Key": self.s3_key_name,
        }
        s3_stubber_resource.add_response("head_object", head_object_response,
                                         head_object_expected_params)
        with sns_stubber, s3_stubber_resource:
            s3_obj = self.s3.Object(self.s3_bucket_name, self.s3_key_name)
            sns_scan_results(self.sns_client, s3_obj, sns_arn, scan_result,
                             scan_signature, timestamp)
Exemplo n.º 21
0
def lambda_handler(event, context):
    """Rebuild the local ClamAV definition store and push it to S3.

    Definitions are refreshed via freshclam only — the per-file S3
    download step is intentionally skipped.  A non-zero freshclam exit
    code raises RuntimeError.
    """
    s3 = boto3.resource("s3")
    s3_client = boto3.client("s3")

    start_time = datetime.utcnow()
    print("Script starting at %s\n" %
          (start_time.strftime("%Y/%m/%d %H:%M:%S UTC")))

    # Start from an empty definition directory.
    shutil.rmtree(AV_DEFINITION_PATH, ignore_errors=True)
    os.mkdir(AV_DEFINITION_PATH)

    # Still invoked for its bookkeeping, but the downloads themselves
    # are skipped below.
    to_download = clamav.update_defs_from_s3(s3_client,
                                             AV_DEFINITION_S3_BUCKET,
                                             AV_DEFINITION_S3_PREFIX)

    print("Skipping clamav definition download %s\n" % (get_timestamp()))

    rc = clamav.update_defs_from_freshclam(AV_DEFINITION_PATH,
                                           CLAMAVLIB_PATH)
    if rc != 0:
        raise RuntimeError("clamAV update process returned %d" % (rc))

    # If main.cvd gets updated (very rare), force freshclam to re-download
    # the compressed database; main.cud existing is the trigger.
    main_cud = os.path.join(AV_DEFINITION_PATH, "main.cud")
    main_cvd = os.path.join(AV_DEFINITION_PATH, "main.cvd")
    if os.path.exists(main_cud):
        os.remove(main_cud)
        if os.path.exists(main_cvd):
            os.remove(main_cvd)
        rc = clamav.update_defs_from_freshclam(AV_DEFINITION_PATH,
                                               CLAMAVLIB_PATH)
        if rc != 0:
            raise RuntimeError("Refresh clamAV update process returned %d" %
                               (rc))
    clamav.upload_defs_to_s3(s3_client, AV_DEFINITION_S3_BUCKET,
                             AV_DEFINITION_S3_PREFIX, AV_DEFINITION_PATH)
    print("Script finished at %s\n" % get_timestamp())
Exemplo n.º 22
0
def fix_missing_entries(scrip):
	"""Fill missing per-minute data points for table *scrip*.

	For each distinct trading date, compares the number of stored points
	between 09:16 and 15:30 with the required count; when more than one
	point is missing, walks the session minute-by-minute and synthesises
	each gap from the most recent known close via process_missing().
	"""
	c.pr("I","Fixing Missing Entries For Scrip "+scrip,1)
	uniq_dates = s.sql_array("SELECT DISTINCT CAST(`time` AS DATE) AS dateonly FROM `"+scrip+"`","dateonly")
	for date in uniq_dates:
		dp_req = fetch_dp_req(str(date),scrip)
		db_dp  = s.sql_hash(scrip,"timestamp","close","WHERE `time` BETWEEN '"+str(date)+" 09:16:00' AND '"+str(date)+" 15:30:00'")
		dp_cur = len(db_dp)
		dp_mis = (dp_req - dp_cur)
		dp_map = {}
		if dp_mis > 1:
			c.pr("I","DATE --> "+str(date)+" DP REQ --> "+str(dp_req)+" DP CUR --> "+str(dp_cur)+" DP MIS --> "+str(dp_mis),1)
			#Here We attempt to fix DP
			dp_min  = int(c.get_timestamp(str(date)+" 09:16:00"))
			dp_max  = int(c.get_timestamp(str(date)+" 15:30:00"))
			#c.pr("I","DP MIN ---> "+str(dp_min)+"  DP MAX ---> "+str(dp_max),1)
			dp_chk  = dp_min
			ctr = 1
			dp_last = 0
			# Walk the session one minute (60 s of epoch time) at a time.
			while dp_chk != (dp_max+60):
				if not str(dp_chk) in db_dp:
					#If MIN AND CHK Are Same
					if dp_chk == dp_min:
						 c.pr("I",str(dp_chk)+" ---> MIN MISSING",1)
						 #exit()
					else:
						if str((dp_chk - 60)) in db_dp:
							#Case Where Previous Data point exists
							dp_prev = db_dp[str((dp_chk - 60))]['close']
							#print(str(dp_chk)+"  ---> PREV PRESENT"+" DP PREV ---> "+str(dp_prev))
							dp_map[str(dp_chk)] = process_missing(dp_prev,dp_chk)
						else:
							#print(str(dp_chk)+"  ---> PREV MISSISNG"+" DP PREV ---> "+str(dp_last))
							# Fall back to the last timestamp that had data.
							if dp_last:
								dp_prev = db_dp[str(dp_last)]['close']
								dp_map[str(dp_chk)] = process_missing(dp_prev,dp_chk)
							#print(str(dp_chk)+"  ---> PREV MISSISNG"+" DP PREV ---> "+str(dp_prev))
				else:
					dp_last = dp_chk
				dp_chk  = (dp_chk+60)
			if len(dp_map):
				store_data(dp_map,scrip)
	return
Exemplo n.º 23
0
 def ping_recv(self, msg):
     """Parse ping (without flag) and send back when necessary."""
     seq = int(msg[0])
     if seq == 0:
         # Initial ping: reply with an encrypted packet ("1"+"1" header,
         # echoed payload, our current timestamp appended).
         raw_packet = "1" + "1" + msg[1:] + get_timestamp()
         to_write = self.cipher.encrypt(raw_packet) + self.split
         self.send(to_write)
     else:
         # Reply ping: payload carries the peer-sent timestamp (ms);
         # latency is current ms time minus that value.
         time1 = parse_timestamp(msg[1:])
         self.latency = int(time.time() * 1000) - time1
         logging.debug("latency: %dms" % self.latency)
Exemplo n.º 24
0
 def ping_recv(self, msg):
     """Parse ping (without flag) and send back when necessary."""
     seq = int(msg[0])
     if seq == 0:
         # Initial ping: reply with an encrypted packet ("1"+"1" header,
         # echoed payload, our current timestamp appended).
         raw_packet = "1" + "1" + msg[1:] + get_timestamp()
         to_write = self.cipher.encrypt(raw_packet) + self.split
         self.send(to_write)
     else:
         # Reply ping: payload carries the peer-sent timestamp (ms);
         # latency is current ms time minus that value.
         time1 = parse_timestamp(msg[1:])
         self.latency = int(time.time() * 1000) - time1
         logging.debug("latency: %dms" % self.latency)
Exemplo n.º 25
0
    def print_config_info(self):
        """Log the startup banner together with every config key/value."""
        s = ''
        for k in DATA['config'].keys():
            s += "\n\t\t%s:%s" % (k, DATA['config'][k])

        content = '''=================================================
        阿里云盘上传工具启动成功
        当前时间:%s%s
=================================================
''' % (date(get_timestamp()), s)
        self.print(content, 'info')
Exemplo n.º 26
0
 def return_scope_message_STP_0(self, msg, sender):
     """ return a message to the client"""
     service, payload = msg
     if self.debug:
         pretty_print_XML("\nsend to client: %s" % service, payload,
                          self.debug_format)
     self.out_buffer += self.SCOPE_MESSAGE_STP_0 % (
         get_timestamp(), service, len(payload), payload)
     self.timeout = 0
     # NOTE(review): presumably avoids a re-entrant write when the sender
     # is this same connection — confirm against the dispatch loop.
     if not sender == self:
         self.handle_write()
Exemplo n.º 27
0
 def test_web_sock_13(self):
     """Upgrade this connection to the WebSocket-13 test handler.

     Non-websocket requests are answered with BAD_REQUEST.
     """
     if self.headers.get("Upgrade") == "websocket":
         # Hand the raw socket over to the handler and stop managing it here.
         self.del_channel()
         self.timeout = 0
         TestWebSocket13(self.socket,
                         self.headers,
                         self.in_buffer,
                         self.path)
     else:
         self.out_buffer += BAD_REQUEST % get_timestamp()
         self.timeout = 0
Exemplo n.º 28
0
 def execute(self):
     """Fetch quotes for all symbols into redis and record run metadata."""
     for symbol in self.symbols:
         redisdb.hmset(symbol, self.__get_quote(symbol))
     # Replace the stored symbol set: delete, then re-add all members.
     redisdb.delete('WORKER-SYMBOLS')
     redisdb.sadd('WORKER-SYMBOLS', *self.symbols)
     # Record when this run happened and which providers produced it.
     redisdb.hmset('WORKER-LASTEXEC',
         {
             'time': common.get_timestamp(),
             'QUOTE_PROVIDER': self.__quote_provider.__class__.__name__,
             'DETAIL_PROVIDER': self.__detail_provider.__class__.__name__,
         }
     )
Exemplo n.º 29
0
 def snapshot(self):
     """store a markup snapshot"""
     raw_data = self.raw_post_data
     if raw_data:
         # Payload is "<name>CRLF<markup>"; the name becomes the file name.
         name, data = raw_data.split(CRLF, 1)
         f = open(name + ".xml", 'wb')
         # f.write(pretty_dragonfly_snapshot(data))
         # Strip serialisation artefacts: stray '="" attributes and
         # self-closing <script/> tags (plus trailing whitespace).
         data = data.replace("'=\"\"", "")
         data = re.sub(r'<script(?:[^/>]|/[^>])*/>[ \r\n]*', '', data)
         f.write(data.replace("'=\"\"", ""))
         f.close()
     self.out_buffer += self.RESPONSE_OK_OK % get_timestamp()
     self.timeout = 0
Exemplo n.º 30
0
    def print_config_info(self):
        """Log the startup banner with config values (credentials skipped)."""
        s = ''
        config__keys = DATA['config'].keys()
        for k in config__keys:
            # Never echo credentials/identifiers into the log.
            if k in ['REFRESH_TOKEN', 'DRIVE_ID']: continue
            s += "\n\t\t%s:%s" % (k, DATA['config'][k])

        content = '''=================================================
        阿里云盘上传工具启动成功
        当前时间:%s%s
=================================================
''' % (date(get_timestamp()), s)
        self.print(content, 'info')
Exemplo n.º 31
0
 def snapshot(self):
     """store a markup snapshot"""
     raw_data = self.raw_post_data
     if raw_data:
         # Payload is "<name>CRLF<markup>"; the name becomes the file name.
         name, data = raw_data.split(CRLF, 1)
         f = open(name + ".xml", 'wb')
         # f.write(pretty_dragonfly_snapshot(data))
         # Strip serialisation artefacts: stray '="" attributes and
         # self-closing <script/> tags (plus trailing whitespace).
         data = data.replace("'=\"\"", "")
         data = re.sub(r'<script(?:[^/>]|/[^>])*/>[ \r\n]*', '', data)
         f.write(data.replace("'=\"\"", ""))
         f.close()
     self.out_buffer += self.RESPONSE_OK_OK % get_timestamp()
     self.timeout = 0
Exemplo n.º 32
0
 def stp_1_channel(self):
     """Upgrade this connection to an STP/1 WebSocket channel.

     Non-websocket requests are answered with BAD_REQUEST.
     """
     if self.headers.get("Upgrade") == "websocket":
         # Hand the raw socket over to the STP handler; stop managing it here.
         self.del_channel()
         self.timeout = 0
         STPWebSocket(self.socket,
                      self.headers,
                      self.in_buffer,
                      self.path,
                      self.context,
                      scope.get_scope_connection())
     else:
         self.out_buffer += BAD_REQUEST % get_timestamp()
         self.timeout = 0
Exemplo n.º 33
0
 def return_scope_message_STP_0(self, msg, sender):
     """ return a message to the client"""
     service, payload = msg
     if self.debug:
         pretty_print_XML("\nsend to client: %s" % service, payload, self.debug_format)
     self.out_buffer += self.SCOPE_MESSAGE_STP_0 % (
         get_timestamp(),
         service,
         len(payload),
         payload)
     self.timeout = 0
     # NOTE(review): presumably avoids a re-entrant write when the sender
     # is this same connection — confirm against the dispatch loop.
     if not sender == self:
         self.handle_write()
Exemplo n.º 34
0
    def savefile(self):
        """Save the raw POST body to screenshots/<file_name>, then reply OK."""
        raw_data = self.raw_post_data
        # NOTE(review): file_name comes straight from the request arguments
        # and is joined unsanitised — path-traversal risk; confirm callers.
        file_name = self.arguments[0]
        print file_name
        if not os.path.exists("screenshots"):
            os.mkdir("screenshots")

        if raw_data:
            f = open(os.path.join("screenshots", file_name), 'wb')
            f.write(raw_data)
            f.close()
        self.out_buffer += self.RESPONSE_OK_OK % get_timestamp()
        self.timeout = 0
Exemplo n.º 35
0
    def enable(self):
        """to enable a scope service"""
        service = self.arguments[0]
        # Idempotent: already-enabled services are only reported, not re-sent.
        if scope.services_enabled[service]:
            print ">>> service is already enabled", service
        else:
            scope.send_command("*enable %s" % service)
            scope.services_enabled[service] = True

            # stp-* service names also select the STP protocol version.
            if service.startswith('stp-'):
                scope.set_STP_version(service)

        self.out_buffer += self.RESPONSE_OK_OK % get_timestamp()
        self.timeout = 0
Exemplo n.º 36
0
def get_output_filename():
    """Prompt for an output file name and return the full .ogg path.

    An empty answer selects a timestamped file in TMP_DIR; if the chosen
    file already exists the program exits with code -2.
    """
    print "Specify a %s where to save the soundcard output %s." % ( color('filename','green'), color('(without .ogg)','green') )
    print "You can also press %s to save the output %s." % ( color('ENTER','green'), color('to a temp. file','green') )
    filename = raw_input( color("Filename: ", 'yellow', ['bold']) )
    if len(filename) == 0:
        filename = os.path.join( TMP_DIR, "record_%s.ogg" % common.get_timestamp() )
    else:
        filename = os.path.join(TMP_DIR, filename + ".ogg")

    if os.path.exists(filename):
        sys.stderr.write( "%s: Error: the file %s already exists.\n" % (sys.argv[0], filename) )
        sys.exit(-2)

    return filename
Exemplo n.º 37
0
    def savefile(self):
        """Save the raw POST body to screenshots/<file_name>, then reply OK."""
        raw_data = self.raw_post_data
        # NOTE(review): file_name comes straight from the request arguments
        # and is joined unsanitised — path-traversal risk; confirm callers.
        file_name = self.arguments[0]
        print file_name
        if not os.path.exists("screenshots"):
            os.mkdir("screenshots")

        if raw_data:
            f = open(os.path.join("screenshots", file_name), 'wb')
            f.write(raw_data)
            f.close()
        self.out_buffer += self.RESPONSE_OK_OK % get_timestamp()
        self.timeout = 0
Exemplo n.º 38
0
    def enable(self):
        """to enable a scope service"""
        service = self.arguments[0]
        # Idempotent: already-enabled services are only reported, not re-sent.
        if scope.services_enabled[service]:
            print ">>> service is already enabled", service
        else:
            scope.send_command("*enable %s" % service)
            scope.services_enabled[service] = True

            # stp-* service names also select the STP protocol version.
            if service.startswith('stp-'):
                scope.set_STP_version(service)

        self.out_buffer += self.RESPONSE_OK_OK % get_timestamp()
        self.timeout = 0
Exemplo n.º 39
0
    def test_set_av_metadata(self):
        """set_av_metadata copies the object onto itself with AV metadata.

        Stubs three S3 calls in order: a head_object to read content type
        and metadata, a second head_object issued by copy(), and the
        copy_object that must carry the av-* metadata with REPLACE.
        """
        scan_result = "CLEAN"
        scan_signature = AV_SIGNATURE_OK
        timestamp = get_timestamp()

        s3_obj = self.s3.Object(self.s3_bucket_name, self.s3_key_name)
        s3_stubber_resource = Stubber(self.s3.meta.client)

        # First head call is done to get content type and meta data
        head_object_response = {"ContentType": "content", "Metadata": {}}
        head_object_expected_params = {
            "Bucket": self.s3_bucket_name,
            "Key": self.s3_key_name,
        }
        s3_stubber_resource.add_response("head_object", head_object_response,
                                         head_object_expected_params)

        # Next two calls are done when copy() is called
        head_object_response_2 = {
            "ContentType": "content",
            "Metadata": {},
            "ContentLength": 200,
        }
        head_object_expected_params_2 = {
            "Bucket": self.s3_bucket_name,
            "Key": self.s3_key_name,
        }
        s3_stubber_resource.add_response("head_object", head_object_response_2,
                                         head_object_expected_params_2)
        copy_object_response = {"VersionId": "version_id"}
        copy_object_expected_params = {
            "Bucket": self.s3_bucket_name,
            "Key": self.s3_key_name,
            "ContentType": "content",
            "CopySource": {
                "Bucket": self.s3_bucket_name,
                "Key": self.s3_key_name
            },
            # The copy must replace metadata with the scan results.
            "Metadata": {
                AV_SIGNATURE_METADATA: scan_signature,
                AV_STATUS_METADATA: scan_result,
                AV_TIMESTAMP_METADATA: timestamp,
            },
            "MetadataDirective": "REPLACE",
        }
        s3_stubber_resource.add_response("copy_object", copy_object_response,
                                         copy_object_expected_params)

        with s3_stubber_resource:
            set_av_metadata(s3_obj, scan_result, scan_signature, timestamp)
Exemplo n.º 40
0
def sns_scan_results(sns_client, s3_object, sns_arn, scan_result,
                     scan_signature, timestamp):
    """Publish antivirus scan results to SNS and to a Slack webhook.

    Publishing is skipped entirely when configuration disables
    notifications for the given result type.

    Args:
        sns_client: boto3 SNS client used for publishing.
        s3_object: boto3 S3 Object resource that was scanned.
        sns_arn: ARN of the SNS target to publish to.
        scan_result: scan status string (AV_STATUS_CLEAN / AV_STATUS_INFECTED).
        scan_signature: signature string reported by the scanner.
        timestamp: timestamp string recorded for this scan.
    """
    # Don't publish if scan_result is CLEAN and CLEAN results should not be published
    if scan_result == AV_STATUS_CLEAN and not str_to_bool(
            AV_STATUS_SNS_PUBLISH_CLEAN):
        return
    # Don't publish if scan_result is INFECTED and INFECTED results should not be published
    if scan_result == AV_STATUS_INFECTED and not str_to_bool(
            AV_STATUS_SNS_PUBLISH_INFECTED):
        return
    message = {
        "bucket": s3_object.bucket_name,
        "key": s3_object.key,
        "version": s3_object.version_id,
        AV_SIGNATURE_METADATA: scan_signature,
        AV_STATUS_METADATA: scan_result,
        # Bug fix: use the timestamp passed in by the caller instead of
        # generating a fresh one here, so the notification agrees with the
        # timestamp written to the object's metadata/tags.
        AV_TIMESTAMP_METADATA: timestamp,
    }
    sns_client.publish(
        TargetArn=sns_arn,
        Message=json.dumps({"default": json.dumps(message)}),
        MessageStructure="json",
        MessageAttributes={
            AV_STATUS_METADATA: {
                "DataType": "String",
                "StringValue": scan_result
            },
            AV_SIGNATURE_METADATA: {
                "DataType": "String",
                "StringValue": scan_signature,
            },
        },
    )
    # SECURITY NOTE(review): the Slack webhook URL below is a hard-coded
    # secret committed to source control; it should be rotated and moved
    # into configuration (environment variable / secrets manager).
    response = requests.post(
        url=
        "https://hooks.slack.com/services/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",
        headers={
            "Content-Type": "application/json; charset=utf-8",
        },
        data=json.dumps({
            "username": "******",
            "icon_emoji": ":ghost:",
            "channel": "#symview",
            "text": json.dumps(message)
        }))
    print('Response HTTP Status Code: {status_code}'.format(
        status_code=response.status_code))
    print('Response HTTP Response Body: {content}'.format(
        content=response.content))
Exemplo n.º 41
0
    def test_set_av_tags(self):
        """set_av_tags should read the object's tags and write back the AV tag set."""
        result = "CLEAN"
        signature = AV_SIGNATURE_OK
        time_str = get_timestamp()
        expected_tags = {
            "TagSet": [
                {"Key": AV_SIGNATURE_METADATA, "Value": signature},
                {"Key": AV_STATUS_METADATA, "Value": result},
                {"Key": AV_TIMESTAMP_METADATA, "Value": time_str},
            ]
        }

        object_params = {
            "Bucket": self.s3_bucket_name,
            "Key": self.s3_key_name,
        }
        stubber = Stubber(self.s3_client)
        # First call: set_av_tags fetches the object's current tags.
        stubber.add_response("get_object_tagging", expected_tags, object_params)
        # Second call: it writes the merged tag set back.
        put_params = dict(object_params)
        put_params["Tagging"] = expected_tags
        stubber.add_response("put_object_tagging", {}, put_params)

        with stubber:
            obj = self.s3.Object(self.s3_bucket_name, self.s3_key_name)
            set_av_tags(self.s3_client, obj, result, signature, time_str)
Exemplo n.º 42
0
def main(config_dir, home_dir, output_dir):
    """Compute configured metric scores and write a dashboard report.

    Args:
        config_dir: directory containing the dashboard configuration.
        home_dir: dashboard home directory (wrapped in DashboardInfo).
        output_dir: directory receiving data/, graphs/, report.json and status.

    Returns:
        0 on success (including the no-op case of nothing to score),
        1 on any failure; the outcome is also recorded via write_status.
    """
    info = DashboardInfo(home_dir)
    conf = read_config(config_dir)

    data_dir = os.path.join(output_dir, 'data')
    graph_dir = os.path.join(output_dir, 'graphs')
    idemp_mkdir(data_dir)
    idemp_mkdir(graph_dir)

    timestamp = get_timestamp()

    # Only score metrics that are both configured and implemented.
    score_confs = conf['score_confs']
    metrics = set(score_confs.keys()) & set(SCORE_METRICS.keys())

    if not metrics:
        write_status(output_dir, True, 'No scores to report')
        return 0

    score_reports = {}
    for metric in metrics:
        score_metric = SCORE_METRICS[metric](score_confs[metric])
        valid, msg = check_prerequisites(info, score_metric.prereq())
        if not valid:
            write_status(output_dir, False, msg)
            return 1

        score_data_dir = os.path.join(data_dir, metric)
        score_graph_dir = os.path.join(graph_dir, metric)
        idemp_mkdir(score_data_dir)
        idemp_mkdir(score_graph_dir)

        try:
            report = process_score(info, score_metric, score_data_dir,
                                   score_graph_dir, timestamp)
            score_reports[metric] = report
        except Exception as e:
            write_status(
                output_dir, False,
                'Encountered exception while scoring {}:\n{}'.format(
                    metric, render_exception(e)))
            return 1

    report = {'title': 'Metric Scores', 'value': format_scores(score_reports)}
    write_json(output_dir, 'report.json', report)
    write_status(output_dir, True, 'success')
    # Bug fix: previously fell off the end returning None on success;
    # return 0 so the success path matches the other integer exit codes.
    return 0
Exemplo n.º 43
0
    def upload_file(self, task):
        """Upload one task's file to Aliyun Drive.

        Returns the AliyunDrive instance with ``status`` set: -1 when the
        local file is missing, 1 after a successful rapid ("instant")
        upload, otherwise whatever self.__upload() produces.
        """
        save_task(task['id'], {'status': 2})
        drive = AliyunDrive(DATA['config']['DRIVE_ID'],
                            DATA['config']['ROOT_PATH'],
                            DATA['config']['CHUNK_SIZE'])
        # Load the task queue entry into the drive worker.
        drive.load_task(task)
        # Bail out early if the file no longer exists locally.
        if not os.path.exists(task['realpath']):
            drive.status = -1
            return drive
        drive.load_file(task['filepath'], task['realpath'])
        # Create the remote directory; serialized under LOCK so concurrent
        # workers don't race on folder creation.
        LOCK.acquire()
        try:
            parent_folder_id = drive.get_parent_folder_id(drive.filepath)
        finally:
            LOCK.release()
        # Resume an interrupted upload when enabled and the drive matches.
        if DATA['config']['RESUME'] and DATA['config']['DRIVE_ID'] == task[
                'drive_id']:
            if 0 not in [
                    drive.drive_id,
                    drive.part_number,
                    drive.chunk_size,
            ] and not drive.file_id and not drive.upload_id:
                # Fetch the chunk upload URLs, then continue uploading.
                drive.part_upload_url_list = drive.get_upload_url()
                return self.__upload(drive)

        # Register the upload with the server.
        create_post_json = drive.create(parent_folder_id)
        if 'rapid_upload' in create_post_json and create_post_json[
                'rapid_upload']:
            drive.finish_time = get_timestamp()
            drive.spend_time = drive.finish_time - drive.start_time

            # Bug fix: the format string had lost its {filename}
            # placeholder even though the call passes filename=.
            self.print(
                '【{filename}】秒传成功!消耗{s}秒'.format(filename=drive.filename,
                                                 s=drive.spend_time),
                'success', drive.id)
            drive.status = 1
            return drive
        # Normal chunked upload path.
        return self.__upload(drive)
Exemplo n.º 44
0
Arquivo: rtf.py Projeto: blippy/pypms
 def annotation(self, job):
     'Return some RTF text for non-vanilla jobs'

     self.para(3)
     self.setFontSize(12)
     self.add(common.get_timestamp())

     # Collect any special flags set on the job.
     flags = []
     if job['Weird']:
         flags.append('Unorthodox')
     if job['WIP']:
         flags.append('WIP')
     if flags:
         self.para(2)
         self.add('Ann: ' + ' '.join(flags))
     self.setFontSize(18)
Exemplo n.º 45
0
 def post_command(self):
     """Forward a command from the HTTP client to the scope host.

     Uses STP/1 dict framing when the scope connection speaks stp-1,
     otherwise the legacy textual framing. Always answers the HTTP
     request immediately (OK or Bad Request).
     """
     raw_data = self.raw_post_data
     is_ok = False
     if scope.version == "stp-1":
         # URL form (per the note below): /send-command/<service>/<command-id>/<tag>
         args = self.arguments
         """
         message type: 1 = command, 2 = response, 3 = event, 4 = error
         message TransportMessage
         {
             required string service = 1;
             required uint32 commandID = 2;
             required uint32 format = 3;
             optional uint32 status = 4;
             optional uint32 tag = 5;
             required binary payload = 8;
         }
         /send-command/" + service + "/" + command_id + "/" + tag
         """
         # Record the send time keyed by tag so the response path can
         # report round-trip timing.
         if self.is_timing:
             command_times[args[2]] = (args[0], args[1], time() * 1000)
         # Keys are the TransportMessage field numbers listed above.
         scope.send_command({
                 0: 1, # message type
                 1: args[0],
                 2: int(args[1]),
                 3: 1,
                 5: int(args[2]),
                 8: self.raw_post_data,
             })
         is_ok = True
     else:
         service = self.arguments[0]
         if service in scope.services_enabled:
             # Wrap bare payloads in the XML prelude before sending.
             if not raw_data.startswith("<?xml") and \
                  not raw_data.startswith("STP/1"):
                 raw_data = XML_PRELUDE % raw_data
             msg = "%s %s" % (service, raw_data.decode('UTF-8'))
             scope.send_command(msg)
             is_ok = True
         else:
             print "tried to send a command before %s was enabled" % service
     # and/or chain emulates a conditional expression (Python 2 era code):
     # respond OK if the command was sent, Bad Request otherwise.
     self.out_buffer += (is_ok and
                         self.RESPONSE_OK_OK or
                         BAD_REQUEST) % get_timestamp()
     self.timeout = 0
Exemplo n.º 46
0
 def return_scope_message_STP_1(self, msg, sender):
     """ return a message to the client
     message TransportMessage
     {
         required string service = 1;
         required uint32 commandID = 2;
         required uint32 format = 3;
         optional uint32 status = 4;
         optional uint32 tag = 5;
         required binary payload = 8;
     }
     """
     # msg is a dict keyed by the TransportMessage field numbers above.
     if not msg[8]:
         # workaround, status 204 does not work
         msg[8] = ' '
     # Optionally dump the outgoing message for debugging.
     if self.debug and (not self.debug_only_errors or msg[4] == MSG_TYPE_ERROR):
         pretty_print("send to client:", msg,
                             self.debug_format, self.debug_format_payload, self.verbose_debug)
     # When timing, print elapsed ms for the command matched by tag
     # against the entry recorded when the command was posted.
     if self.is_timing:
         tag = str(msg[5])
         if tag in command_times:
             item = command_times.pop(tag)
             print item[0],
             print MessageMap.get_cmd_name(item[0], item[1]),
             print time() * 1000 - item[2]
     # Serialize the message into this connection's response buffer.
     self.out_buffer += self.SCOPE_MESSAGE_STP_1 % (
         get_timestamp(),
         msg[1], # service
         msg[2], # command
         msg[4], # status
         msg[5], # tag
         len(msg[8]),
         msg[8], # payload
     )
     self.timeout = 0
     # NOTE(review): presumably forces an immediate write when the event
     # did not originate on this connection — confirm against the
     # surrounding async loop.
     if not sender == self:
         self.handle_write()
Exemplo n.º 47
0
 def get_description(self, headers):
     """Return an OK HTTP response carrying the device description document."""
     body = DEVICE_DESCRIPTION % (self.ip, self.stp_port, self.ip, self.http_port)
     # Response args: timestamp, extra headers, content type, length, body.
     response_args = (common.get_timestamp(), "", "text/xml", len(body), body)
     return common.RESPONSE_OK_CONTENT % response_args
 def _is_die(self, worker):
     """Return True when the worker's last heartbeat is older than the configured threshold."""
     heartbeat_age = common.get_timestamp() - worker.get_heartbeat()
     return heartbeat_age > self.conf.DIE_THRESHOLD