def swapoff(device):
    swapoff = "/sbin/swapoff '%s'" % device
    log.info1("Disable swap on '%s'.", device)
    if run_script(swapoff) != 0:
        log.error("swapoff failed.")
        return 1
    return 0
def swapon(device):
    swapon = "/sbin/swapon '%s'" % device
    log.info1("Enable swap on '%s'.", device)
    if run_script(swapon) != 0:
        log.error("swapon failed.")
        return 1
    return 0
def mapTo(self, mapto):
    if self.active:
        log.error("Unable to remap '%s', because it is already active.",
                  self.name)
        return 0
    self.device = "/dev/%s" % mapto
    return 1
def umount(what):
    stat = os.system("/bin/umount '%s' 2>/dev/null" % what)
    if stat != 0:
        log.error("Umount of '%s' failed.", what)
        return 1
    return 0
def new_disklabel(self, label):
    if label not in Disk.diskType:
        log.error("Disk label '%s' is not supported.", label)
        return
    self.ped_disk = self.ped_device.disk_new_fresh(Disk.diskType[label])
    self.reload()
def x_config(ks, buildroot, source):
    # default: VGA graphics card, Generic extended super VGA monitor
    card = "Unknown video card"
    driver = "vga"
    videoram = 0
    monitor = "Unknown monitor"
    hsync = "31.5 - 37.9"
    vsync = "50 - 61"
    dpms = 0
    resolution = "800x600"
    depth = 8
    options = [ ]

    # keyboard
    (kbd_layout, kbd_model, kbd_variant, kbd_options) = \
        keyboard.keyboard_models[ks["keyboard"]]

    _card = None
    _driver = None
    _options = [ ]
    if ks["xconfig"].has_key("card"):
        try:
            cards = hwdata.Cards(buildroot)
        except Exception, msg:
            log.warning("Loading of card database failed.")
            flog.info1(msg, nofmt=1)
        else:
            dict = cards.get(ks["xconfig"]["card"])
            if dict and dict.has_key("driver"):
                _card = ks["xconfig"]["card"]
                _driver = dict["driver"]
                if dict.has_key("options"):
                    _options.extend(dict["options"])
            else:
                log.error("Card not found in hardware database.")
def start(self):
    command = "%s vgchange -a y '%s'" % (LVM_VOLGROUP.prog, self.name)
    if run_script(command, self.chroot, log) != 0:
        log.error("Activation of volume group '%s' failed.", self.name)
        return 0
    self.active = True
    return 1
def build_connection(self):
    try:
        self.mq_connection = pika.BlockingConnection(
            pika.URLParameters(MQ_CONN_URL))
        self.channel = self.mq_connection.channel()
        # Option 1: consume messages directly from the given queue
        if self.queue:
            self.channel.queue_declare(queue=self.queue, durable=True)
        # Option 2: bind a temporary queue to the exchange and fetch messages dynamically
        else:
            assert self.exchange, 'neither queue nor exchange is defined!'
            self.channel.exchange_declare(exchange=self.exchange,
                                          exchange_type=self.exchange_type,
                                          durable=True)
            # declare an anonymous queue that is deleted once the connection closes
            result = self.channel.queue_declare(exclusive=True)
            self.queue = result.method.queue
            self.channel.queue_bind(exchange=self.exchange, queue=self.queue)
        # start consuming messages from the queue
        self.channel.basic_consume(
            consumer_callback=self.call_back,
            queue=self.queue,
            exclusive=True
            # no_ack=True is deliberately not set: with auto-ack a message is
            # lost if the consumer dies before processing it; without it the
            # broker redelivers the message to another consumer.
        )
    except Exception as e:
        log.error('Build connection failed: %s' % e)
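# A hedged sketch (not from the original code) of what self.call_back could look
# like for the pre-1.0 pika basic_consume(consumer_callback=...) API used above.
# handle_message is a hypothetical processing hook. Since no_ack is not enabled,
# the callback should acknowledge explicitly once the message has been handled.
def call_back(self, channel, method, properties, body):
    try:
        self.handle_message(body)  # hypothetical processing hook
    except Exception as e:
        log.error('Message handling failed: %s' % e)
    else:
        # acknowledge only after successful handling so unprocessed messages
        # are redelivered if this consumer dies
        channel.basic_ack(delivery_tag=method.delivery_tag)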
def display(chroot=None):
    command = "%s pvdisplay --units b 2>/dev/null" % \
        LVM_PHYSICAL_VOLUME.prog
    log.debug1(command)
    (status, rusage, msg) = runScript(script=command, chroot=chroot)
    if msg and msg != "":
        log.debug1(msg, nofmt=1)
    if status != 0:
        log.error("Failed to get general physical volume information.")
        return None
    dict = { }
    device = None
    for line in msg.split("\n"):
        line = line.strip()
        if len(line) < 1 or line[0] == '#':
            continue
        if line[:7] == "PV Name":
            device = line[7:].strip()
            dict[device] = { }
            d = dict[device]
        if not device:
            continue
        try:
            if line[:7] == "VG Name":
                d["vgname"] = line[7:].strip()
            elif line[:7] == "PV UUID":
                d["pvuuid"] = line[7:].strip()
        except:
            log.error("pvdisplay output malformed.")
            return None
    return dict
def upload_to_s3(self, cloud_path, file_path):
    try:
        if not self.get_s3_connection():
            log.error("FAILURE UPLOAD: S3 connection is not built.")
            return False
        if self.check_from_s3(cloud_path):
            log.info("S3 object already exists: %s" % cloud_path)
            return True
        md5id = self.GetFileMd5(file_path)
        bucket = self.s3_conn.get_bucket(self.bucket, validate=False)
        object_path = cloud_path.replace("\\", "/")
        if object_path[0] != '/':
            object_path = '/' + object_path
        kobject = bucket.new_key(object_path)
        filesize = os.stat(file_path).st_size
        if filesize >= MULTI_UPLOAD_THRESHOLD_SIZE:
            upload_file_multipart(file_path, object_path, bucket, md5id)
            log.info("SUCCESS to S3: multipart way, uploading file path: %s" % file_path)
        else:
            kobject.set_contents_from_filename(file_path,
                                               headers={'CONTENT-MD5': md5id})
            log.info("SUCCESS to S3: singlefile way, uploading file path: %s" % file_path)
        return True
    except Exception, e:
        log.error("FAILURE to S3: error: %s, uploading file path: %s" % (e, file_path))
        return False
def get_immutable_data( data_key ):
    """
    Given the hash of the data, go through the list of immutable data handlers and look it up.
    Return the data on success
    """
    global storage_handlers
    for handler in storage_handlers:
        if not hasattr( handler, "get_immutable_handler" ):
            continue
        data = None
        try:
            data = handler.get_immutable_handler( data_key )
        except Exception, e:
            log.exception( e )
            continue
        if data is None:
            continue
        # validate
        data_hash = get_data_hash( data )
        if data_hash != data_key:
            # nope
            log.error("Invalid data hash")
            continue
        return data
def put(self, storagemodel: object) -> StorageQueueModel:
    """ insert queue message into storage """
    modelname = storagemodel.__class__.__name__
    if isinstance(storagemodel, StorageQueueModel):
        if (modelname in self._models):
            """ put message into queue """
            try:
                message = self._service.put_message(storagemodel._queuename,
                                                    storagemodel.getmessage())
                storagemodel.mergemessage(message)
            except AzureException as e:
                log.error('can not save queue message: queue {} with message {} because {!s}'.format(
                    storagemodel._queuename, storagemodel.content, e))
                storagemodel = None
        else:
            log.info('please register model {} first'.format(modelname))
            storagemodel = None
    else:
        log.info('model {} is not a Queue Model'.format(modelname))
        storagemodel = None
    return storagemodel
def delete(self, storagemodel: object) -> bool:
    """ delete the message in queue """
    modelname = storagemodel.__class__.__name__
    deleted = False
    if isinstance(storagemodel, StorageQueueModel):
        if (modelname in self._models):
            """ check if message is in queue """
            if (storagemodel.id != '') and (storagemodel.pop_receipt != '') and \
                    (storagemodel.id is not None) and (storagemodel.pop_receipt is not None):
                try:
                    self._service.delete_message(storagemodel._queuename,
                                                 storagemodel.id,
                                                 storagemodel.pop_receipt)
                    deleted = True
                except AzureException as e:
                    log.error('can not delete queue message: queue {} with message.id {!s} because {!s}'.format(
                        storagemodel._queuename, storagemodel.id, e))
            else:
                log.info("can't delete queue message {} due to missing id and pop_receipt".format(modelname))
        else:
            log.info('please register model {} first'.format(modelname))
    else:
        log.info('model {} is not a Queue Model'.format(modelname))
    return deleted
def load_user(record_hash):
    """
    Load a user record from the storage implementation with the given hex string hash.
    The user record hash should have been loaded from the blockchain, and thereby be the authentic hash.

    Return the user record on success
    Return None on error
    """
    user_json = storage.get_immutable_data(record_hash)
    if user_json is None:
        log.error("Failed to load user record '%s'" % record_hash)
        return None

    # verify integrity
    user_record_hash = storage.get_data_hash(user_json)
    if user_record_hash != record_hash:
        log.error("Profile hash mismatch: expected '%s', got '%s'" %
                  (record_hash, user_record_hash))
        return None

    user = user_db.parse_user(user_json)
    return user
def gen_shard(sess, input_base_dir, image_filenames, output_filename, image_texts):
    """Create a TFRecord file from a list of image filenames"""
    writer = tf.python_io.TFRecordWriter(output_filename)
    for item, filename in enumerate(image_filenames):
        path_filename = os.path.join(input_base_dir, filename)
        if os.stat(path_filename).st_size == 0:
            log.warning('Skipping empty files: %s' % (filename, ))
            continue
        try:
            image_data, height, width = get_image(sess, path_filename)
            text, labels = get_text_and_labels(image_texts[item])
            if is_writable(width, text):
                # inspect the text and labels
                # print(text, labels)
                if len(labels) == 0:
                    print(text, labels)
                else:
                    example = make_example(filename, image_data, labels, text,
                                           height, width)
                    writer.write(example.SerializeToString())
            else:
                log.info('Skipping image with too short width: %s' % (filename, ))
        except Exception as e:
            # Some files have bogus payloads, catch and note the error, moving on
            log.warning('Error occurred during processing file %s' % (filename, ))
            log.error(e)
    writer.close()
def download_from_s3(self, object_key, download_dir):
    '''
    :param object_key: key of the object in S3
    :param download_dir: local directory to download into
    :return: local path of the downloaded file
    '''
    local_path = ''  # local path the file is saved to after it is downloaded from S3
    try:
        if not self.get_s3_connection():
            log.error("FAILURE DOWNLOAD: S3 connection is not built.")
            return False
        if not os.path.isdir(download_dir):
            os.makedirs(download_dir)
        if len(object_key.split('/')) < 2:
            file_name = object_key
        else:
            file_name = object_key.split('/')[-1]  # extract the file name
        local_path = os.path.join(download_dir, file_name)
        log.info("File will be downloaded to: %s" % local_path)
        bucket = self.s3_conn.get_bucket(self.bucket, validate=False)
        kobject = bucket.get_key(object_key)
        kobject.get_contents_to_filename(local_path)
        log.info("Download file from s3_operator successfully")
    except Exception as error:
        log.error("Download file from s3_operator failed, error: %s" % error)
    return local_path
def update(self, storagemodel: object, hide=0) -> StorageQueueModel:
    """ update the message in queue """
    modelname = storagemodel.__class__.__name__
    if isinstance(storagemodel, StorageQueueModel):
        if (modelname in self._models):
            """ check if message is in queue """
            if (storagemodel.id != '') and (storagemodel.pop_receipt != '') and \
                    (storagemodel.id is not None) and (storagemodel.pop_receipt is not None):
                try:
                    content = storagemodel.getmessage()
                    message = self._service.update_message(storagemodel._queuename,
                                                           storagemodel.id,
                                                           storagemodel.pop_receipt,
                                                           visibility_timeout=hide,
                                                           content=content)
                    storagemodel.content = content
                    storagemodel.pop_receipt = message.pop_receipt
                except AzureException as e:
                    log.error('can not update queue message: queue {} with message.id {!s} because {!s}'.format(
                        storagemodel._queuename, storagemodel.id, e))
                    storagemodel = None
            else:
                log.info("can't update queue message {} due to missing id and pop_receipt".format(modelname))
                storagemodel = None
        else:
            log.info('please register model {} first'.format(modelname))
            storagemodel = None
    else:
        log.info('model {} is not a Queue Model'.format(modelname))
        storagemodel = None
    return storagemodel
def get_system_disks():
    disks = [ ]
    fd = None
    try:
        fd = open("/proc/partitions", "r")
        while 1:
            line = fd.readline()
            if not line:
                break
            line = line.strip()
            if len(line) < 1 or line[0] == '#':
                continue
            if line[:5] == "major":
                continue
            splits = line.split()  # major, minor, blocks, name
            if len(splits) < 4:
                log.error("'/proc/partitions' malformed.")
                return
            if int(splits[1]) % 16 == 0:
                # minor%16 == 0 for harddisk devices
                hd = splits[3]
                if hd[0:4] == "loop":
                    continue
                disks.append("/dev/" + hd)
    finally:
        if fd:
            fd.close()
    return disks
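# For reference, /proc/partitions typically looks like the abridged, illustrative
# sample below; whole-disk entries have minor numbers that are multiples of 16,
# which is what the minor % 16 == 0 test in get_system_disks() keys on:
#
#   major minor  #blocks  name
#      8     0  488386584 sda
#      8     1     524288 sda1
#      8    16  976762584 sdb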
def put_immutable_data( data_text, txid ):
    """
    Given a string of data (which can either be data or a route), store it into our immutable data stores.
    Do so in a best-effort manner--this method only fails if *all* storage providers fail.

    Return the hash of the data on success
    Return None on error
    """
    global storage_handlers
    data_hash = get_data_hash( data_text )
    successes = 0
    for handler in storage_handlers:
        if not hasattr( handler, "put_immutable_handler" ):
            continue
        rc = False
        try:
            rc = handler.put_immutable_handler( data_hash, data_text, txid )
        except Exception, e:
            log.exception(e)
            continue
        if not rc:
            log.error("Failed to replicate with '%s'" % handler.__name__)
        else:
            successes += 1
    # best-effort: fail only if no storage provider accepted the data
    if successes == 0:
        return None
    return data_hash
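# A hedged sketch (not part of the original code) of the plugin interface that
# get_immutable_data() and put_immutable_data() above rely on: each entry in
# storage_handlers is expected to expose get_immutable_handler(key) and
# put_immutable_handler(key, data, txid); the use of handler.__name__ in the
# error log suggests handlers are typically modules. The in-memory dict below
# is an illustrative assumption only.

_STORE = {}

def put_immutable_handler(key, data, txid):
    # return truthy on success so the caller can count this replica
    _STORE[key] = (data, txid)
    return True

def get_immutable_handler(key):
    # return None on a miss; the caller re-hashes the data to validate it
    entry = _STORE.get(key)
    return entry[0] if entry is not None else None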
def load_from_db(directory, directory_checksum):
    """ Checks if the directory location information is already stored in the DB.
        The checksum is also computed and compared with the one stored in the DB,
        in case there are changes in the directory.
    Args:
        directory: absolute path of the photo directory
        directory_checksum: computed checksum of the directory
    Returns:
        database content corresponding to the 'locations' key of the directory
        None - if not present
    """
    try:
        with MongoConnector() as mongo:
            directory_base = os.path.basename(directory)
            # Check if an entry with the directory name is present in the DB
            db_dir_metadata = mongo.find_one({'directory': directory_base}) or None
            # Check if the directory has an entry in the DB and, if so,
            # whether the checksum from the DB matches the computed one
            if db_dir_metadata and directory_checksum == db_dir_metadata[
                    'directory_checksum']:
                log.info("Loading data from DB...")
                # log.debug(json.dumps(db_dir_metadata['locations'], indent=1))
                return db_dir_metadata['locations']
    except KeyError as e:
        log.warning("Check DB structure! Key {} is missing. "
                    "Re-computing result!".format(e))
    except Exception as e:
        log.error(e)
def cmdcb(s):
    tstamp = time.strftime("%H:%M:%S", time.localtime())
    logwin.println("{} {}".format(tstamp, s))
    if s == 'quit':
        sys.exit()
    elif s == 'bstart':
        benchmarks = "matrix wordpress blosc static sdag sdagp pgbench ffmpeg".split()
        for bname, vm in zip(benchmarks, VMS):
            cmd = bench_cmd[bname]
            p = vm.Popen(cmd, stdout=DEVNULL, stderr=DEVNULL)
            vm.bname = bname
            assert not hasattr(vm, 'pipe')
            vm.pipe = p
    elif s == 'bstop':
        for vm in VMS:
            vm.unfreeze()
            p = vm.pipe
            if p.returncode is not None:
                log.error("for VM %s: task is already dead, dude" % vm)
            p.killall()
    elif s == 'pstop':
        prof_ev.clear()
    elif s == 'pstart':
        prof_ev.set()
    elif s == 'redraw':
        root.canvas.clear()
        root.draw()
async def fetcher(database):
    """Fetch all the feeds"""
    # disable certificate validation to cope with self-signed certificates in some feed back-ends
    client = ClientSession(connector=TCPConnector(verify_ssl=False))
    sem = Semaphore(MAX_CONCURRENT_REQUESTS)
    queue = await connect_redis()
    while True:
        log.info("Beginning run.")
        tasks = []
        threshold = datetime.now() - timedelta(seconds=FETCH_INTERVAL)
        async with ClientSession() as session:
            while True:
                try:
                    job = await dequeue(queue, 'fetcher')
                    feed = await database.feeds.find_one({'_id': job['_id']})
                    last_fetched = feed.get('last_fetched', threshold)
                    if last_fetched <= threshold:
                        task = ensure_future(
                            throttle(sem, session, feed, client, database, queue))
                        tasks.append(task)
                except Exception:
                    log.error(format_exc())
                    break
            responses = gather(*tasks)
            await responses
            log.info("Run complete, sleeping %ds...", CHECK_INTERVAL)
            await sleep(CHECK_INTERVAL)
    queue.close()
    await queue.wait_closed()
def _get_index(self):
    web = Web(self.per_url)
    req = web.get()
    if req.status_code != 200:
        log.error("Can not retrieve periodic page {}".format(self.per_url))
        return None
    return req.content
def login(self):
    """ Log in to the Google mail servers

    Raises:
        UnknownLoginLocation: the user is trying to log in from an unauthorized, unknown device
        BadCredentials: the username or password is incorrect
    """
    log.info('Logging in to googlemail server as {}'.format(self.GMAIL_USERNAME))
    try:
        self.server.login(self.GMAIL_USERNAME, self.GMAIL_PASSWORD)
    except SMTPAuthenticationError as ex:
        if ex.smtp_code == 534:
            raise UnknownLoginLocation
        if ex.smtp_code == 535:
            log.error("Username or password is incorrect")
            raise BadCredentials
    except SMTPSenderRefused as ex:
        log.error("Google is blocking the login. Go to your Gmail settings and allow access from this location")
        raise
    else:
        self.is_loggedin = True
def post(url, port, uri, headers_list, data, timeout=30):
    client = None
    try:
        headers = {}
        for type_name, type in headers_list:
            headers[type_name] = type
        if port == -1:
            client = httplib.HTTPConnection(url, timeout=timeout)
        else:
            client = httplib.HTTPConnection(url, port, timeout=timeout)
        client.request('POST', uri, data, headers)
        response = client.getresponse()
        if response.status == 200:
            try:
                data = response.read()
                return (response.status, data)
            except:
                return (response.status, None)
        else:
            return (response.status, None)
    except:
        log.error(traceback.format_exc())
        return (500, None)
    finally:
        if client:
            client.close()
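# A hedged usage sketch for post() above; the host, path and payload are
# illustrative assumptions. Note that headers_list is a list of
# (header_name, header_value) tuples rather than a dict, and port=-1 falls
# back to the default HTTP port.
status, body = post("api.example.com", -1, "/v1/items",
                    [("Content-Type", "application/json")],
                    '{"name": "demo"}', timeout=10)
if status == 200 and body is not None:
    pass  # handle the response body here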
def put(self, serial, key):
    schema = PostsSchema(partial=True)
    post = Post.find_by_serial(serial)
    if not post:
        return {"message": "There is no post with this serial. Please recheck."}, 404
    if post.status != 'encrypted':
        return {"message": "This post is not encrypted. Everyone can read it."}, 400
    if not bcrypt.check_password_hash(post.encryptionKey, key):
        return {"message": "This is the wrong key. We can't decrypt the message, so you can't edit it."}, 401
    data = schema.load(request.get_json())
    # You can change the title, category, content and status.
    if data.title:
        post.title = data.title
    if data.category:
        post.category = data.category
    if data.content:
        post.content = dataEnc.encodeString(data.content, key)
    if data.status and data.status != 'encrypted':
        post.encryptionKey = None  # Removing the encryption key.
        post.content = dataEnc.decodeString(post.content, key)
        post.status = data.status
    try:
        post.save_to_db()
        return {"message": "Post with serial `{}` has been updated in our database.".format(serial)}, 200
    except Exception as e:
        log.error("There was an error when updating an encrypted post. Check message: {}".format(e))
        return {"message": "Something went wrong. We can't upload this in our database."}, 500
def get(self, storagemodel: object, hide=0) -> StorageQueueModel:
    """ lookup the next message in queue """
    modelname = storagemodel.__class__.__name__
    if isinstance(storagemodel, StorageQueueModel):
        if (modelname in self._models):
            """ get first message in queue """
            try:
                if hide > 0:
                    messages = self._service.get_messages(storagemodel._queuename,
                                                          num_messages=1,
                                                          visibility_timeout=hide)
                else:
                    messages = self._service.get_messages(storagemodel._queuename,
                                                          num_messages=1)

                """ parse retrieved message """
                for message in messages:
                    storagemodel.mergemessage(message)

                """ no message retrieved? """
                if storagemodel.id is None:
                    storagemodel = None
            except AzureException as e:
                log.error('can not get queue message: queue {} with message {} because {!s}'.format(
                    storagemodel._queuename, storagemodel.content, e))
                storagemodel = None
        else:
            log.info('please register model {} first'.format(modelname))
            storagemodel = None
    else:
        log.info('model {} is not a Queue Model'.format(modelname))
        storagemodel = None
    return storagemodel
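# A hedged usage sketch for the queue wrapper methods above (put/get/update/delete).
# The `queues` service instance and the LogMessage model are illustrative
# assumptions; a StorageQueueModel subclass must already be registered with the
# service (i.e. present in self._models) for these calls to succeed.
msg = LogMessage()                             # hypothetical registered StorageQueueModel
msg.content = 'hello'
queues.put(msg)                                # enqueue the message

received = queues.get(LogMessage(), hide=30)   # hide it for 30s while processing
if received is not None:
    # ... process received.content ...
    queues.delete(received)                    # uses the id/pop_receipt set by get()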
async def feed_fetcher(database):
    """Fetch all the feeds"""
    client = ClientSession(auth=BasicAuth(API_KEY))
    sem = Semaphore(MAX_CONCURRENT_REQUESTS)
    queue = await connect_redis()
    while True:
        log.info("Beginning run.")
        tasks = []
        async with client as session:
            while True:
                try:
                    job = await dequeue(queue, 'fetcher')
                    feed = await database.feeds.find_one(
                        {'advert_id': job['_id']})
                    task = ensure_future(
                        throttle(sem, session, feed, client, database, queue))
                    tasks.append(task)
                except Exception:
                    log.error(format_exc())
                    break
            responses = gather(*tasks)
            await responses
            log.info("Run complete, sleeping %ds...", CHECK_INTERVAL)
            await sleep(CHECK_INTERVAL)
    queue.close()
    await queue.wait_closed()
def save(self, syncwithstorage=True):
    """ insert or merge self into storage """
    if syncwithstorage:
        """ try to merge entry """
        try:
            self._tableservice.insert_or_merge_entity(
                self._tablename, self.__image__(entity=True))
            """ sync self """
            self.load()
        except AzureMissingResourceHttpError as e:
            log.error('can not insert or merge table entity: Table {}, PartitionKey {}, RowKey {} because {!s}'.format(
                self._tablename, self._PartitionKey, self._RowKey, e))
    else:
        """ try to replace entry """
        try:
            self._tableservice.insert_or_replace_entity(
                self._tablename, self.__image__(entity=True))
            self._existsinstorage = True
        except AzureMissingResourceHttpError as e:
            log.debug('can not insert or replace table entity: Table {}, PartitionKey {}, RowKey {} because {!s}'.format(
                self._tablename, self._PartitionKey, self._RowKey, e))
def _sync(self, tag, force):
    repo_name = self.config['repo_name']
    sync_script = '{0}/{1}.sync'.format(self.config["sync_dir"], repo_name)
    if os.path.exists(sync_script):
        log.info('{0} :: Calling sync script at {1}'.format(__name__, sync_script))
        proc = subprocess.Popen([sync_script,
                                 '--repo="{0}"'.format(repo_name),
                                 '--tag="{0}"'.format(tag),
                                 '--force="{0}"'.format(force)])
        proc_out = proc.communicate()[0]
        log.info(proc_out)
        if proc.returncode != 0:
            exit_code = 40
            log.error("{0} :: {1}".format(__name__, exit_codes[exit_code]))
            return exit_code
    else:
        # In absence of a sync script -- Tag the repo
        log.debug(__name__ + ' :: Calling default sync.')
        try:
            self._dulwich_tag(tag, self._make_author())
        except Exception as e:
            log.error(str(e))
            raise SartorisError(message=exit_codes[12], exit_code=12)
        self._default_sync()
    self._remove_lock()
    return 0
def _get_more_data(self, j):
    def delta(e, s):
        return (self._parse_ts(e) - self._parse_ts(s)).seconds / 60

    start = end = None
    j.update({
        'status': 'FAILURE',
        'fail': True,
        'branch': ''
    })
    console = self._get_console(j)
    if not console:
        log.error("Failed to get console for periodic {}".format(repr(j)))
    else:
        for line in fileinput.input(console,
                                    openhook=fileinput.hook_compressed):
            if "Finished: SUCCESS" in line:
                j['fail'] = False
                j['status'] = 'SUCCESS'
            elif "Finished: FAILURE" in line:
                j['fail'] = True
                j['status'] = 'FAILURE'
            elif "Finished: ABORTED" in line:
                j['fail'] = True
                j['status'] = 'ABORTED'
            if branch_re.search(line):
                j['branch'] = branch_re.search(line).group(1)
            if 'Started by user' in line:
                start = ts_re.search(line).group(1)
            if "Finished: " in line:
                end = ts_re.search(line).group(1)
        j['length'] = delta(end, start) if start and end else 0
    return j
def send(to: str, e_str: str):
    msg = MIMEMultipart()
    msg['From'] = _format('Tencent Server <%s>' % email_from_addr)
    msg['To'] = _format('Exception Receive <%s>' % to)
    msg['Subject'] = Header('Spider Exception', 'utf-8').encode()

    # escape the traceback text so it renders safely inside the HTML template
    e_str = e_str.replace('<', '(')
    e_str = e_str.replace('>', ')')
    e_str = e_str.replace('\n', '<br>')
    e_str = e_str.replace(' ', '&nbsp;')

    content = email_content_template.replace('__exception__', e_str)
    content = content.replace(
        '__datetime__',
        datetime.now(timezone(
            timedelta(hours=8))).strftime('%Y-%m-%d %H:%M:%S'))
    msg.attach(MIMEText(content, 'html', 'utf-8'))

    server = smtplib.SMTP_SSL('smtp.163.com', 994)
    try:
        server.set_debuglevel(1)
        server.login(user=email_from_addr, password=email_password)
        server.send_message(msg)
    except BaseException as e:
        log.exception(e)
        log.error('[eMail] Send failed')
    finally:
        server.quit()
def main():
    temp_file_dir = 'data-temp/'
    log.info("Getting object's keys")
    keys: Set[str] = _s3.get_all_objects_key()
    log.info("[Done] get all object's keys")
    multi_download(temp_file_dir, keys)
    for _dir in os.listdir(temp_file_dir):
        _dir = temp_file_dir + _dir
        if os.path.isfile(_dir):
            continue
        dir_keys: Set[str] = gen_objectKeys_from_dir(_dir)
        log.info('Analyze dir: %s' % _dir)
        try:
            _map = all_files(_dir)
            log.info('Waiting to process, files len: %s' % _map.__len__())
            multi_danmaku_v2.main(_map)  # analyze
        except BaseException as e:
            log.error('dir: %s occurs error' % _dir)
            raise e
        else:
            log.info('[Done] analyze dir: %s' % _dir)
            # processing finished, move the directory to the archive location
            shutil.move(_dir, 'D:/spider archive')
            log.info('Archive temp files done')
            _s3.delete_objects(dir_keys)
            log.info('Delete objects done')
    log.info('ALL DONE')
def post(self):
    import string, random, datetime
    data = schema.load(request.get_json())
    data.serial = ''.join(random.choices(string.ascii_lowercase + string.digits,
                                         k=serial_length))
    data.date = datetime.datetime.now()

    # Handling the author stuff
    from schema.users import UserSchema
    authorData = User.find_by_uuid(get_jwt_identity())
    data.author_id = authorData.id

    # Encrypting posts, in the needed cases.
    if data.encryptionKey:
        data.status = 'encrypted'
        try:
            data.content = dataEnc.encodeString(data.content, data.encryptionKey)
            data.encryptionKey = bcrypt.generate_password_hash(data.encryptionKey)
        except Exception as e:
            log.error('Encryption error when creating a post! Check error message: {}'.format(e))
            return {"message": "Internal server error!"}, 500

    try:
        data.save_to_db()              # save the post
        authorData.activity += 1       # increment the author's activity counter
        authorData.save_to_db()        # and persist it
        return {"message": "Post created with serial `{}`.".format(data.serial)}, 201
    except Exception as e:
        log.error('Database error when creating a new post. Check the error message: {}'.format(e))
        return {"message": "Something went wrong. We can't upload this in our database."}, 500
def start_transmission():
    try:
        """ create transmission log file """
        if not os.path.exists('/usr/log/transmission.log'):
            call = 'touch /usr/log/transmission.log'
            process = subprocess.run(call, shell=True, check=True,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            call = 'chown -R debian-transmission:debian-transmission /usr/log'
            process = subprocess.run(call, shell=True, check=True,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        """ update ACL for transmission access """
        call = 'chown -R debian-transmission:debian-transmission ' + config[
            'APPLICATION_PATH_TORRENTS']
        process = subprocess.run(call, shell=True, check=True,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        call = 'chown -R debian-transmission:debian-transmission ' + config[
            'APPLICATION_PATH_OTRKEYS']
        process = subprocess.run(call, shell=True, check=True,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        call = 'chown -R debian-transmission:debian-transmission ' + config[
            'APPLICATION_PATH_VIDEOS']
        process = subprocess.run(call, shell=True, check=True,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        """ restart transmission service """
        call = 'service transmission-daemon start'
        log.debug(call)
        process = subprocess.run(call, shell=True, check=True,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        time.sleep(5)
        log.info('init transmission-daemon finished. Returns {!s}'.format(
            process.stdout.decode(encoding='utf-8')))
        return True
    except subprocess.CalledProcessError as e:
        log.error('init transmission-daemon failed with cmd:{!s} because {!s}'.format(
            e.cmd, e.stderr))
        return False
    except:
        log.exception('start transmission failed... ')
        return False
def send_email(sender, receiver, message):
    try:
        smtpObj = smtplib.SMTP_SSL(config.MAIL_HOST, 465)
        # smtpObj.set_debuglevel(1)
        smtpObj.login(config.MAIL_USER, config.MAIL_PASS)
        smtpObj.sendmail(sender, [receiver], message.as_string())
    except smtplib.SMTPException:
        log.error('Unable to send email')
def set_type(self, type):
    # if not type in Partition.nativeType.keys():
    #     log.error("Unknown partition type '%s'." % type)
    try:
        fst = parted.file_system_type_get(type)
    except Exception, msg:
        log.error(msg)
        return
def exe(self, cmd):
    log.debug("Executing cmd by ssh: {cmd}".format(cmd=cmd))
    try:
        stdin, stdout, stderr = self.ssh_cl.exec_command(cmd)
    except paramiko.ssh_exception.SSHException as e:
        log.error("SSH command failed: {}\n{}".format(cmd, e))
        return None, None, None
    return stdin, stdout.read(), stderr.read()
def create(self):
    command = "%s pvcreate --zero y -ff -y -d %s" % \
        (LVM_PHYSICAL_VOLUME.prog, self.device)
    if run_script(command, self.chroot, log) != 0:
        log.error("Creation of physical layer on '%s' failed.", self.device)
        return 0
    return 1
def info(device, chroot=None):
    pvs = LVM_PHYSICAL_VOLUME.display(chroot=chroot)
    if not pvs.has_key(device) or \
            not pvs[device].has_key("vgname"):
        log.error("Unable to get physical volume information for '%s'.", device)
        return None
    return pvs[device]
def stop(self):
    if not self.active:
        return 1
    command = "%s --stop '%s'" % (RAID.prog, self.device)
    if run_script(command, self.chroot) != 0:
        log.error("Deactivation of raid '%s' failed.", self.name)
        return 0
    self.active = False
    return 1
def stop(self):
    if not self.active:
        return 1
    command = "%s vgchange -a n '%s'" % (LVM_VOLGROUP.prog, self.name)
    if run_script(command, self.chroot, log) != 0:
        log.error("Deactivation of volume group '%s' failed.", self.name)
        return 0
    self.active = False
    return 1
def info(name, chroot=None):
    lvs = LVM_LOGICAL_VOLUME.display(chroot=chroot)
    if not lvs.has_key(name) or \
            not lvs[name].has_key("device") or \
            not lvs[name].has_key("lvsize"):
        log.error("Unable to get logical volume information for '%s'.", name)
        return None
    return lvs[name]
def create(self, size):
    command = "%s lvcreate -n '%s' --size %dk '%s'" % \
        (LVM_LOGICAL_VOLUME.prog, self.name, (size / 1024), self.volgroup)
    if run_script(command, self.chroot, log) != 0:
        log.error("Creation of logical volume '%s' on '%s' failed.",
                  self.name, self.volgroup)
        return 0
    self.active = 1
    return 1
def info(name, chroot=None):
    vgs = LVM_VOLGROUP.display(chroot=chroot)
    if not vgs.has_key(name) or \
            not vgs[name].has_key("format") or \
            not vgs[name].has_key("pesize") or \
            not vgs[name].has_key("vgsize"):
        log.error("Unable to get volume group information for '%s'.", name)
        return None
    return vgs[name]
def check_dir(buildroot, dir):
    d = buildroot + dir
    try:
        check_exists(buildroot, dir)
    except:
        log.error("Directory '%s' does not exist.", dir)
        return 0
    if not os.path.isdir(d):
        log.error("'%s' is not a directory.", dir)
        return 0
    return 1
def update_num_hours(self):
    try:
        self.num_hours = int(request.args.get('num_hours',
                                              self.previous.num_hours))
    except ValueError:
        flash("seanweather didn't like the number of hours, using %d" %
              _DEFAULT_NUM_HOURS)
        log.error('bad number of hours. request: %s, prev: %s',
                  request.args.get('num_hours'), self.previous.num_hours)
        self.num_hours = _DEFAULT_NUM_HOURS
    log.info('num hours: %s', self.num_hours)
def _extract(self, tar, root_dir, file_path):
    log.debug("Extracting file {} from {} in {}".format(
        file_path, tar, root_dir))
    try:
        with contextlib.closing(lzma.LZMAFile(tar)) as xz:
            with tarfile.open(fileobj=xz) as f:
                f.extract(file_path, path=root_dir)
        return True
    except Exception as e:
        log.error("Error when untarring file {} from {} in {}:{}".format(
            file_path, tar, root_dir, e))
        return False
def copy_file(source, target):
    source_fd = target_fd = None
    try:
        try:
            source_fd = open(source, "r")
        except Exception, msg:
            log.error("Failed to open '%s': %s", source, msg)
            return 1
        try:
            target_fd = open(target, "w")
        except Exception, msg:
            log.error("Failed to open '%s': %s", target, msg)
            return 1
        # copy the file contents in chunks
        data = source_fd.read(65536)
        while data:
            target_fd.write(data)
            data = source_fd.read(65536)
    finally:
        if source_fd:
            source_fd.close()
        if target_fd:
            target_fd.close()
    return 0
def mutable_data_route( data_id, data_urls, writer_pubkey=None ):
    """
    Construct a mutable data route as a dict.  This can be serialized to JSON.
    Return the parsed JSON dict on success.
    Return None on error
    """
    # sanity check
    if type(data_id) not in [types.StringType, types.UnicodeType]:
        log.error("Data ID must be a string (got '%s')" % str(data_id))
        return None

    if type(data_urls) != types.ListType:
        log.error("Data URLs must be an array of strings")
        return None

    for url in data_urls:
        if type(url) not in [types.StringType, types.UnicodeType]:
            log.error("Data URL must be a string (got '%s')" % str(url))
            return None

    if writer_pubkey is not None:
        if type(writer_pubkey) not in [types.StringType, types.UnicodeType]:
            log.error("Writer public key must be encoded as a string (got '%s')" % str(writer_pubkey))
            return None

    route = {
        "id": data_id,
        "urls": data_urls
    }
    if writer_pubkey is not None:
        route['pubkey'] = writer_pubkey
    return route
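# A hedged usage sketch for mutable_data_route(); the data ID and replica URL
# are illustrative assumptions, not values from the original code.
route = mutable_data_route("alice.profile",
                           ["https://storage.example.com/alice.profile"])
if route is not None:
    # route is a plain dict ({"id": ..., "urls": [...]}), so it can be
    # JSON-serialized and replicated alongside the immutable data above.
    assert route["id"] == "alice.profile"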
def get_build_page(self):
    web = Web(url=self.build)
    try:
        req = web.get()
    except ConnectionError:
        log.error("Jenkins page {} is unavailable".format(self.build))
        return None
    if req.status_code != 200:
        return None
    else:
        self.file_path = os.path.join(self.job_dir, "build_page.gz")
        with gzip.open(self.file_path, "wb") as f:
            f.write(req.content)
        return self.file_path