def _checkInstall(self, pkg, context, pkgQueue, depth):
    Log.cout(Log.DEBUG, self._outputTrace(depth, "checkInstall(%s)" % pkg))
    if depth > self.MAX_DEPTH:
        return error.ERROR_INSTALL
    context.ts.setOperation(pkg, INSTALL)
    context.excludes.exclude(pkg)
    if not pkg.installed():
        ret = self._checkConflictAndProvide(pkg, context, pkgQueue, depth)
        if ret != error.ERROR_NONE:
            return ret
    requireDict = ResolverHelper.getAndMergeRequires(pkg)
    for name, requires in requireDict.items():
        prvpkgs = self._getByProvides(requires, context)
        prvpkg = self._getBestInstalledProvide(pkg, prvpkgs, context)
        if not prvpkg:
            return error.ERROR_NONE
        if prvpkg is pkg:
            continue
        if context.excludes.excluded(prvpkg):
            self._addDependency(pkg, prvpkg, context, 'install')
            continue
        if self.getInstallDir(prvpkg) == self.installRoot:
            ret = self._checkInstall(prvpkg, context, pkgQueue, depth + 1)
            if ret != error.ERROR_NONE:
                Log.cout(Log.ERROR, 'Check install package %s failed' % prvpkg)
                return ret
        self._addDependency(pkg, prvpkg, context, 'install')
    return error.ERROR_NONE
def send(self, data):
    header = Header()
    header.size = len(data)
    header.sequence_number = self.send_sequence
    data = header.to_bytes() + data
    while True:
        #TODO: this could be done more elegantly
        if len(self.workers) == 0:
            Log.log("Shutdown multiplexer. No workers found")
            self.cleanup()
            sys.exit(0)
        worker = self.workers[self.send_index][0]
        #Increment the sequence number and the send_index to send along another worker
        self.send_index = (1 + self.send_index) % len(self.workers)
        try:
            worker.write(data)
            worker.flush()
            break
        except IOError as e:
            if e.errno == 11:
                #TODO: whut. Why did I do this?
                pass
            else:
                raise e
    self.send_sequence = self.send_sequence + 1
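# The Header class used by send() above is not defined in this snippet. Below is
# a minimal sketch of what such a framing header might look like, assuming
# (hypothetically) a fixed-width big-endian prefix of payload size and sequence
# number; the real layout and field widths may differ.
import struct

class Header(object):
    FORMAT = '>IQ'  # assumed layout: 4-byte size, 8-byte sequence number

    def __init__(self):
        self.size = 0
        self.sequence_number = 0

    def to_bytes(self):
        # Pack size and sequence number into a fixed-width prefix.
        return struct.pack(self.FORMAT, self.size, self.sequence_number)

    @classmethod
    def from_bytes(cls, raw):
        # Inverse of to_bytes(); a receiver could use this to reorder frames.
        header = cls()
        header.size, header.sequence_number = struct.unpack(cls.FORMAT, raw)
        return header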
def _doHandleSameProvidePkg(self, pkg, context, pkgQueue, depth):
    """
    If an installed package has the same name and cannot coexist,
    we need to remove the old package.
    """
    ts, excludes = context.ts, context.excludes
    for provide in pkg.provides:
        providers = self._getByProvide(Provide(name=provide.name), context)
        installedPkgs = []
        for package in providers:
            if package is pkg:
                continue
            if not ts.installed(package):
                excludes.exclude(package)
            else:
                installedPkgs.append(package)
        for package in installedPkgs:
            coexists, notSatisfiedPkgs = self.coexists(pkg, provide, package, context)
            if coexists:
                continue
            if not self._checkSamePkgWithOption(pkg, package, context, depth):
                return error.ERROR_INSTALL, None
            ret = self._remove(package, context, pkgQueue, depth + 1)
            if ret != error.ERROR_NONE:
                logInfo = self._outputTrace(depth, 'Handle same provide between %s and %s failed' % (pkg, package))
                Log.cout(Log.ERROR, logInfo)
                return ret, notSatisfiedPkgs
            self._addDependency(pkg, package, context, 'remove')
    return error.ERROR_NONE, None
def _check(self, pkg, context, pkgQueue, depth):
    Log.cout(Log.DEBUG, self._outputTrace(depth, 'check(%s)' % pkg))
    ts, excludes = context.ts, context.excludes
    ret = error.ERROR_NONE
    context.checkedPkgs.add(pkg)
    requireDict = ResolverHelper.getAndMergeRequires(pkg)
    # Maybe we need to sort these requires.
    items = self._sortRequireByName(requireDict)
    for name, requires in items:
        prvpkgs = self._getByProvides(requires, context)
        if pkg in prvpkgs:
            continue
        prvpkgs = ResolverHelper.filterPkgByRequires(prvpkgs, requireDict)
        prvpkg = self._getBestInstalledProvide(pkg, prvpkgs, context)
        if prvpkg is not None:
            ret = self._doCheck(prvpkg, context, pkgQueue, depth)
            if ret != error.ERROR_NONE:
                return ret
            if self._needUpgradeLocalProvide(pkg, name, prvpkg, context):
                self._upgradeLocalProvide(pkg, requires, prvpkgs, context, pkgQueue)
            continue
        if context.initOperation == CHECK:
            return error.ERROR_CHECK
        prvpkgs = self._filterAndSortPackages(prvpkgs, pkg)
        pkgQueue.append((INSTALL, pkg, requires, prvpkgs))
    return error.ERROR_NONE
def _killRemoteCmd(self, host, executorPath, cmdStr, remoteUser, remoteSudo):
    pathIndex = cmdStr.find(executorPath)
    if pathIndex == -1:
        return False
    rawCmd = cmdStr[pathIndex:]
    rawCmd = ' '.join(rawCmd.split())
    Log.cout(Log.INFO, 'kill ainst2 process on the remote host %s ...' % host)
    cmd = 'ssh %s@%s ps -efw | grep \'%s\' | grep -v \'ssh %s@%s\' | grep -v grep'\
          % (remoteUser, host, rawCmd, remoteUser, host)
    out, err, code = process.runRedirected(cmd, self._killTimeout)
    if code != 0:
        Log.cout(Log.ERROR, 'get remote pid failed')
        return False
    pidList = []
    contentList = out.split('\n')
    for content in contentList:
        if not content or not content.strip():
            continue
        items = content.split()
        pidList.append(items[1])
    if not pidList:
        return True
    pidSet = set(pidList)
    index = 0
    while index < len(pidList):
        subPidList = self._getSubPidList(remoteUser, host, pidList[index])
        for subPid in subPidList:
            if subPid not in pidSet:
                pidList.append(subPid)
                pidSet.add(subPid)
        index += 1
    return self._killRemotePid(pidList, remoteUser, host, remoteSudo)
def _getMetaFile(self, repoMdObj, metaName, fileName):
    if not repoMdObj.repoMdDatas.has_key(metaName):
        return False
    metaObj = repoMdObj.repoMdDatas[metaName]
    destTmpFile = self._getRepoDataDir() + '/' + \
                  metaObj.locationHref.split('/')[-1]
    metaUrl = self.repoConfig.baseurl + '/' + metaObj.locationHref
    # uncompressTmpFile = '.'.join(destTmpFile.split('.')[:-1]) + '.tmp'
    uncompressTmpFile = self._getRepoDataDir() + '/' + \
                        fileName + '.tmp'
    if not file_util.remove(destTmpFile) or\
       not file_util.remove(uncompressTmpFile):
        return False
    if not self.fileFetcher.fetch(metaUrl, destTmpFile) or\
       not file_util.chmod(destTmpFile, 0666):
        return False
    try:
        if destTmpFile.split('.')[-1] == 'bz2':
            f = bz2.BZ2File(destTmpFile)
        else:
            f = gzip.open(destTmpFile)
        if not file_util.writeToFile(uncompressTmpFile, f.read()) or\
           not file_util.chmod(uncompressTmpFile, 0666):
            f.close()
            return False
        f.close()
    except Exception:
        Log.cout(Log.ERROR, 'decompress %s failed' % destTmpFile)
        return False
    return self._checkSumValid(metaObj, uncompressTmpFile)
def _doParallelOperate(self, hostSet, cmd, parallel, remoteTimeout, retryTime,
                       retryInterval, user, remoteSudo, errorContinue):
    successList = []
    failedList = []
    if not hostSet:
        return successList, failedList
    count = 0
    hostLen = len(hostSet)
    threaderList = []
    for host in hostSet:
        Log.coutLabel(Log.INFO, 'Process Remote host:%s' % host)
        remoteCmd = 'ssh %s@%s %s' % (user, host, cmd)
        threader = RemoteOperatorThread(host, self._ainstBinPath, remoteCmd,
                                        remoteTimeout, retryTime, retryInterval,
                                        user, remoteSudo)
        threader.setDaemon(True)
        threaderList.append(threader)
        threader.start()
        count += 1
        if count % parallel != 0 and count < hostLen:
            continue
        for threader in threaderList:
            threader.join()
        for threader in threaderList:
            if not threader.returnValue:
                failedList.append(threader.host)
            else:
                successList.append(threader.host)
        if not errorContinue and len(failedList) > 0:
            break
        threaderList = []
    return successList, failedList
def _queue(self, pkgQueue, context, depth):
    if len(pkgQueue) == 0:
        return error.ERROR_NONE
    Log.cout(Log.DEBUG, self._outputTrace(depth, "queue"))
    ret = error.ERROR_NONE
    while True:
        tmpQueue, hasSuccess = list(), False
        while len(pkgQueue) > 0:
            item = pkgQueue.pop(0)
            if item[0] == INSTALL:
                ret = self._handleInstallItem(pkgQueue, item, context, depth)
            elif item[0] == REMOVE:
                ret = self._handleRemoveItem(pkgQueue, item, context, depth)
            if ret == error.ERROR_NONE:
                hasSuccess = True
            elif ret == error.ERROR_EXCLUSIVE_DEPS:
                tmpQueue.append(item)
            else:
                opt = 'Handle'
                if item[0] == INSTALL:
                    opt = 'Install'
                elif item[0] == REMOVE:
                    opt = 'Remove'
                content = self._outputTrace(depth, '%s pkg %s failed' % (opt, item[1]))
                Log.cout(Log.ERROR, content)
                return ret
        if len(tmpQueue) == 0:
            return error.ERROR_NONE
        if not hasSuccess:
            return error.ERROR_EXCLUSIVE_DEPS
        pkgQueue.extend(tmpQueue)
    return ret
def iterate(self):
    outdegrees, depDict = dict(), dict()
    for package in self.preDict:
        prerequisites = self.preDict[package]
        if package not in outdegrees:
            outdegrees[package] = 0
        for prerequisite in prerequisites:
            if prerequisite is not None:
                outdegrees[package] += 1
                if prerequisite not in outdegrees:
                    outdegrees[prerequisite] = 0
                depDict.setdefault(prerequisite, set()).add(package)
    stack = [x for x in outdegrees if outdegrees[x] == 0]
    count = 0
    while len(stack) > 0:
        package = stack.pop(0)
        count = count + 1
        yield package
        for dependent in depDict.get(package, []):
            outdegrees[dependent] -= 1
            if outdegrees[dependent] <= 0:
                stack.append(dependent)
    if count != len(outdegrees):
        printDegrees = dict()
        for pkg in outdegrees:
            if outdegrees[pkg] != 0:
                printDegrees[pkg] = outdegrees[pkg]
        Log.cout(Log.DEBUG, printDegrees)
        raise DependencyGraph.CycleDependencyException, "cyclic dependency"
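# Usage sketch for iterate() above (hypothetical data; the real construction of
# DependencyGraph is not shown in this snippet). preDict maps a package to the
# set of packages it depends on, and iterate() yields packages in an order where
# every prerequisite comes before its dependents, raising
# CycleDependencyException if the graph contains a cycle:
#
#   graph = DependencyGraph()              # assumed construction
#   graph.preDict = {'app': set(['lib']),  # app depends on lib
#                    'lib': set(['base']),
#                    'base': set()}
#   list(graph.iterate())                  # -> ['base', 'lib', 'app']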
def readLink(path):
    try:
        if isLink(path):
            return os.readlink(path)
    except Exception, e:
        Log.cout(Log.DEBUG, 'readlink %s failed: %s' % (path, e))
    return None
def fetch(self, data=None, headers=None):
    """Fetch http file from network.

    Args:
        headers: {str:str} of additional request HTTP headers
        data: {str:*} of data to be sent via HTTP

    Returns:
        [*str] of file pointer-like HTTP stream.
    """
    # Fetch request.
    if self.type == "http":
        rsp = self._fetch_http(data, headers)
    elif self.type == "ftp":
        rsp = self._fetch_ftp()
    else:
        Log.warning("Unknown type, cannot fetch %s for %s." % (self.url, self))
        return None
    self.status = 200
    # Convert header keys into all lower case.
    self.headers = {}
    for key, value in dict(rsp.info()).items():
        self.headers[key.lower()] = value
    self.url_rsp = rsp.geturl()
    return rsp
def writeToFp(fp, content):
    try:
        fp.write(content)
        fp.flush()
    except Exception, e:
        Log.cout(Log.DEBUG, 'Write to fp failed: %s' % e)
        return False
    return True
def evolve(self):
    game_init()
    it_num = 0
    log = Log(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H-%M-%S.csv'))
    while (it_num < MAX_ITERATIONS and
           (self.solution is None or
            TARGET_SCORE == 0 or
            self.solution.fitness['score'] < TARGET_SCORE)):
        self.iteration(it_num)
        log.push(it_num, self.population)
        if DISPLAY_BEST == 'iteration' or (DISPLAY_BEST == 'best' and self.solution.fitness['score'] > TARGET_SCORE):
            print "\nBest on this iteration...\n"
            nn = self.population[0].get_neural_network()
            play(nn)
        it_num += 1
    print '\nEvolution terminated with score: {0} and clock: {1}\n'.format(self.solution.fitness['score'], self.solution.fitness['clock'])
    log.save()
    game_end()
    return self.solution
def _generateConfigToRoot(self, ainstPkgDir, aicfInfo, settingMap, confDict):
    if aicfInfo:
        for path, configInfo in aicfInfo.configs.iteritems():
            srcConfigPath = ainstPkgDir + '/' + path
            destConfigPath = self._ainstRoot.getRoot() + '/' + path
            if not file_util.isFile(srcConfigPath):
                Log.cout(Log.ERROR, 'Config file %s does not exist' % srcConfigPath)
                return False
            if not file_util.exists(destConfigPath):
                Log.cout(Log.ERROR, 'Dest config file %s does not exist' % destConfigPath)
                return False
            tmpDirName = self._ainstRoot.getRootVarAinstDir('tmp')
            tmpPath = tmpDirName + '/' + os.path.basename(destConfigPath) + '.tmp.set'
            if not file_util.move(destConfigPath, tmpPath):
                Log.cout(Log.ERROR, 'Backup config file %s failed' % destConfigPath)
                return False
            confDict[path] = (tmpPath, destConfigPath)
            configGenerator = ConfigGenerator()
            if not configGenerator.generateConfig(srcConfigPath, destConfigPath,
                                                  configInfo.mode, configInfo.noReplace,
                                                  settingMap):
                Log.cout(Log.ERROR, 'Generate Config file %s failed' % path)
                return False
    else:
        Log.cout(Log.DEBUG, 'No aicf file, so no config will be changed')
    return True
def _doFetch(self, url):
    socket.setdefaulttimeout(self.timeout)
    try:
        request = urllib2.Request(url)
        response = urllib2.urlopen(request)
        chunked = self._isChunked(response)
        content = ''
        if not chunked:
            length = int(response.info().getheader('Content-Length'))
            if length > self.maxFileLength:
                return FetchError.FETCH_TOO_LARGE, None
            content = response.read()
        else:
            length = 0
            while True:
                line = response.readline(self.lineLength)
                if not line:
                    break
                content += line
                length += len(line)
                if length > self.maxFileLength:
                    return FetchError.FETCH_TOO_LARGE, None
        response.close()
        return FetchError.FETCH_SUCCESS, content
    except Exception, e:
        Log.cout(Log.ERROR, 'Fetch failed: %s' % e)
        if hasattr(e, 'reason'):
            if str(e.reason) == 'timed out':
                return FetchError.FETCH_TIMEOUT, None
        return FetchError.FETCH_OTHER_ERROR, None
def __init__(self, message, status_code=None, payload=None):
    Exception.__init__(self)
    Log.exception(message)
    self.message = message
    if status_code is not None:
        self.status_code = status_code
    self.payload = payload
def _generateConfigByExpand(self, srcPath, destPath, noReplace, settings):
    content = file_util.readFromFile(srcPath)
    if content is None:
        Log.cout(Log.ERROR, 'Read config file %s failed' % srcPath)
        return False
    replacer = KeyValueReplacer(settings)
    content = replacer.replace(content)
    return file_util.writeToFile(destPath, content)
def makeCache(self):
    repoList = self.getEnabledRepo()
    for repo in repoList:
        if not repo.makeCache():
            Log.coutValue(Log.INFO, repo.id, 'failed')
        else:
            Log.coutValue(Log.INFO, repo.id, 'success')
    return True
def _checkMetaFile(self, repoMd, metaName, fileName):
    metaFile = self._getRepoDataDir() + '/' + fileName
    if not file_util.exists(metaFile):
        return False
    if not repoMd.repoMdDatas.has_key(metaName):
        Log.cout(Log.ERROR, '%s not found in repomd.xml' % metaName)
        return False
    metaObj = repoMd.repoMdDatas[metaName]
    return self._checkSumValid(metaObj, metaFile)
def makeCache(self, cacheParam):
    repoStorage = self._getRepoStorage(self._ainstConf, cacheParam.repos)
    if repoStorage is None:
        Log.cout(Log.ERROR, 'Get repo storage failed')
        return OperatorRet.OPERATE_FAILED
    if not repoStorage.makeCache():
        Log.cout(Log.ERROR, 'RepoStorage makeCache failed')
        return OperatorRet.OPERATE_FAILED
    return OperatorRet.OPERATE_SUCCESS
def _selectInstallPkgs(self, context, pkgs):
    installPkgs = []
    for pkg in pkgs:
        installPkg = self._selectPkg(context, pkg)
        if installPkg is None:
            Log.cout(Log.ERROR, 'No packages %s available. Abort Installation.' % pkg)
            return None
        installPkgs.append(installPkg)
    return installPkgs
def getDirNameByIndex(self, index):
    if not self._header:
        Log.cout(Log.ERROR, 'Get dir name failed')
        return None
    dirListLength = len(self._getDirs())
    if index >= dirListLength:
        Log.cout(Log.ERROR, 'Get dir name failed')
        return None
    return self._getDirs()[index]
def clearCache(self):
    repoList = self.getEnabledRepo()
    for repo in repoList:
        if not repo.clearCache():
            Log.coutValue(Log.INFO, repo.id, 'failed')
            self._consoleLogger.error('Clear cache for repo %s failed' % repo.id)
        else:
            Log.coutValue(Log.INFO, repo.id, 'success')
    return True
def _generateConfigByTemplate(self, srcPath, destPath, noReplace, settings):
    out, err, code = process.runRedirected(srcPath)
    if code != 0:
        Log.cout(Log.ERROR, 'Generate template config file %s failed: %s' % (destPath, err))
        return False
    if not file_util.writeToFile(destPath, out):
        Log.cout(Log.ERROR, 'Write config file to %s failed' % destPath)
        return False
    return True
def rpm2dir(rpmPath, destDir, timeout=600):
    currentWorkdir = os.getcwd()
    if not file_util.isDir(destDir) and not file_util.makeDir(destDir):
        Log.cout(Log.ERROR, 'Make rpm dir %s failed' % destDir)
        return False
    try:
        os.chdir(destDir)
    except OSError, e:
        return False
def queue_tweet(self, wrapped_tweet):
    self.tweet_queue.append(wrapped_tweet)
    Log.v("READ", "Added new tweet to queue: ")
    wrapped_tweet.print_incoming_status()
    if not self.thread_locked:
        self.thread_locked = True
        self.thread = threading.Thread(target=self.work_on_tweet)
        self.thread.start()
    else:
        return
def dump(self, rootInfo, path):
    content = ''
    for installRoot in rootInfo.installRootSet:
        content += installRoot + '\n'
    preUmask = os.umask(0)
    ret = file_util.writeToFile(path, content)
    os.umask(preUmask)
    if not ret:
        Log.cout(Log.ERROR, 'Dump to [%s] failed' % path)
    return ret
def load(self, path):
    rootInfoDb = self._getDb(path)
    if not rootInfoDb:
        Log.cout(Log.ERROR, 'Get root info db failed')
        return None
    rootInfo = RootInfo()
    rootInfo.installRootSet = set(rootInfoDb.getKeys())
    rootInfoDb.close()
    return rootInfo
def _checkAicfInfo(self, aicfInfo):
    if not aicfInfo:
        return False
    for config in aicfInfo.configs.values():
        items = config.destPath.split('/')
        if len(items) < 2 or not self._ainstRoot.isInRootDir(items[0]):
            Log.cout(Log.ERROR, 'Invalid file path %s' % config.destPath)
            return False
    return True
def _initRepos(self, ainstConf, repos):
    if not ainstConf:
        Log.cout(Log.ERROR, "Ainst config is None")
        return None
    repoStorage = self._initRepoStorage(ainstConf)
    if not repoStorage:
        Log.cout(Log.ERROR, "Repo storage init failed")
        return None
    repoStorage.processDisableEnable(repos)
    return repoStorage.getPackageSack()
def get_user(self, user_id):
    log = Log()
    log.action = 'get_user'
    log.sender = self
    log.log_var(user_id=user_id)
    res = None
    log.log_var(res=res)
    try:
        res = self.vk_session.users.get(user_ids=user_id)
        log.log_var(res=res)
        log.status = 'OK'
    except vk_api.VkApiError as e:
        log.status = 'Exception'
        log.log_var(exception_info=e)
    self.class_logger.log_info(log)
    return res
def get_conversation_members(self, peer_id):
    log = Log()
    log.action = 'get_conversation_members'
    log.sender = self
    log.log_var(peer_id=peer_id)
    res = None
    log.log_var(res=res)
    try:
        res = self.vk_session.messages.getConversationMembers(peer_id=peer_id)['profiles']
        log.log_var(res=res)
        log.status = 'OK'
    except vk_api.VkApiError as e:
        log.status = 'Exception'
        log.log_var(exception_info=e)
    self.class_logger.log_info(log)
    return res
from logger import Log
from threading import Thread
from datetime import datetime, timedelta
import time

LOG = Log(__name__)


"""
Class to abstract threaded worker subtasks

:param id: node identifier
:param worker: the subtask worker object
"""
class WorkerThread(object):

    def __init__(self, worker, id):
        self.id = id
        self.worker = worker
        self.thread = None
        self.running = False
        self.start_time = 0


"""
Class to construct a threaded worker

:param func: thread target function
:param tasks: list of subtasks (WorkerThread)
:param daemon: option to run task thread daemonized
"""
class WorkerTasks(object):
def create_session(self, group_token, group_id):
    log = Log()
    log.sender = self
    log.action = 'create_session'
    log.log_var(group_id=group_id)
    try:
        api = vk_api.VkApi(token=group_token)
        log.log_var(api=api)
        self.longpoll = api_longpoll.VkBotLongPoll(api, group_id)
        self.vk_session = api.get_api()
        log.log_var(longpoll=self.longpoll, vk_session=self.vk_session)
        log.status = 'OK'
    except vk_api.VkApiError as e:
        log.status = 'Exception'
        log.log_var(exception_info=e)
    self.class_logger.log_info(log)
import tweepy

from logger import Log
from components.statusqueue import StatusQueue
from components.timelinescanner import TimelineScanner
from components.tweetsender import TweetSender
from components.statusanalyzer import StatusAnalyzer
import json

if __name__ == '__main__':
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token_key, access_token_secret)
    api = tweepy.API(auth)

    timeline_scanner = TimelineScanner(api)
    status_queue = StatusQueue()
    status_analyzer = StatusAnalyzer()
    tweet_sender = TweetSender(api)

    # once scanner finds a tweet, pass it to the reader
    timeline_scanner.add_new_status_listener(status_queue.queue_tweet)
    status_queue.add_status_ready_listener(status_analyzer.generate_response_tweet)
    status_analyzer.add_response_generated_listener(tweet_sender.send_tweet)

    Log.v("MAIN", "Initialized scanner")
    stream = tweepy.Stream(auth, timeline_scanner)
    stream.filter(track=['dogebot'])
    # I want to listen in onto #dogebot first, then from there detect commands.
    # If you want to just listen into all commands, do:
    # stream.filter(track=plugin_classes.get_commands_list())
def __init__(self, token, version):
    """Crawler instance takes version and token as parameters."""
    self.version = version
    self.token = token
    Log.info('New Crawler: ver. %s' % version)
def setTarget(self, target):
    """Set target FB page for Crawler instance."""
    self.target = target
    Log.info('Target: %s' % target)
from Config import config
import requests
from remote.download import *
from logger import Log
import json
import time

log = Log('tan.txt')


# def get(url):
#     response = requests.get(url)
#     res = response.content.decode('unicode_escape')
#     return json.load(res)


def get(url, data=None):
    if data:
        response = requests.get(url, data=data)
    else:
        response = requests.get(url)
    if not response.ok:
        raise Exception('http error:' + url + str(response.status_code))
    res = response.content.decode('unicode_escape')
    return json.loads(res)


if __name__ == '__main__':
    log.info('program started:------------------------------------')
    info_url = config.httpInfo + 'info'
    finish_url = config.httpInfo + 'finish'
    while (True):
# -*- coding: utf-8 -*-
from crawler import Crawler
from logger import Log

# CONFIG
fb_version = '2.12'
app_id = 'APP_ID'
app_secret = 'APP_SECRET'
token = 'access_token=' + app_id + '|' + app_secret
target = 'bbcnews'

if __name__ == '__main__':
    Log.init()
    crawler = Crawler(token, fb_version)
    crawler.setTarget(target)
    crawler.setTimeInterval(1)
    crawler.init()
                             num_workers=8)
    target_loader = DataLoader(target, batch_size=batch_size, shuffle=True,
                               num_workers=8)
    source_loader = DataLoader(source, batch_size=batch_size, shuffle=True,
                               num_workers=8)
    return target_loader, source_loader, test_loader


if __name__ == '__main__':
    # create the Logger
    log = Log(f'logs/{setting}', method_name)

    # Make the dataset
    target_loader, source_loader, test_loader = get_setting()

    if args.epochs is not None:
        EPOCHS = args.epochs

    loader_length = 'min'
    dl_len = min(len(source_loader), len(target_loader))
    print(f"Num of Batches ({loader_length}) is {dl_len}")

    total_steps = EPOCHS * dl_len
    method = NODA(net, init_lr, total_steps, device, num_classes=n_classes)

    print("Do a validation before starting to check it is ok...")
    val_loss, val_acc = valid(method, valid_loader=test_loader)
from datetime import datetime
import csv

from logger import Log
from settings import LOGFILE, CSVFILE

log = Log(LOGFILE)


class UnableToWorkException(Exception):
    pass


class Employee:

    def validate_user_email_exist(self):
        with open('emails.txt', 'r') as f:
            emails_from_file = (x.strip() for x in f.readlines())
            if self.mail in emails_from_file:
                raise ValueError

    def write_emails_to_file(self):
        if self.mail:
            with open("emails.txt", "a") as f:
                f.write(self.mail + '\n')

    def __init__(self, name, surname, mail, phone, money_per_day):
        try:
            name = name.capitalize()
        except AttributeError:
            name = str(name).capitalize()
        except NameError as e:
def mark_msg_read(self, msg_id, peer_id):
    log = Log()
    log.action = 'mark_msg_read'
    log.sender = self
    log.log_var(msg_id=msg_id, peer_id=peer_id)
    res = 0
    log.log_var(res=res)
    try:
        res = self.vk_session.messages.markAsRead(msg_id, peer_id)
        log.log_var(res=res)
        log.status = 'OK'
    except vk_api.VkApiError as e:
        log.status = 'Exception'
        log.log_var(exception_info=e)
    self.class_logger.log_info(log)
    return res
def get_conversations(self):
    log = Log()
    log.action = 'get_conversations'
    log.sender = self
    res = None
    log.log_var(res=res)
    try:
        res = self.vk_session.messages.getConversations()['items']
        log.log_var(res=res)
        log.status = 'OK'
    except vk_api.VkApiError as e:
        log.status = 'Exception'
        log.log_var(exception_info=e)
    self.class_logger.log_info(log)
    return res
def get_event_type(self, event):
    log = Log()
    log.action = 'get_event_type'
    log.sender = self
    log.log_var(event=event)
    res = None
    log.log_var(res=res)
    try:
        if event.type == api_longpoll.VkBotEventType.MESSAGE_NEW:
            res = 'MESSAGE_NEW'
        elif event.type == api_longpoll.VkBotEventType.MESSAGE_TYPING_STATE:
            res = 'MESSAGE_TYPING_STATE'
        else:
            res = 'UNKNOWN'
        log.log_var(res=res)
        log.status = 'OK'
    except vk_api.VkApiError as e:
        log.status = 'Exception'
        log.log_var(exception_info=e)
    self.class_logger.log_info(log)
    return res
def write_msg(self, session_event, text, sticker_id=None, picture=None, keyboard=None):
    log = Log()
    log.action = 'write_msg'
    log.sender = self
    log.log_var(event=session_event, text=text, sticker_id=sticker_id,
                picture=picture, keyboard=keyboard)
    res = None
    log.log_var(res=res)
    try:
        data = {'peer_id': session_event.obj['peer_id'],
                'random_id': session_event.obj['random_id']}
        if text and picture is None:
            data['message'] = text
        if sticker_id is not None:
            data['sticker_id'] = sticker_id
        if picture is not None:
            photo_file = self.vk_session.photos.getMessagesUploadServer(
                peer_id=session_event.obj['peer_id'])
            log.log_var(photo_file=photo_file)
            r_data = {'photo': open('images/pitivo.jpg', 'rb')}
            log.log_var(r_data=r_data)
            photo_data = requests.post(photo_file['upload_url'], files=r_data).json()
            log.log_var(photo_data=photo_data)
            photo = self.vk_session.photos.saveMessagesPhoto(server=photo_data['server'],
                                                             photo=photo_data['photo'],
                                                             hash=photo_data['hash'])[0]
            log.log_var(photo=photo)
            data['attachment'] = 'photo{0}_{1}'.format(photo['owner_id'], photo['id'])
            data['message'] = text
        if keyboard is not None:
            data['keyboard'] = keyboard
        # res = self.vk_session.messages.send(data)
        if keyboard is None:
            res = self.vk_session.messages.send(peer_id=data['peer_id'],
                                                message=data['message'],
                                                random_id=data['random_id'])
        else:
            res = self.vk_session.messages.send(peer_id=data['peer_id'],
                                                message=data['message'],
                                                keyboard=data['keyboard'],
                                                random_id=data['random_id'])
        log.log_var(res=res)
        log.status = 'OK'
    except vk_api.VkApiError as e:
        log.status = 'Exception'
        log.log_var(exception_info=e)
    self.class_logger.log_info(log)
    return res