def load_from_db(directory, directory_checksum):
    """
    Checks whether the directory location information is already stored in the DB.
    The checksum is also compared with the one stored in the DB, in case the
    directory has changed.

    Args:
        directory: absolute path of the photo directory
        directory_checksum: computed checksum of the directory
    Returns:
        database content corresponding to the 'locations' key of the directory
        None - if not present
    """
    try:
        with MongoConnector() as mongo:
            directory_base = os.path.basename(directory)
            # Check if an entry with the directory name is present in the DB
            db_dir_metadata = mongo.find_one({'directory': directory_base}) or None
            # Check if the directory has an entry in the DB and, if so,
            # whether the checksum from the DB matches the computed one
            if db_dir_metadata and directory_checksum == db_dir_metadata['directory_checksum']:
                log.info("Loading data from DB...")
                # log.debug(json.dumps(db_dir_metadata['locations'], indent=1))
                return db_dir_metadata['locations']
    except KeyError as e:
        log.warning("Check DB structure! Key {} is missing. "
                    "Re-computing result!".format(e))
    except Exception as e:
        log.error(e)
def store_mutable_data_version(conf, data_id, ver):
    """
    Locally store the version of a piece of mutable data,
    so we can ensure that its version is incremented on
    subsequent puts.

    Return True if stored
    Return False if not
    """
    if conf is None:
        conf = config.get_config()

    if conf is None:
        log.warning("No config found; cannot store version for '%s'" % data_id)
        return False

    metadata_dir = conf['metadata']
    if not os.path.isdir(metadata_dir):
        log.warning("No metadata directory found; cannot store version of '%s'" % data_id)
        return False

    serialized_data_id = data_id.replace("/", "\x2f").replace('\0', "\\0")
    version_file_path = os.path.join(metadata_dir, serialized_data_id + ".ver")

    try:
        with open(version_file_path, "w+") as f:
            f.write("%s" % ver)
        return True
    except Exception as e:
        # failed for whatever reason
        log.warn("Failed to store version of '%s' to '%s'" % (data_id, version_file_path))
        return False
def delete_mutable_data_version(conf, data_id):
    """
    Locally delete the version of a piece of mutable data.

    Return True if deleted.
    Return False if not
    """
    if conf is None:
        conf = config.get_config()

    if conf is None:
        log.warning("No config found; cannot delete version for '%s'" % data_id)
        return False

    metadata_dir = conf['metadata']
    if not os.path.isdir(metadata_dir):
        log.warning("No metadata directory found; cannot delete version of '%s'" % data_id)
        return False

    serialized_data_id = data_id.replace("/", "\x2f").replace('\0', "\\0")
    version_file_path = os.path.join(metadata_dir, serialized_data_id + ".ver")

    try:
        os.unlink(version_file_path)
        return True
    except Exception as e:
        # failed for whatever reason
        log.warn("Failed to remove version file '%s'" % (version_file_path))
        return False
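# A minimal usage sketch for the two version helpers above, assuming a config
# dict of the shape the functions read ({'metadata': <existing directory>});
# the metadata path and the data_id used here are hypothetical.
conf = {'metadata': '/tmp/blockstack-metadata'}   # directory must already exist
if store_mutable_data_version(conf, "profile/index", 3):
    log.info("stored version 3 for profile/index")
delete_mutable_data_version(conf, "profile/index")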
def gen_shard(sess, input_base_dir, image_filenames, output_filename,
              image_texts):
    """Create a TFRecord file from a list of image filenames"""
    writer = tf.python_io.TFRecordWriter(output_filename)

    for item, filename in enumerate(image_filenames):
        path_filename = os.path.join(input_base_dir, filename)
        if os.stat(path_filename).st_size == 0:
            log.warning('Skipping empty file: %s' % (filename, ))
            continue
        try:
            image_data, height, width = get_image(sess, path_filename)
            text, labels = get_text_and_labels(image_texts[item])
            if is_writable(width, text):
                # Inspect the text and labels
                # print(text, labels)
                if len(labels) == 0:
                    print(text, labels)
                else:
                    example = make_example(filename, image_data, labels, text,
                                           height, width)
                    writer.write(example.SerializeToString())
            else:
                log.info('Skipping image with too short width: %s' % (filename, ))
        except Exception as e:
            # Some files have bogus payloads; catch and note the error, moving on
            log.warning('Error occurred while processing file %s' % (filename, ))
            log.error(e)
    writer.close()
def filter(cls, file_fullname):
    try:
        size = path.getsize(file_fullname)
        last_update = path.getmtime(file_fullname)
        if time() - last_update > cls.__LastUpdateSeconds:  # not updated for a long time
            return 1
        if win and last_update <= path.getctime(file_fullname):
            # never updated after creation (Linux exposes no creation time)
            return 1
        if size < cls.__SmallFileMaxSize:  # too small, does not look like a production log
            return 2
        if file_fullname[file_fullname.rfind('.'):].lower() in cls.__ExcludedExtensions:
            # known file extension, not a log
            return 3
        if (not win) and access(file_fullname, X_OK):  # unix executable, not a log
            return 4
        with open(file_fullname, 'rb') as fp:  # not a text file, not a log
            if size > cls.__CodecCheckSize * 2:
                # sample from the middle of the file, where detection tends to be more accurate
                fp.seek(int(size / 2))
            charset = detect(fp.read(cls.__CodecCheckSize))
            if charset['confidence'] < 0.5:
                return 5
            return charset
    except Exception as err:
        log.warning(file_fullname + '\t' + str(err))
        return 0
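# A minimal sketch of how the filter above is consumed (see the scanner's run()
# method further down, which calls Judger.filter): an int return means the file
# was rejected, with the reason encoded in the value, while a dict (the chardet
# result) means the file is a log candidate. The path below is hypothetical.
rc = Judger.filter('/var/log/app/service.log')
if type(rc) is int:
    # 0: error, 1: inactive, 2: too small, 3: excluded extension,
    # 4: executable, 5: non-text / low codec confidence
    log.info('rejected with code %d' % rc)
else:
    log.info('candidate log file, detected charset: %s' % rc.get('encoding'))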
def __copy(self, file_fullnames):
    output_file = ''
    for input_file in file_fullnames:
        log.info('Sampling ' + input_file)
        try:
            if self.__OutputFormat == 2:
                # Save sample files into separate sub-directories
                if win:
                    curr_path = self.__OutputPath + os.sep + os.path.split(
                        input_file)[0].replace(':', '_')
                else:
                    curr_path = self.__OutputPath + os.path.split(input_file)[0]
                os.makedirs(curr_path, exist_ok=True)
                output_file = os.path.join(curr_path, os.path.split(input_file)[1])
            else:
                # Save everything into one directory; encode the original
                # directory structure into the file name
                file_name = input_file.replace(os.sep, '_').replace(':', '_')
                output_file = self.__OutputPath + '/' + file_name
            with open(output_file, 'w', encoding='utf-8') as fp:
                for line in self.__readLine(self, input_file):
                    fp.write(line)
        except Exception as err:
            log.warning(input_file + '\t' + str(err))
            if os.path.exists(output_file):
                os.remove(output_file)
            continue
def register_storage(storage_impl):
    """
    Given a class, module, etc. with the methods,
    register the mutable and immutable data handlers.

    The given argument--storage_impl--must persist for
    as long as the application will be using its methods.

    Return True on success
    Return False on error
    """
    global storage_handlers
    storage_handlers.append(storage_impl)

    # sanity check
    for expected_method in ["make_mutable_url", "get_immutable_handler", "get_mutable_handler",
                            "put_immutable_handler", "put_mutable_handler",
                            "delete_immutable_handler", "delete_mutable_handler"]:
        if not hasattr(storage_impl, expected_method):
            log.warning("Storage implementation is missing a '%s' method" % expected_method)

    return True
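# A minimal sketch of a storage driver that would pass the sanity check above.
# Only the method names come from the expected_method list in register_storage();
# the class name, parameter lists, and no-op bodies are hypothetical.
class NullStorage(object):
    def make_mutable_url(self, data_id): return "null://" + data_id
    def get_immutable_handler(self, key): return None
    def get_mutable_handler(self, url): return None
    def put_immutable_handler(self, key, data, txid): return True
    def put_mutable_handler(self, data_id, nonce, signature, data_json): return True
    def delete_immutable_handler(self, key, txid): return True
    def delete_mutable_handler(self, data_id, signature): return True

register_storage(NullStorage())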
def x_config(ks, buildroot, source):
    # default: VGA graphics card, Generic extended super VGA monitor
    card = "Unknown video card"
    driver = "vga"
    videoram = 0
    monitor = "Unknown monitor"
    hsync = "31.5 - 37.9"
    vsync = "50 - 61"
    dpms = 0
    resolution = "800x600"
    depth = 8
    options = [ ]

    # keyboard
    (kbd_layout, kbd_model, kbd_variant, kbd_options) = \
        keyboard.keyboard_models[ks["keyboard"]]

    _card = None
    _driver = None
    _options = [ ]
    if ks["xconfig"].has_key("card"):
        try:
            cards = hwdata.Cards(buildroot)
        except Exception as msg:
            log.warning("Loading of card database failed.")
            flog.info1(msg, nofmt=1)
        else:
            dict = cards.get(ks["xconfig"]["card"])
            if dict and dict.has_key("driver"):
                _card = ks["xconfig"]["card"]
                _driver = dict["driver"]
                if dict.has_key("options"):
                    _options.extend(dict["options"])
            else:
                log.error("Card not found in hardware database.")
def converter_keys(definitions: List[List[str]], language: str) -> List[Key]:
    """
    Convert all unique keys received from words' definitions to DB objects
    :param definitions: List of definitions received from a text file
        using function convert_file_to_list
    :param language: Keys' language
    :return: List with Key object(s)
    """
    log.info("Start collecting dictionary keys")

    def dict_from_word_definition(wd_line: List[str]) -> dict:
        """
        Create a python dict from a file's line
        :param wd_line: file's line as list of str
        :return: dict with keys:
            "id_old", "position", "usage", "grammar_code", "body", "case_tags"
        """
        keys = (
            "id_old",
            "position",
            "usage",
            "grammar_code",
            "body",
            "case_tags",
        )
        return dict(zip(keys, wd_line))

    def keys_from_string(string: str) -> List[str]:
        key_pattern = r"(?<=\«)(.+?)(?=\»)"
        return re.findall(key_pattern, string)

    all_keys = []
    without_keys_count = 0

    for definition_line in definitions:
        body = dict_from_word_definition(definition_line)["body"]
        keys_from_line = keys_from_string(body)
        if not keys_from_line:
            without_keys_count += 1
        else:
            all_keys.extend(keys_from_line)

    unique_keys = sorted(set(all_keys))

    log.info("Total number of keys:\t\t%s", len(all_keys))
    log.info("Unique keys from all:\t\t%s", len(unique_keys))
    if without_keys_count:
        log.warning("Definitions without keys:\t%s", without_keys_count)
    log.info("Finish collecting dictionary keys\n")

    return [
        Key(**{
            "word": key,
            "language": language,
        }) for key in unique_keys
    ]
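# A small illustration of the key extraction above: keys are the substrings
# wrapped in «...» inside a definition's body (the fifth field of each line).
# The definition line itself is made up for this sketch.
sample_line = ["1234", "1.1", "", "v", "to see «eye», to look at «look»", ""]
keys = converter_keys([sample_line], language="en")
# -> two Key objects, for the words "eye" and "look"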
def set_boot(self, i):
    partition = self["partition"]
    if i in partition.keys():
        if partition[i].ped_partition.type & parted.PARTITION_EXTENDED:
            log.warning("Partition %d is an extended partition, "
                        "unable to set boot flag.", i)
            return 0
        return partition[i].ped_partition.set_flag(parted.PARTITION_BOOT, 1)
    return 0
def update_weather_data(self, weather_getter=weather_for_user_input):
    self.user_input = request.args.get('user_input', self.previous.user_input)
    log.info('using weather API for %s', self.user_input)
    self.weather_data, self.location = weather_getter(self.user_input, API_KEY)
    if not self.weather_data:
        log.warning("didn't get any results from weather API")
    db.session.add(Lookup(self.user_input, self.location))
    db.session.commit()
    self.data_string = jsonify(self.weather_data[:int(self.num_hours / 3)])
def _set_dict(orig_attr, orig_dict, result_attr, result_dict):
    if orig_attr not in orig_dict or orig_dict[orig_attr] is None:
        result_dict[result_attr] = None
        return False
    try:
        result_dict[result_attr] = unidecode.unidecode(
            orig_dict[orig_attr].replace('.', ''))
    except Exception:
        log.warning("Cannot encode {} to ASCII. Skipping...".format(
            orig_dict[orig_attr]))
    return True
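# Example of the helper above (the field values are made up): the source field
# is copied into the result dict with dots stripped and non-ASCII characters
# transliterated by unidecode; a missing or None field becomes None and the
# function returns False instead.
src = {"city": "São Paulo S.A."}
dst = {}
_set_dict("city", src, "city_ascii", dst)
# dst == {"city_ascii": "Sao Paulo SA"}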
def db_link_complexes(words: List[List[str]]) -> None:
    """
    Create relations in DB between:
        primitives and derivative complexes,
        primitives and derivative small words,
        small words and combinations based on them
    :param words: List of words' data received from a text file
        using function convert_file_to_list
    :return: None
    """
    log.info("Start to create relations between primitives and their derivatives")

    def get_elements_from_str(set_as_str: str, separator: str = " | ") -> list:
        return [element.strip() for element in set_as_str.split(separator)]

    all_words = Word.get_all()
    all_word_names = [w.name for w in all_words]

    for item in words:
        if not item[10]:  # Skip if the 'Used In' field does not exist
            continue

        # In principle there should always be exactly one parent here
        # parents = Word.query.filter(Word.id_old == int(item[0])).all()
        parents = [word for word in all_words if word.id_old == int(item[0])]  # LOCAL

        if len(parents) > 1:
            log.warning("There are %s parents for this word!\n%s", len(parents),
                        [parent.name for parent in parents])

        for parent in parents:
            child_names = get_elements_from_str(item[10])
            # children = Word.query.filter(Word.name.in_(child_names)).order_by(Word.id.asc()).all()
            children = [w for w in all_words if (w and (w.name in child_names))]  # LOCAL
            children = [child for child in children if child]

            # Log any word from the 'Used In' list that is not present in the DB
            [log.debug(parent, child) for child in child_names
             if child not in all_word_names]

            parent.add_children(children)
            log_text = f"{parent.name} {' ' * (26 - len(parent.name))}-> {child_names}"
            log.debug(log_text)
    db.session.commit()
    log.info("Finish to create relations between primitives and their derivatives")
def get_mutable(name, data_id, ver_min=None, ver_max=None, ver_check=None, conf=None):
    """
    get_mutable
    """
    if conf is None:
        conf = config.get_config()

    user = get_name_record(name)
    if 'error' in user:
        # no user data
        return {'error': "Unable to load user record: %s" % user['error']}

    # find the mutable data ID
    data_route = user_db.get_mutable_data_route(user, data_id)
    if data_route is None:
        # no data
        return {'error': 'No such route'}

    # go and fetch the data
    data = storage.get_mutable_data(data_route, ver_min=ver_min,
                                    ver_max=ver_max, ver_check=ver_check)
    if data is None:
        # no data
        return {'error': 'No mutable data found'}

    # what's the expected version of this data?
    expected_version = load_mutable_data_version(conf, name, data_id, try_remote=False)
    if expected_version is not None:
        if expected_version > data['ver']:
            return {'error': 'Stale data',
                    'ver_minimum': expected_version,
                    'ver_received': data['ver']}
    elif ver_check is None:
        # we don't have a local version, and the caller didn't check it.
        log.warning("Unconfirmed version for data '%s'" % data_id)
        data['warning'] = "Unconfirmed version"

    # remember latest version
    if data['ver'] > expected_version:
        store_mutable_data_version(conf, data_id, data['ver'])

    # include the route
    data['route'] = data_route
    return data
def run_ks_script(dict, chroot):
    interpreter = "/bin/sh"
    if dict.has_key("interpreter"):
        interpreter = dict["interpreter"]
    (status, rusage, msg) = runScript(interpreter, dict["script"], chroot=chroot)
    if msg and msg != "":
        flog.info1(msg, nofmt=1)
    if status != 0:
        if dict.has_key("erroronfail"):
            log.error("Script failed, aborting.")
            return 0
        else:
            log.warning("Script failed.")
    return 1
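# A minimal sketch of the kickstart script dict this helper expects; the keys
# ("interpreter", "script", "erroronfail") are the ones read above, while the
# script body and chroot path are hypothetical.
post_script = {
    "interpreter": "/bin/sh",
    "script": "echo 'hello from kickstart' > /root/post.log\n",
    "erroronfail": None,   # presence alone makes a failed script abort
}
run_ks_script(post_script, "/mnt/sysimage")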
def post(self):
    if not ALLOW_REGISTRATION:
        log.warning('{} tried to register even though registration is closed!'.format(
            request.remote_addr))
        return {
            "message": "Sorry, but you can't register right now. We have a limited number of users."
        }, 401

    data = schema.load(request.get_json())

    if User.find_by_name(data.username):
        return {
            "message": "This username already exists in our database. Try to login or reset the password"
        }, 401
    if User.find_by_email(data.email):
        return {
            "message": "This email already exists in our database. Try to login or reset the password"
        }, 401

    data.uuid = str(uuid.uuid4())
    data.activity = 0

    val = Validation()
    if val.validatePassword(data.password):
        data.password = bcrypt.generate_password_hash(data.password)
    else:
        return {
            "message": "A password must be between 8 and 64 characters long and it must contain letters (uppercase and lowercase) and digits."
        }, 400

    try:
        data.save_to_db()
        log.info('A new user has been added to our database!')
        return {
            "message": "User with uuid `{}` has registered successfully".format(data.uuid)
        }, 201
    except Exception as e:
        log.error('Database error at user registration. Check the error message: {}'.format(e))
        return {
            "message": "There was an error. We can't save this user to our database."
        }, 500
def post(self):
    schema = UserSchema(partial=True)
    uuid = get_jwt_identity()
    user = User.find_by_uuid(uuid)
    data = schema.load(request.get_json())

    if not data.password:
        return {"message": "You must enter your password!"}, 401

    if bcrypt.check_password_hash(user.password, data.password):
        access_token = create_access_token(identity=uuid, fresh=True)
        refresh_token = create_refresh_token(identity=uuid)
        log.info("User `{}` refreshed his login from {}".format(
            data.username, request.remote_addr))
        return {
            "message": "Access granted. You have renewed your session",
            "access_token": access_token,
            "refresh_token": refresh_token
        }, 200

    log.warning("User `{}` failed to enter the correct password".format(
        data.username))
    return {"message": "Wrong password. Try again!"}, 401
def post(self):
    schema = UserSchema(partial=True)
    data = schema.load(request.get_json())
    user = User.find_by_email(data.email)

    if not user:
        return {"message": "This user doesn't exist in our database."}, 404

    if not bcrypt.check_password_hash(user.password, data.password):
        log.warning(
            "User `{}` failed to enter the correct password. The request was made from `{}`"
            .format(user.username, request.remote_addr))
        return {"message": "Invalid credentials!"}, 401

    access_token = create_access_token(identity=user.uuid, fresh=True)
    refresh_token = create_refresh_token(identity=user.uuid)
    log.info("User `{}` has logged in from {}.".format(
        user.username, request.remote_addr))
    # current_app.logger.info('%s logged in successfully', user.username)
    return {
        "message": "Successfully logged in!",
        "access_token": access_token,
        "refresh_token": refresh_token
    }, 200
def __merge(self, file_fullnames):
    # Write the files from the list, one per line, into <local ip>.samples.dat
    if win:
        output_filename = gethostbyname(gethostname()) + '.samples.dat'
    else:
        cmd = "ifconfig|grep 'inet addr:'|grep -v '127.0.0.1'|cut -d: -f2|awk '{print $1}'|head -1"
        output_filename = os.popen(cmd).read().strip() + '.samples.dat'
    with open(os.path.join(self.__OutputPath, output_filename), 'w',
              encoding='utf-8') as fp:
        for file_fullname in file_fullnames:
            log.info('Sampling ' + file_fullname)
            current_position = fp.tell()
            try:
                fp.write('\n' + file_fullname + '\t')
                for line in self.__readLine(self, file_fullname):
                    fp.write(line.replace('\n', '\0'))
            except Exception as err:
                log.warning(file_fullname + '\t' + str(err))
                fp.seek(current_position)
                continue
def _tag_flac(self, file):
    """
    Tag Flac file
    only called from `track.tag()`
    """
    tagger = Tagger(self, '.flac')
    tag = FLAC(file)
    tag.delete()
    for tag_obj in tagger.tag_map:
        tag[tag_obj.key] = str(tag_obj.value)

    # image
    if cc.tag_cover and self.album.picture_url is not None:
        cover_data = self.get_cover_data()
        if cover_data:
            img = Picture()
            img.type = 3
            img.data = cover_data
            tag.clear_pictures()
            tag.add_picture(img)
        else:
            log.warning(f'No Cover for {self}')
    tag.save()
def _tag_mp3(self, file):
    """
    Tag Mp3 file
    only called from `track.tag()`
    """
    tagger = Tagger(self, '.mp3')
    tag = EasyMP3(file)
    EasyID3.RegisterTextKey('comment', 'COMM')
    tag.delete()
    for tag_obj in tagger.tag_map:
        tag[tag_obj.key] = str(tag_obj.value)
    tag.save()

    # image
    if cc.tag_cover and self.album.picture_url is not None:
        cover_data = self.get_cover_data()
        if cover_data:
            audio = MP3(file, ID3=ID3)
            audio.tags.add(
                APIC(encoding=3,
                     mime='image/jpeg',
                     type=3,
                     desc=u'Cover',
                     data=cover_data))
            audio.save()
        else:
            log.warning(f'No Cover for {self}')

    # lyrics
    if cc.tag_lyrics:
        if self.lyrics is not None:
            tag = ID3(file)
            tag[u"USLT::'eng'"] = USLT(encoding=3,
                                       lang=u'eng',
                                       desc=u'desc',
                                       text=self.lyrics.lyrics_text)
            tag.save()
def _tag_m4a(self, file):
    tagger = Tagger(self, '.m4a')
    tag = MP4(file)
    for tag_obj in tagger.tag_map:
        if tag_obj.key in ['trkn']:
            # track number arrives as "track/total" and is stored as a tuple
            tnr, dnr = tag_obj.value.split('/')
            tag[tag_obj.key] = [(int(tnr), int(dnr))]
        elif tag_obj.key in ['disk']:
            tag[tag_obj.key] = [(int(tag_obj.value), 0)]
        elif tag_obj.key in ['tmpo']:
            tag[tag_obj.key] = [int(tag_obj.value)]
        else:
            tag[tag_obj.key] = str(tag_obj.value)

    if cc.tag_cover and self.album.picture_url is not None:
        cover_data = self.get_cover_data()
        if cover_data:
            tag['covr'] = [
                MP4Cover(cover_data, imageformat=MP4Cover.FORMAT_JPEG)
            ]
        else:
            log.warning(f'No Cover for {self}')
    tag.save()
def examine(device, chroot=None):
    command = "%s -E '%s'" % (RAID.prog, device)
    if run_script(command, chroot) != 0:
        log.error("Unable to get raid information for '%s'.", device)
        return None

    dict = { }
    for line in msg.split("\n"):
        line = line.strip()
        if not line or len(line) < 1:
            continue
        if line.find(":") != -1:
            (key, value) = line.split(":", 1)
            key = key.strip()
            value = value.strip()
            try:
                if key == "Magic":
                    dict["magic"] = value
                elif key == "UUID":
                    dict["uuid"] = value
                elif key == "Raid Level":
                    dict["level"] = long(value[4:])
                elif key == "Raid Devices":
                    dict["raid-devices"] = long(value)
                elif key == "Total Devices":
                    dict["total-devices"] = long(value)
                elif key == "Preferred Minor":
                    dict["preferred-minor"] = long(value)
                elif key == "State":
                    dict["state"] = value
                elif key == "Active Devices":
                    dict["active-devices"] = long(value)
                elif key == "Working Devices":
                    dict["working-devices"] = long(value)
                elif key == "Failed Devices":
                    dict["failed-devices"] = long(value)
                elif key == "Spare Devices":
                    dict["spare-devices"] = long(value)
                elif key == "Layout":
                    dict["layout"] = value
                elif key == "Chunk Size":
                    dict["chunk-size"] = value
            except:
                log.error("mdadm output malformed.")
                return None
        else:
            splits = line.split()
            try:
                if splits[0] == "this":
                    dict["device-number"] = long(splits[1])
            except:
                log.error("mdadm output malformed.")
                return None

    for key in [ "magic", "uuid", "level", "raid-devices", "total-devices",
                 "preferred-minor", "state", "active-devices",
                 "failed-devices", "device-number" ]:
        if not dict.has_key(key):
            log.warning("Raid information for '%s' is incomplete: %s",
                        device, key)
            return None

    dict["device"] = device
    return dict
def main_flow(): while True: try: # Устанавливаем соединение с локальной базой данных conn = sqlite3.connect('binance.db') conn.row_factory = sqlite3.Row cursor = conn.cursor() # Если не существует таблиц, их нужно создать (первый запуск) make_initial_tables(cursor) log.debug("Получаем все неисполненные ордера по БД") orders_info = get_db_open_orders(cursor) # формируем словарь из указанных пар, для удобного доступа all_pairs = { pair['quote'].upper() + pair['base'].upper(): pair for pair in pairs if pair['active'] } if orders_info: log.debug( "Получены неисполненные ордера из БД: {orders}".format( orders=[(order, orders_info[order]['order_pair']) for order in orders_info])) # Проверяем каждый неисполненный по базе ордер for order in orders_info: # Получаем по ордеру последнюю информацию по бирже stock_order_data = bot.orderInfo( symbol=orders_info[order]['order_pair'], orderId=order) order_status = stock_order_data['status'] log.debug("Состояние ордера {order} - {status}".format( order=order, status=order_status)) # Если ордер на покупку if orders_info[order]['order_type'] == 'buy': if not orders_info[order]['buy_verified']: # По ордеру не были получены сделки order_trades = get_order_trades( order_id=orders_info[order]['order_id'], pair=orders_info[order]['order_pair'], bot=bot) avg_rate = calc_buy_avg_rate(order_trades, log) if avg_rate > 0: update_buy_rate(cursor, conn, orders_info[order]['order_id'], avg_rate) else: log.debug( "Не удается вычислить цену покупки, пропуск" ) continue # Если ордер уже исполнен if order_status == 'FILLED': got_qty = float(stock_order_data['executedQty']) log.info(""" Ордер {order} выполнен, получено {exec_qty:0.8f}. Проверяем, не стоит ли создать ордер на продажу """.format(order=order, exec_qty=got_qty)) # смотрим, какие ограничения есть для создания ордера на продажу for elem in limits['symbols']: if elem['symbol'] == orders_info[order][ 'order_pair']: CURR_LIMITS = elem break else: raise Exception( "Не удалось найти настройки выбранной пары " + pair_name) got_qty = adjust_to_step( got_qty, CURR_LIMITS['filters'][2]['stepSize']) prices = bot.tickerBookTicker( symbol=orders_info[order]['order_pair']) # Берем цены покупок (нужно будет продавать по рынку) curr_rate = float(prices['bidPrice']) price_change = ( curr_rate / orders_info[order]['buy_price'] - 1) * 100 log.debug( "Цена изменилась на {r:0.8f}%, процент для продажи {sp:0.8f}" .format( r=price_change, sp=all_pairs[stock_order_data['symbol']] ['profit_markup'])) if price_change >= all_pairs[stock_order_data[ 'symbol']]['profit_markup']: # Отправляем команду на создание ордера с рассчитанными параметрами new_order = bot.createOrder( symbol=orders_info[order]['order_pair'], recvWindow=5000, side='SELL', type='MARKET', quantity="{quantity:0.{precision}f}". 
format(quantity=got_qty, precision=CURR_LIMITS[ 'baseAssetPrecision']), newOrderRespType='FULL') # Если ордер создался без ошибок, записываем данные в базу данных if 'orderId' in new_order: log.info( "Создан ордер на продажу по рынку {new_order}" .format(new_order=new_order)) store_sell_order(cursor, conn, order, new_order['orderId'], got_qty, 0) order_trades = get_order_trades( order_id=new_order['orderId'], pair=pair_name, bot=bot) avg_rate = calc_sell_avg_rate( order_trades, log) if avg_rate > 0: update_sell_rate( cursor, conn, new_order['orderId'], avg_rate) else: log.debug( "Не удается вычислить цену покупки, пропуск" ) continue # Если были ошибки при создании, выводим сообщение else: log.warning( "Не удалось создать ордер на продажу {new_order}" .format(new_order=new_order)) else: log.debug("Цена не изменилась до нужного %") # Если это ордер на продажу, и он исполнен if orders_info[order][ 'order_type'] == 'sell' and not orders_info[order][ 'sell_verified']: order_trades = get_order_trades( order_id=orders_info[order]['order_id'], pair=orders_info[order]['order_pair'], bot=bot) avg_rate = calc_sell_avg_rate(order_trades, log) if avg_rate > 0: update_sell_rate(cursor, conn, orders_info[order]['order_id'], avg_rate) else: log.debug( "Не удается вычислить цену покупки, пропуск") continue if all_pairs[orders_info[order] ['order_pair']]['use_stop_loss']: if order_status == 'FILLED' and orders_info[order][ 'order_type'] == 'buy': curr_rate = float( bot.tickerPrice(symbol=orders_info[order] ['order_pair'])['price']) if (1 - curr_rate / orders_info[order]['buy_price'] ) * 100 >= all_pairs[orders_info[order][ 'order_pair']]['stop_loss']: log.debug( "{pair} Цена упала до стоплосс (покупали по {b:0.8f}, сейчас {s:0.8f}), пора продавать" .format( pair=orders_info[order]['order_pair'], b=orders_info[order]['buy_price'], s=curr_rate)) # Получаем лимиты пары с биржи for elem in limits['symbols']: if elem['symbol'] == orders_info[order][ 'order_pair']: CURR_LIMITS = elem break else: raise Exception( "Не удалось найти настройки выбранной пары " + orders_info[order]['order_pair']) new_order = bot.createOrder( symbol=orders_info[order]['order_pair'], recvWindow=15000, side='SELL', type='MARKET', quantity="{quantity:0.{precision}f}". 
format(quantity=orders_info[order] ['buy_amount'], precision=CURR_LIMITS[ 'baseAssetPrecision']), ) if 'orderId' in new_order: log.info( "Создан ордер на продажу по рынку {new_order}" .format(new_order=new_order)) store_sell_order(cursor, conn, order, new_order['orderId'], got_qty, 0) order_trades = get_order_trades( order_id=new_order['orderId'], pair=orders_info[order]['order_pair'], bot=bot) avg_rate = calc_sell_avg_rate( order_trades, log) if avg_rate > 0: update_sell_rate( cursor, conn, new_order['orderId'], avg_rate) else: log.debug( "Не удается вычислить цену покупки, пропуск" ) continue else: log.debug("Неисполненных ордеров в БД нет") log.debug( 'Получаем из настроек все пары, по которым нет неисполненных ордеров' ) # Получаем из базы все ордера, по которым есть торги, и исключаем их из списка, по которому будем создавать новые ордера for row in get_db_running_pairs(cursor): del all_pairs[row] # Если остались пары, по которым нет текущих торгов if all_pairs: log.debug( 'Найдены пары, по которым нет неисполненных ордеров: {pairs}' .format(pairs=list(all_pairs.keys()))) for pair_name, pair_obj in all_pairs.items(): try: log.debug( "Работаем с парой {pair}".format(pair=pair_name)) # Получаем лимиты пары с биржи for elem in limits['symbols']: if elem['symbol'] == pair_name: CURR_LIMITS = elem break else: raise Exception( "Не удалось найти настройки выбранной пары " + pair_name) log.debug("Проверяем индикаторы") # Получаем свечи и берем цены закрытия, high, low klines = bot.klines(symbol=pair_name.upper(), interval=TIMEFRAME, limit=KLINES_LIMITS) klines = klines[:len(klines) - int(USE_OPEN_CANDLES)] closes = [float(x[4]) for x in klines] high = [float(x[2]) for x in klines] low = [float(x[3]) for x in klines] # Скользящая средняя sma_5 = ta.SMA(closes, 5) sma_100 = ta.SMA(closes, 100) ema_5 = ta.EMA(closes, 5) ema_100 = ta.EMA(closes, 100) enter_points = 0 if ema_5[-1] > ema_100[-1] and sma_5[-1] > sma_100[-1]: # Быстрая EMA выше медленной и быстрая SMA выше медленной, считаем, что можно входить enter_points += 1 macd, macdsignal, macdhist = ta.MACD(closes, 12, 26, 9) if macd[-1] > macdsignal[-1] and macdhist[-1] > 0: # Линия макд выше сигнальной и на гистограмме они выше нуля enter_points += 1.3 rsi_9 = ta.RSI(closes, 9) rsi_14 = ta.RSI(closes, 14) rsi_21 = ta.RSI(closes, 21) if rsi_9[-1] < 70 and rsi_14[-1] < 70 and rsi_21[ -1] < 70: # RSI не показывает перекупленности enter_points += 2 fast, slow = ta.STOCH(high, low, closes, 5, 3, 3) if fast[-1] > slow[-1]: # Быстрая линия стохастика выше медленной, вход enter_points += 1.5 fast, slow = ta.STOCHRSI(closes, 14, 3, 3) if fast[-1] > slow[-1]: # Быстрая линия STOCHRSI выше медленной, вход enter_points += 1.8 upper, middle, lower = ta.BBANDS(closes, ma_period=21) if high[-1] > upper[-1]: # Свеча пробила верхнюю полосу Боллинджера enter_points += 3 log.debug( "Свеча набрала {b} баллов".format(b=enter_points)) if enter_points < POINTS_TO_ENTER: log.debug( "Минимальный проходной балл {b}. Пропуск пары". 
format(b=POINTS_TO_ENTER)) continue # Получаем балансы с биржи по указанным валютам balances = { balance['asset']: float(balance['free']) for balance in bot.account()['balances'] if balance['asset'] in [pair_obj['base'], pair_obj['quote']] } log.debug("Баланс {balance}".format(balance=[ "{k}:{bal:0.8f}".format(k=k, bal=balances[k]) for k in balances ])) # Если баланс позволяет торговать - выше лимитов биржи и выше указанной суммы в настройках if balances[pair_obj['base']] >= pair_obj['spend_sum']: prices = bot.tickerBookTicker(symbol=pair_name) # Берем цены продаж (продажа будет по рынку) top_price = float(prices['askPrice']) # Рассчитываем кол-во, которое можно купить на заданную сумму, и приводим его к кратному значению my_amount = adjust_to_step( pair_obj['spend_sum'] / top_price, CURR_LIMITS['filters'][2]['stepSize']) # Если в итоге получается объем торгов меньше минимально разрешенного, то ругаемся и не создаем ордер if my_amount < float( CURR_LIMITS['filters'][2] ['stepSize']) or my_amount < float( CURR_LIMITS['filters'][2]['minQty']): log.warning(""" Минимальная сумма лота: {min_lot:0.8f} Минимальный шаг лота: {min_lot_step:0.8f} На свои деньги мы могли бы купить {wanted_amount:0.8f} После приведения к минимальному шагу мы можем купить {my_amount:0.8f} Покупка невозможна, выход. Увеличьте размер ставки """.format( wanted_amount=pair_obj['spend_sum'] / top_price, my_amount=my_amount, min_lot=float( CURR_LIMITS['filters'][2]['minQty']), min_lot_step=float(CURR_LIMITS['filters'] [2]['stepSize']))) continue # Итоговый размер лота trade_am = top_price * my_amount # Если итоговый размер лота меньше минимального разрешенного, то ругаемся и не создаем ордер if trade_am < float( CURR_LIMITS['filters'][3]['minNotional']): raise Exception( """ Итоговый размер сделки {trade_am:0.8f} меньше допустимого по паре {min_am:0.8f}. Увеличьте сумму торгов (в {incr} раз(а))""" .format(trade_am=trade_am, min_am=float(CURR_LIMITS['filters'] [3]['minNotional']), incr=float(CURR_LIMITS['filters'] [3]['minNotional']) / trade_am)) log.debug( 'Рассчитан ордер на покупку по рынку: кол-во {amount:0.8f}, примерный курс: {rate:0.8f}' .format(amount=my_amount, rate=top_price)) # Отправляем команду на бирже о создании ордера на покупку с рассчитанными параметрами new_order = bot.createOrder( symbol=pair_name, recvWindow=5000, side='BUY', type='MARKET', quantity="{quantity:0.{precision}f}".format( quantity=my_amount, precision=CURR_LIMITS['baseAssetPrecision'] ), newOrderRespType='FULL') # Если удалось создать ордер на покупку, записываем информацию в БД if 'orderId' in new_order: log.info("Создан ордер на покупку {new_order}". format(new_order=new_order)) add_db_new_order(cursor, conn, pair_name, new_order['orderId'], my_amount, top_price) # Получить итоговую цену ордера log.debug( 'Получаем сделки и вычисляем комиссию') order_trades = get_order_trades( order_id=new_order['orderId'], pair=pair_name, bot=bot) avg_rate = calc_buy_avg_rate(order_trades, log) if avg_rate > 0: update_buy_rate(cursor, conn, new_order['orderId'], avg_rate) else: log.warning( "Не удалось создать ордер на покупку! {new_order}" .format(new_order=str(new_order))) else: log.warning( 'Для создания ордера на покупку нужно минимум {min_qty:0.8f} {curr}, выход' .format(min_qty=pair_obj['spend_sum'], curr=pair_obj['base'])) except: log.exception("Пропускаем пару " + pair_name) else: log.debug('По всем парам есть неисполненные ордера') except Exception as e: log.exception(e) finally: conn.close()
def remove(self, device):
    if device not in self.devices:
        log.warning("Device '%s' is not in use.", device)
        return 1
    self.devices.remove(device)
    return 1
def run(self):
    with open(self.__SampleListFile, 'w', encoding='utf-8') as fp:
        scaned_files, sampled_files, err_counters = 0, 0, [0, 0, 0, 0, 0, 0]
        for initial_path in self.__InitialPaths:
            for dir_path, dir_names, file_names in os.walk(initial_path):
                if False in [not match(excluded_path, dir_path)
                             for excluded_path in self.__ExcludedPaths]:
                    # skip excluded directories
                    dir_names[:] = []  # and do not descend into their sub-directories
                    continue
                if not os.access(dir_path, os.X_OK | os.R_OK):
                    # some directories slip past the per-subdirectory check below!
                    log.warning('[Permission Denied:] ' + dir_path)
                    continue
                for dir_name in dir_names:
                    # remove sub-directories we cannot enter from the walk and log a warning
                    dir_fullname = os.path.join(dir_path, dir_name)
                    if not os.access(dir_fullname, os.X_OK | os.R_OK):
                        dir_names.remove(dir_name)
                        log.warning('[Permission denied:] ' + dir_fullname)
                if len(file_names) > self.__MaxFiles:
                    # a directory with very many files is most likely a data directory, not logs
                    log.warning('[Too Many Files]( ' + str(len(file_names)) +
                                '), Ignoring:' + dir_path)
                    continue
                timer = time.time()
                for file_name in file_names:
                    try:
                        scaned_files += 1
                        if scaned_files % 1000 == 0:
                            log.info(
                                'Files scaned:[%d], error[%d], inactive[%d], small[%d], wrong-type[%d], non-text[%d], candidate[%d]\t%s'
                                % (scaned_files, err_counters[0], err_counters[1],
                                   err_counters[2], err_counters[3],
                                   err_counters[4] + err_counters[5],
                                   sampled_files, dir_path))
                        if time.time() - timer > self.__MaxSeconds:
                            # too slow to scan this folder, give up on it
                            log.warning('[Too slow to scan, Ignoring:]( ' + dir_path)
                            break
                        time.sleep(self.__SleepSeconds)  # avoid hogging system resources
                        file_fullname = os.path.join(dir_path, file_name)
                        rc = Judger.filter(file_fullname)
                        if type(rc) is int:  # the file is not a log candidate, skip it
                            err_counters[rc] += 1
                            continue
                        print(file_fullname, file=fp)
                        sampled_files += 1
                    except Exception as err:
                        # garbled directory/file names have caused charset errors when writing to fp
                        log.error(str(err))
        log.info(
            'Finish scan:[%d], error[%d], inactive[%d], small[%d], wrong-type[%d], non-text[%d], candidate[%d]'
            % (scaned_files, err_counters[0], err_counters[1], err_counters[2],
               err_counters[3], err_counters[4] + err_counters[5], sampled_files))
def getPackages(self, ks, languages, all_comps, has_raid, fstypes): groups = [ ] pkgs = [ ] everything = False if ks.has_key("packages") and \ ks["packages"].has_key("groups") and \ len(ks["packages"]["groups"]) > 0: groups = ks["packages"]["groups"] # add default group "base" and "core if it is not in groups and # nobase is not set if not ks.has_key("packages") or not ks["packages"].has_key("nobase"): if not "base" in groups: groups.append("base") if not "core" in groups: groups.append("core") if all_comps: repos = self.repos.keys() else: repos = self.base_repo_names if "everything" in groups: for repo in repos: for group in self.repos[repo].comps.getGroups(): if not group in groups: groups.append(group) groups.remove("everything") everything = True if ks.has_key("packages") and ks["packages"].has_key("add") and \ "*" in ks["packages"]["add"]: # add all packages for repo in self.repos.keys(): pkgs.extend(self.repos[repo].getNames()) else: # add default desktop if ks.has_key("xconfig"): if ks["xconfig"].has_key("startxonboot"): if not "base-x" in groups: log.info1("Adding group 'base-x'.") groups.append("base-x") desktop = "GNOME" if ks["xconfig"].has_key("defaultdesktop"): desktop = ks["xconfig"]["defaultdesktop"] desktop = "%s-desktop" % desktop.lower() if not desktop in groups: log.info1("Adding group '%s'.", desktop) groups.append(desktop) normalizeList(groups) # test if groups are available repo_groups = { } for group in groups: found = False for repo in repos: if not self.repos[repo].comps: continue _group = self.repos[repo].comps.getGroup(group) if not _group: continue found = True if not _group in repo_groups.keys() or \ not repo in repo_groups[_group]: repo_groups.setdefault(_group, [ ]).append(repo) if not found: log.warning("Group '%s' does not exist.", group) del groups # add packages for groups for group in repo_groups: for repo in repo_groups[group]: comps = self.repos[repo].comps for pkg in comps.getPackageNames(group): if len(self.repos[repo].searchPkgs([pkg])) > 0: if not pkg in pkgs: pkgs.append(pkg) if everything: # add all packages in this group for (pkg, req) in \ comps.getConditionalPackageNames(group): if len(self.repos[repo].searchPkgs([pkg])) > 0: if not pkg in pkgs: pkgs.append(pkg) del repo_groups # add packages if ks.has_key("packages") and ks["packages"].has_key("add"): for name in ks["packages"]["add"]: if name == "*": continue found = False for repo in self.repos.keys(): _pkgs = self.repos[repo].searchPkgs([name]) if len(_pkgs) > 0: # silently add package if not name in pkgs: pkgs.append(name) found = True break if not found: log.warning("Package '%s' is not available.", pkg) # remove packages if ks.has_key("packages") and ks["packages"].has_key("drop"): for pkg in ks["packages"]["drop"]: if pkg in pkgs: log.info1("Removing package '%s'.", pkg) pkgs.remove(pkg) # add xorg driver package for past FC-5, RHEL-4 if ks.has_key("xconfig"): if (self.isRHEL() and self.cmpVersion("4.9") > 0) or \ (self.isFedora() and self.cmpVersion("4") > 0): if ks["xconfig"].has_key("driver"): self._addPkg("xorg-x11-drv-%s" % ks["xconfig"]["driver"], pkgs) else: if not "xorg-x11-drivers" in pkgs: self._addPkg("xorg-x11-drivers", pkgs) # add packages for needed filesystem types for fstype in fstypes: if fstype == "swap": continue self._addPkgByFilename("/sbin/mkfs.%s" % fstype, pkgs, "%s filesystem creation" % fstype) # add comps package if not "comps" in pkgs: try: self._addPkg("comps", pkgs) except: # ignore missing comps package pass # append mdadm if has_raid: 
self._addPkgByFilename("/sbin/mdadm", pkgs, "raid configuration") # append authconfig if ks.has_key("authconfig"): self._addPkgByFilename("/usr/sbin/authconfig", pkgs, "authentication configuration") # append iptables and config tool if ks.has_key("firewall") and \ not ks["firewall"].has_key("disabled"): self._addPkg("iptables", pkgs) # no firewall config tool in RHEL-3 if (self.isRHEL() and self.cmpVersion("4") >= 0) or \ (self.isFedora() and self.cmpVersion("3") >= 0): self._addPkgByFilename("/usr/sbin/lokkit", pkgs, "firewall configuration") # append lokkit if ks.has_key("selinux") and \ ((self.isRHEL() and self.cmpVersion("4") >= 0) or \ (self.isFedora() and self.cmpVersion("3") >= 0)): self._addPkgByFilename("/usr/sbin/lokkit", pkgs, "selinux configuration") # append kernel if not "kernel" in pkgs and not "kernel-smp" in pkgs: self._addPkg("kernel", pkgs) # append kernel-devel for FC-6 and RHEL-5 if "gcc" in pkgs and \ ((self.isRHEL() and self.cmpVersion("5") >= 0 and \ (self.getVariant() != "Client" or \ "%s-Workstation" % (self.release) in self.repos.keys())) or \ (self.isFedora() and self.cmpVersion("6") >= 0)): if "kernel" in pkgs: self._addPkg("kernel-devel", pkgs) elif "kernel-smp" in pkgs: self._addPkg("kernel-smp-devel", pkgs) # append firstboot if ks.has_key("firstboot") and \ not ks["firstboot"].has_key("disabled"): self._addPkg("firstboot", pkgs) # append dhclient if ks.has_key("bootloader"): self._addPkg("grub", pkgs) # if self.getArch() == "ia64": # self._addPkg("elilo", pkgs) # elif self.getArch in [ "s390", "s390x" ]: # self._addPkg("s390utils", pkgs) # elif self.getArch() in [ "ppc", "ppc64" ]: # self._addPkg("yaboot", pkgs) # else: # self._addPkg("grub", pkgs) # append grub if ks.has_key("network") and len(ks["network"]) > 0: for net in ks["network"]: if net["bootproto"] == "dhcp": self._addPkg("dhclient", pkgs) # languages (pre FC-6 and pre RHEL-5) if len(languages) > 0: for repo in repos: _repo = self.repos[repo] if not _repo.comps: continue for group in _repo.comps.grouphash.keys(): self._compsLangsupport(pkgs, _repo.comps, languages, group) normalizeList(pkgs) return pkgs
def load(self, ks, dir, beta_key_verify=False): self.dir = dir self.exclude = None # mount source to dir if ks.has_key("cdrom"): self.url = mount_cdrom(dir) if ks["cdrom"].has_key("exclude"): self.exclude = ks["cdrom"]["exclude"] elif ks.has_key("nfs"): opts = None if ks["nfs"].has_key("opts"): opts = ks["nfs"]["opts"] self.url = mount_nfs("nfs://%s:%s" % \ (ks["nfs"]["server"], ks["nfs"]["dir"]), dir, options=opts) if ks["nfs"].has_key("exclude"): self.exclude = ks["nfs"]["exclude"] else: self.url = ks["url"]["url"] if ks["url"].has_key("exclude"): self.exclude = ks["url"]["exclude"] # create network cache self.cache = NetworkCache([ self.url ], cachedir=rpmconfig.cachedir) # get source information via .discinfo file if not self.cache.cache(".discinfo"): log.error("No .discinfo for '%s'", self.url) return 0 di = get_discinfo(self.cache.cache(".discinfo")) if not di: log.error("Getting .discinfo for '%s' failed.", self.url) return 0 (self.name, self.version, self.arch) = di if self.name.startswith("Red Hat Enterprise Linux"): self.variant = self.name[24:].strip() self.id = "RHEL" self.prefix = "RedHat" elif self.name.startswith("Fedora"): self.variant = "" self.id = "FC" self.prefix = "Fedora" else: log.error("Unknown source '%s'.", self.name) return 0 self.release = "%s-%s" % (self.id, self.version) log.info1("Installation source: %s %s [%s]", self.name, self.version, self.arch) # load repos repos = [ ] self.yumconf = YumConf(self.version, self.arch, rpmdb=None, filenames=[ ], reposdirs=[ ]) if self.isRHEL() and self.cmpVersion("4.9") >= 0: # RHEL-5 key = None skip = False if ks.has_key("key"): key = ks["key"].keys()[0] if ks["key"][key].has_key("skip"): skip = True key = None inum = None if key and not skip and not beta_key_verify: if not instnum: log.warning("Unable to verify installation key, " "module instkey is missing.. 
using default" "installation.") else: try: inum = instnum.InstNum(key) except: log.error("Installation key '%s' is not valid.", key) return 0 if inum: if inum.get_product_string().lower() != \ self.variant.lower(): log.error("Installation key for '%s' does not match " "'%s' media.", inum.get_product_string().lower(), self.variant.lower()) return 0 for name, path in inum.get_repos_dict().items(): if path == "VT" and \ not self.arch in [ "i386", "x86_64", "ia64" ]: continue repos.append(path) else: # BETA if self.variant == "Server": repos.append("Server") if key and key.find("C") >= 0: repos.append("Cluster") if key and key.find("S") >= 0: repos.append("ClusterStorage") elif self.variant == "Client": repos.append("Client") if key and key.find("W") >= 0: repos.append("Workstation") if self.arch in [ "i386", "x86_64", "ia64" ]: if key and key.find("V") >= 0: repos.append("VT") for repo in repos: repo_name = "%s-%s" % (self.release, repo) if repo in self.repos: log.error("Repository '%s' already defined.", repo_name) return 0 log.info1("Loading repo '%s'", repo_name) # create yumconf self.yumconf[repo_name] = { } self.yumconf[repo_name]["baseurl"] = [ "%s/%s" % (self.url, repo) ] if self.exclude: self.yumconf[repo_name]["exclude"] = self.exclude _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo_name) self.repos[repo_name] = _repo if not _repo.read(): log.error("Could not load repository '%s'.", repo_name) return 0 else: # RHEL <= 4 # FC repo = self.release self.yumconf[repo] = { } self.yumconf[repo]["baseurl"] = [ self.url ] if self.exclude: self.yumconf[repo]["exclude"] = self.exclude _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo) self.repos[repo] = _repo if not _repo.read(): log.error("Could not load repository '%s'.", repo) return 0 if not _repo.comps: # every source repo has to have a comps log.error("Missing comps file for '%s'.", repo) return 0 self.base_repo_names = self.repos.keys() if not ks.has_key("repo"): return 1 for repo in ks["repo"]: if repo in self.repos: log.error("Repository '%s' already defined.", repo) return 0 log.info1("Loading repository '%s'", repo) self.yumconf[repo] = { } url = ks["repo"][repo]["baseurl"] if url[:6] == "nfs://": d = "%s/%s" % (dir, repo) create_dir("", d) url = mount_nfs(url, d) self.yumconf[repo]["baseurl"] = [ url ] if ks["repo"][repo].has_key("exclude"): self.yumconf[repo]["exclude"] = ks["repo"][repo]["exclude"] if ks["repo"][repo].has_key("mirrorlist"): self.yumconf[repo]["mirrorlist"] = \ ks["repo"][repo]["mirrorlist"] _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo) self.repos[repo] = _repo if not _repo.read(): log.error("Could not load repository '%s'.", repo) return 0 return 1
def load(self, ks, dir, beta_key_verify=False):
    self.dir = dir
    self.exclude = None
    # mount source to dir
    if ks.has_key("cdrom"):
        self.url = mount_cdrom(dir)
        if ks["cdrom"].has_key("exclude"):
            self.exclude = ks["cdrom"]["exclude"]
    elif ks.has_key("nfs"):
        opts = None
        if ks["nfs"].has_key("opts"):
            opts = ks["nfs"]["opts"]
        self.url = mount_nfs("nfs://%s:%s" % \
                             (ks["nfs"]["server"], ks["nfs"]["dir"]),
                             dir, options=opts)
        if ks["nfs"].has_key("exclude"):
            self.exclude = ks["nfs"]["exclude"]
    else:
        self.url = ks["url"]["url"]
        if ks["url"].has_key("exclude"):
            self.exclude = ks["url"]["exclude"]
    # create network cache
    self.cache = NetworkCache([self.url], cachedir=rpmconfig.cachedir)
    # get source information via .discinfo file
    if not self.cache.cache(".discinfo"):
        log.error("No .discinfo for '%s'", self.url)
        return 0
    di = get_discinfo(self.cache.cache(".discinfo"))
    if not di:
        log.error("Getting .discinfo for '%s' failed.", self.url)
        return 0
    (self.name, self.version, self.arch) = di
    if self.name.startswith("Red Hat Enterprise Linux"):
        self.variant = self.name[24:].strip()
        self.id = "RHEL"
        self.prefix = "RedHat"
    elif self.name.startswith("Fedora"):
        self.variant = ""
        self.id = "FC"
        self.prefix = "Fedora"
    else:
        log.error("Unknown source '%s'.", self.name)
        return 0
    self.release = "%s-%s" % (self.id, self.version)
    log.info1("Installation source: %s %s [%s]",
              self.name, self.version, self.arch)
    # load repos
    repos = []
    self.yumconf = YumConf(self.version, self.arch, rpmdb=None,
                           filenames=[], reposdirs=[])
    if self.isRHEL() and self.cmpVersion("4.9") >= 0:
        # RHEL-5
        key = None
        skip = False
        if ks.has_key("key"):
            key = ks["key"].keys()[0]
            if ks["key"][key].has_key("skip"):
                skip = True
                key = None
        inum = None
        if key and not skip and not beta_key_verify:
            if not instnum:
                log.warning("Unable to verify installation key, "
                            "module instnum is missing; using default "
                            "installation.")
            else:
                try:
                    inum = instnum.InstNum(key)
                except:
                    log.error("Installation key '%s' is not valid.", key)
                    return 0
        if inum:
            if inum.get_product_string().lower() != \
                   self.variant.lower():
                log.error("Installation key for '%s' does not match "
                          "'%s' media.",
                          inum.get_product_string().lower(),
                          self.variant.lower())
                return 0
            for name, path in inum.get_repos_dict().items():
                if path == "VT" and \
                       not self.arch in ["i386", "x86_64", "ia64"]:
                    continue
                repos.append(path)
        else:
            # BETA
            if self.variant == "Server":
                repos.append("Server")
                if key and key.find("C") >= 0:
                    repos.append("Cluster")
                if key and key.find("S") >= 0:
                    repos.append("ClusterStorage")
            elif self.variant == "Client":
                repos.append("Client")
                if key and key.find("W") >= 0:
                    repos.append("Workstation")
            if self.arch in ["i386", "x86_64", "ia64"]:
                if key and key.find("V") >= 0:
                    repos.append("VT")
        for repo in repos:
            repo_name = "%s-%s" % (self.release, repo)
            if repo in self.repos:
                log.error("Repository '%s' already defined.", repo_name)
                return 0
            log.info1("Loading repo '%s'", repo_name)
            # create yumconf
            self.yumconf[repo_name] = {}
            self.yumconf[repo_name]["baseurl"] = ["%s/%s" % (self.url, repo)]
            if self.exclude:
                self.yumconf[repo_name]["exclude"] = self.exclude
            _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo_name)
            self.repos[repo_name] = _repo
            if not _repo.read():
                log.error("Could not load repository '%s'.", repo_name)
                return 0
    else:
        # RHEL <= 4
        # FC
        repo = self.release
        self.yumconf[repo] = {}
        self.yumconf[repo]["baseurl"] = [self.url]
        if self.exclude:
            self.yumconf[repo]["exclude"] = self.exclude
        _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo)
        self.repos[repo] = _repo
        if not _repo.read():
            log.error("Could not load repository '%s'.", repo)
            return 0
        if not _repo.comps:
            # every source repo has to have a comps
            log.error("Missing comps file for '%s'.", repo)
            return 0
    self.base_repo_names = self.repos.keys()
    if not ks.has_key("repo"):
        return 1
    for repo in ks["repo"]:
        if repo in self.repos:
            log.error("Repository '%s' already defined.", repo)
            return 0
        log.info1("Loading repository '%s'", repo)
        self.yumconf[repo] = {}
        url = ks["repo"][repo]["baseurl"]
        if url[:6] == "nfs://":
            d = "%s/%s" % (dir, repo)
            create_dir("", d)
            url = mount_nfs(url, d)
        self.yumconf[repo]["baseurl"] = [url]
        if ks["repo"][repo].has_key("exclude"):
            self.yumconf[repo]["exclude"] = ks["repo"][repo]["exclude"]
        if ks["repo"][repo].has_key("mirrorlist"):
            self.yumconf[repo]["mirrorlist"] = \
                ks["repo"][repo]["mirrorlist"]
        _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo)
        self.repos[repo] = _repo
        if not _repo.read():
            log.error("Could not load repository '%s'.", repo)
            return 0
    return 1
_card = ks["xconfig"]["card"] _driver = dict["driver"] if dict.has_key("options"): _options.extend(dict["options"]) else: log.error("Card not found in hardware database.") if not _card and ks["xconfig"].has_key("driver"): if os.path.exists(buildroot+'/usr/share/hwdata/videodrivers'): # There is no usable name in the videodrivers file, so fake it _driver = ks["xconfig"]["driver"] _card = _driver + ' (generic)' if not _card or not _driver: if ks["xconfig"].has_key("card") and \ not os.path.exists(buildroot+'/usr/share/hwdata/Cards'): log.warning("Cards database not found in '/usr/share/hwdata'.") if ks["xconfig"].has_key("driver") and \ not os.path.exists(buildroot+'/usr/share/hwdata/videodrivers'): log.warning("videodrivers database not found in " "'/usr/share/hwdata'.") log.info1("Using default X driver configuration.") else: card = _card driver = _driver options = _options if ks["xconfig"].has_key("videoram"): videoram = ks["xconfig"]["videoram"] _monitor = None _hsync = None _vsync = None
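A sketch of the ks["xconfig"] entries this fragment reads; the card, driver, and videoram values are placeholders and would normally come from the hwdata lookup above:

# Placeholder xconfig values for illustration only.
ks["xconfig"] = {
    "card": "ATI Radeon 7000",   # matched against /usr/share/hwdata/Cards
    "driver": "radeon",          # used when only the videodrivers file exists
    "videoram": 16384,           # optional, in kilobytes
}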
              stored_location)
    # No logic for Country - just set it now already
    _set_dict('country', openmaps_response['address'], 'Country',
              stored_location)
    """ Logic and options """
    if 'error' not in openmaps_response:
        _set_attributes(stored_location, '_city', '_name', '_neighbourhood')
        _set_attributes(stored_location, '_town', '_name')
        _set_attributes(stored_location, '_state', '_state_district',
                        '_name', '_county')
        _set_attributes(stored_location, '_county', '_name', '_village')

    return stored_location


if __name__ == "__main__":
    # This will be moved to the pytests
    # This function should contain just the main
    if len(sys.argv) >= 2:
        try:
            [lat, lon] = sys.argv[1].split(",")
        except ValueError as e:
            log.error("Could not parse coordinates: {}".format(str(e)))
    else:
        log.warning("Please provide the latitude and longitude as a single "
                    "parameter separated by a comma (,)\n"
                    "EX: python3 openmaps.py lat,long")
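For context, a minimal sketch of how the openmaps_response consumed above could be fetched from the OSM Nominatim reverse-geocoding endpoint; the requests usage and the User-Agent string are assumptions of this sketch, not part of the module:

import requests

def reverse_geocode(lat, lon):
    # Nominatim reverse lookup; the returned JSON carries an 'address' dict
    # with keys such as 'country', 'city', 'town', 'village', 'county' and
    # 'state_district' that the helpers above pick from.
    resp = requests.get(
        "https://nominatim.openstreetmap.org/reverse",
        params={"lat": lat, "lon": lon, "format": "json"},
        headers={"User-Agent": "photo-location-example"},  # assumed identifier
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()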
def network_config(ks, buildroot): if not os.path.exists(buildroot+"/etc/sysconfig/network-scripts"): os.mkdir(buildroot+"/etc/sysconfig/network-scripts") # generate loopback network configuration if it does not exist if not os.path.exists(buildroot+\ "/etc/sysconfig/network-scripts/ifcfg-lo"): log.info1("Adding missing /etc/sysconfig/network-scripts/ifcfg-lo.") create_file(buildroot, "/etc/sysconfig/network-scripts/ifcfg-lo", [ 'DEVICE=lo\n', 'IPADDR=127.0.0.1\n', 'NETMASK=255.0.0.0\n', 'NETWORK=127.0.0.0\n', "# If you're having problems with gated making 127.0.0.0/8 a martian,\n", "# you can change this to something else (255.255.255.255, for example)\n", 'BROADCAST=127.255.255.255\n', 'ONBOOT=yes\n', 'NAME=loopback\n' ]) _hostname = None _gateway = None if ks.has_key("network") and len(ks["network"]) > 0: # check network devices and set device for entries where no device # is specified network_devices = [ ] for net in ks["network"]: if net.has_key("device"): if net["device"] in network_devices: log.warning("'%s' is not unique.", net["device"]) else: network_devices.append(net["device"]) for net in ks["network"]: # get device or next free device device = None if net.has_key("device"): device = net["device"] if not device: i = 0 device = "eth%d" % i while device in network_devices: i += 1 device = "eth%d" % i net["device"] = device for net in ks["network"]: if not _hostname and net.has_key("hostname"): _hostname = 'HOSTNAME=%s\n' % net["hostname"] if not _gateway and net.has_key("gateway"): _gateway = 'GATEWAY=%s\n' % net["gateway"] device = net["device"] if device[:3] == "ctc": type = "CTC" elif device[:4] == "iucv": type = "IUCV" elif device[:2] == "tr": type = '"Token Ring"' else: type = "Ethernet" bootproto = "none" if net["bootproto"] and net["bootproto"] != "static": bootproto = net["bootproto"] try: fd = open(buildroot + \ "/etc/sysconfig/network-scripts/ifcfg-%s" % \ device, "w") except Exception, msg: log.error("Configuration of '/etc/sysconfig/network-scripts/" "ifcfg-%s' failed: %s", device, msg) else: fd.write('DEVICE=%s\n' % device) fd.write('BOOTPROTO=%s\n' % bootproto) if net.has_key("gateway"): fd.write('GATEWAY=%s\n' % net["gateway"]) if net.has_key("netmask"): fd.write('NETMASK=%s\n' % net["netmask"]) if net.has_key("ip"): fd.write('IPADDR=%s\n' % net["ip"]) if net.has_key("essid"): fd.write('ESSID=%s\n' % net["essid"]) if net.has_key("ethtool"): fd.write('ETHTOOL_OPTS=%s\n' % net["ethtool"]) if net.has_key("class"): fd.write('DHCP_CLASSID=%s\n' % net["class"]) if net.has_key("onboot"): fd.write('ONBOOT=%s\n' % net["onboot"]) else: fd.write('ONBOOT=yes\n') fd.write('TYPE=%s\n' % type) fd.close() if net.has_key("wepkey"): try: fd = open(buildroot + \ "/etc/sysconfig/network-scripts/keys-%s" % \ device, "w") except Exception, msg: log.error("Configuration of '/etc/sysconfig/network-" "scripts/keys-%s' failed: %s", device, msg) else: fd.write('KEY=%s\n' % net["wepkey"]) fd.close()
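As an illustration of the output, a hypothetical kickstart network entry and the ifcfg file network_config() would write for it:

# Example kickstart entry (made-up values); with it, network_config() writes
# /etc/sysconfig/network-scripts/ifcfg-eth0 containing, in this order:
#   DEVICE=eth0
#   BOOTPROTO=dhcp
#   ONBOOT=yes
#   TYPE=Ethernet
net = {"device": "eth0", "bootproto": "dhcp", "onboot": "yes"}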
        ver = int(ver_txt.strip())

        # success!
        return ver

    except ValueError, ve:
        # not an int
        log.warn("Not an integer: '%s'" % version_file_path)

    except Exception, e:
        # can't read
        log.warn("Failed to read '%s': %s" % (version_file_path, e))

    if not try_remote:
        if conf is None:
            log.warning("No config found; cannot load version for '%s'" % data_id)
        elif metadata_dir is None:
            log.warning("No metadata directory found; cannot load version for '%s'" % data_id)
        return None

    # if we got here, then we need to fetch remotely
    existing_data = get_mutable(name, data_id)
    if existing_data is None:
        # nope
        return None

    if existing_data.has_key('error'):
def do_stuff(infinite=True):
    db = DB(host=DB_HOST, port=DB_PORT, user=DB_USER, passwd=DB_PASS,
            name=DB_NAME)
    rmq = Rmq(host=RMQ_HOST, port=RMQ_PORT, user=RMQ_USER, passw=RMQ_PASS)
    pexel = PyPexels(api_key=API)
    log.info('started')
    banned = False
    while True:
        try:
            log.info('Attempting to get photos!')
            entries = set(pexel.random(per_page=PER_PAGE).entries)
            banned = False
        except PexelsError:
            word = "Still" if banned else "Got"
            log.warning(f'{word} banned on pexels, waiting 5 min.')
            banned = True
            for _ in range(6):
                rmq.connection.process_data_events()
                sleep(50)
            continue
        rejected_pics = db.seen_pictures
        for photo in entries:
            source = photo.src["original"]
            if source in rejected_pics:
                log.info(f'Already seen this({source}) picture!')
                continue
            db.add_seen_pic(source)
            pic = PictureValid(service="Pexels",
                               download_url=photo.src["original"],
                               preview_url=photo.src["large"],
                               source_url=photo.url,
                               height=int(photo.height),
                               width=int(photo.width))
            if photo.height > photo.width:
                log.info('not adding this pic because height > width, for now')
                log.debug(f'height = {photo.height}\nwidth = {photo.width}')
                continue
            log.info(f'Adding {pic}!')
            rmq.channel.basic_publish(exchange='',
                                      routing_key='check_out',
                                      body=pic.json(),
                                      properties=rmq.durable)
        if not infinite:
            return
        int_range = range(0, INTERVAL, 20)
        how_many = len(int_range)
        for _ in int_range:
            rmq.connection.process_data_events()
            sleep(INTERVAL / how_many)
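PictureValid is not defined in this snippet; a minimal pydantic (v1) sketch that would be compatible with how it is constructed and serialized above follows. It is a reconstruction under assumptions, not the project's actual model:

from pydantic import BaseModel

class PictureValid(BaseModel):
    # Field names mirror the keyword arguments used in do_stuff(); the model
    # itself is a hypothetical stand-in for the real class.
    service: str
    download_url: str
    preview_url: str
    source_url: str
    height: int
    width: int

# pic.json() (pydantic v1) then produces the JSON body published to RabbitMQ.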