# --- Fragment of a GA (DEAP-style) feature-selection script. The line below
# closes a parser.add_argument(...) call whose opening is outside this view;
# `parser`, `calc_cover`, `creator` and `base` are defined elsewhere in the
# original file. Formatting reconstructed from a flattened one-line source. ---
                    help='no transfer blocks from input file to output')
parser.add_argument('-c', '--covering', dest='covering', type=int, default=1,
                    help='amount of column which should be covered')
parser.add_argument('-l', '--result-limit', dest='results_limit', type=int, default=10,
                    help='maximal amount of result')
args = parser.parse_args()

# Load the input data file (project-local commonlib; schema not visible here).
input_file = commonlib.DataFile()
input_file.load(args.input)

# Split the fitness weight between "use few features" and "cover many rows".
# The two coefficients sum to 1.0 by construction.
# NOTE(review): divides by (features_count - covering + 1) — raises
# ZeroDivisionError when covering == features_count + 1; confirm inputs.
feature_count_penalty_koef = 1.0 / (input_file.features_count - args.covering + 1)
covering_penalty_koef = 1.0 - feature_count_penalty_koef


def fitness(individual):
    """Score a binary feature-selection individual.

    Combines two normalized penalties: the fraction of features selected
    (scaled by feature_count_penalty_koef) and the fraction of UIM rows
    left uncovered (scaled by covering_penalty_koef). Returns a 1-tuple,
    presumably because DEAP fitness values are tuples — TODO confirm.

    NOTE(review): divides by (features_count - 1) and by uim_count —
    ZeroDivisionError when features_count == 1 or uim_count == 0; verify
    such inputs cannot occur.
    """
    res = 1 - (sum(individual)-1)/(input_file.features_count-1) * feature_count_penalty_koef\
        - (input_file.uim_count - calc_cover(individual, args.covering)) / input_file.uim_count * covering_penalty_koef
    return res,


# Register DEAP types: a single-objective maximizing fitness and a
# list-based individual carrying that fitness.
creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
creator.create("Individual", list, fitness=creator.FitnessMax)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Compare a UIM data file against a reference file.

Loads both files via commonlib.DataFile and, on the first field that
differs, prints which field it was and exits with status 1.
"""
import argparse
import sys

import commonlib


def _mismatch(field_label):
    # Report the differing field and abort with a non-zero status.
    print("%s differs" % field_label)
    sys.exit(1)


arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('input', help='file for check')
arg_parser.add_argument('reference', help='file with reference uim')
options = arg_parser.parse_args()

checked = commonlib.DataFile()
checked.load(options.input)
reference = commonlib.DataFile()
reference.load(options.reference)

if checked.features_count != reference.features_count:
    _mismatch("feature_count")
if checked.uim_count != reference.uim_count:
    _mismatch("uim_count")
# NOTE(review): weights are compared only when the checked file has them;
# a reference with weights and an input without passes silently — confirm
# this asymmetry is intended.
if checked.uim_weights and checked.uim_weights != reference.uim_weights:
    _mismatch("uim_weights")
# --- Fragment of a synthetic-data generation script. `features_count` (a
# list of per-kind counters), `generator`, `genlib`, `random`, `args` and
# `commonlib` are defined/imported outside this view. Formatting
# reconstructed from a flattened one-line source. ---

# Distribute the requested number of features round-robin across the
# available feature kinds.
for i in range(args.features):
    features_count[i % len(features_count)] += 1

# Register features: features_count[i] instances of kind i, each with a
# random maximal value in [1, args.maximal] and a random absence
# probability scaled by args.absence_probability.
for i in range(len(features_count)):
    for j in range(features_count[i]):
        generator.add_feature(
            genlib.Feature(genlib.Feature.kinds[i],
                           maximal=random.randint(1, args.maximal),
                           absence_probability=random.random() * args.absence_probability))

# One single-feature pattern per requested pattern; pattern i references
# feature index i.
generator.set_patterns_length(1)
for i in range(args.patterns):
    generator.add_pattern(genlib.Pattern([i]))
generator.initialize(True)

# Generate objects; each yielded obj is a (features, pfeatures) pair —
# presumably regular vs. pattern features; verify against genlib.
features = []
pfeatures = []
for obj in generator.generate(args.objects):
    features.append(obj[0])
    pfeatures.append(obj[1])

# Assemble and persist the data file, recording per-feature value bounds.
datafile = commonlib.DataFile()
datafile.features = features
datafile.pfeatures = pfeatures
datafile.features_min = [x.minimal for x in generator.features]
datafile.features_max = [x.maximal for x in generator.features]
datafile.bake()
datafile.save(args.output)
# -*- coding: utf-8 -*- import argparse import json import commonlib import sys parser = argparse.ArgumentParser() parser.add_argument('input', help='input file for statistic calculation') parser.add_argument('--format', help='output format', choices=['plain', 'json'], default='plain') args = parser.parse_args() data = commonlib.DataFile() data.load(args.input) result = [] result.append(('lset_count', data.lset_count)) result.append(('features_count', data.features_count)) result.append(('pfeatures_count', data.pfeatures_count)) result.append(('patterns', len(data.patterns))) result.append(('features_min', data.features_min)) result.append(('features_max', data.features_max)) # skips = [0 for x in range(data.features_count)] for obj in data.features: for i in range(data.features_count):