Code example #1
def open_add_entry_window(self):
    if AugmentedMaps.debug:
        print('Opening an image map')
    # create the entry window and offset it slightly from the parent window
    self.__entryWindow = Preparation(self.database)
    pos = self.frameGeometry().topLeft()
    self.__entryWindow.move(pos.x() + 20, pos.y() + 20)
    self.__entryWindow.show()
Code example #2
File: preprocess_hncm.py Project: yangliuy/HybridNCM

import sys

if __name__ == '__main__':
    if len(sys.argv) < 4:
        print('please input params: basedir need_preprocess (1 need/ 0 no need. default 0) save_space (1 need/ 0 no need. default 1)')
        sys.exit(1)

    basedir = sys.argv[1]
    need_preprocess = sys.argv[2]
    save_space = sys.argv[3]
    # basedir = '../../../data/twitter/ModelInput-seq2seq-facts-mix-ret/'

    # transform context/response pairs into a corpus file and relation files;
    # the input files are train.txt/valid.txt/test.txt, where each line is
    # 'label \t context \t response'
    prepare = Preparation()
    # run with three files (train.txt.mz, valid.txt.mz, test.txt.mz) to generate
    # unique ids for q/d in the train/valid/test data, since these three corpus
    # files will be merged into a single file later
    corpus, rels_train, rels_valid, rels_test = prepare.run_with_train_valid_test_corpus_given_qid_did_gen_unique_id_for_genres(
        basedir + 'train.mz', basedir + 'valid.mz', basedir + 'test.mz')

    for data_part in ['train', 'valid', 'test']:
        if data_part == 'train':
            rels = rels_train
        elif data_part == 'valid':
            rels = rels_valid
        else:
            rels = rels_test
        print('total relations in', data_part, len(rels))
        prepare.save_relation(basedir + 'relation_' + data_part + '.txt', rels)
        if save_space == '0':
            pass  # this branch is truncated in the original snippet
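The comments above pin down the pair-file format: one 'label \t context \t response' triple per line. As a minimal, hypothetical sketch (parse_pair_line is not part of the project), such a line could be parsed like this:

def parse_pair_line(line):
    # split a 'label \t context \t response' line into its three fields
    label, context, response = line.rstrip('\n').split('\t')
    return int(label), context, response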
Code example #3
    N_IN = 5  # number of days used for training
    N_OUT = 1  # number of days to predict

    PARTICLE = 150  # number of PSO particles
    ITERATION = 7  # number of PSO iterations
    C1 = 2.985  # PSO cognitive coefficient
    C2 = 1.066  # PSO social coefficient
    W = 0.000482  # PSO inertia weight

    # set up the baseline model
    ann = ANN(epochs=50, batch=13, n_in=N_IN, n_out=N_OUT)

    # prepare the training data
    read_data = pd.read_csv(TRAIN_PATH)
    preparation = Preparation(df=read_data)
    data = preparation.calculate_per_change()

    # create the indicators; each call appears to add its column(s) to the
    # shared DataFrame and return it
    indicator = Indicator(data)
    indicator_data = indicator.RSI()
    indicator_data = indicator.EMA()
    indicator_data = indicator.MACD()
    indicator_data.dropna(inplace=True)

    # percent deviation of the close from its 5-day EMA
    indicator_data['Change of EMA'] = (
        (indicator_data['Close'] - indicator_data['ema_5_day']) /
        indicator_data['ema_5_day']) * 100
    data_set = indicator_data[['rsi', 'Histogram', 'Change of EMA', 'change']]
    ann.split_data_scale_transform(data_set)
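The constants above are the standard PSO hyperparameters. As a sketch only (the project's actual optimizer is not shown in the snippet), a single particle's velocity and position update would use them like this:

import random

C1, C2, W = 2.985, 1.066, 0.000482  # the values configured above

def pso_step(x, v, pbest, gbest):
    # canonical PSO update: W is the inertia weight, C1 scales the pull toward
    # the particle's personal best, C2 the pull toward the global best
    r1, r2 = random.random(), random.random()
    v = W * v + C1 * r1 * (pbest - x) + C2 * r2 * (gbest - x)
    return x + v, v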
Code example #4
File: tfidf.py Project: WhiteWalker72/PoliFLW
def __init__(self):
    self.preparation = Preparation()
Code example #5
File: main.py Project: olehsinkevych/design_patterns
from customization import Customization
from preparation import Preparation
# the *Factory classes used below (CappuccinoFactory, BlackCoffeeFactory,
# LemonadeFactory, HotMilkFactory, CocaColaFactory) are assumed to be imported
# from elsewhere in the project; that import is not shown in this snippet

while True:
    factory_cappuccino = CappuccinoFactory()
    factory_black_coffee = BlackCoffeeFactory()
    factory_lemon = LemonadeFactory()
    factory_milk = HotMilkFactory()
    factory_coca_cola = CocaColaFactory()

    # collect customization amounts from the user and flatten them to a tuple
    cust = Customization(float(input("Extra milk - ")),
                         float(input("Sugar - ")), float(input("Mug size - ")))
    cust = (cust.extra_milk, cust.sugar, cust.mug_size)

    # collect ingredient amounts and flatten them to a tuple as well
    prep = Preparation(float(input("Milk - ")), float(input("Water - ")),
                       float(input("Sugar - ")), float(input("Coke - ")),
                       float(input("Coffee - ")), float(input("Flavour - ")),
                       float(input("Tea - ")))
    prep = (prep.milk, prep.water, prep.sugar, prep.coke, prep.liquid_coffee,
            prep.added_flavour, prep.tea)

    cappuccino = factory_cappuccino.get_product()
    black_coffee = factory_black_coffee.get_product()
    lemon = factory_lemon.get_product()
    milk = factory_milk.get_product()
    coca_cola = factory_coca_cola.get_product()

    cappuccino.make(cappuccino, cust, prep)
    cappuccino.set_milk()
    cappuccino.set_sugar()
    cappuccino.set_coffee()
    print("\n")
Code example #6
# the block below is a commented-out alternative entry point; its opening
# triple quote falls outside this excerpt, so one is restored here
'''
    # with warnings.catch_warnings():
    #     warnings.simplefilter("ignore")
    #     Instance.fxn()
    parser = OptionParser()
    for option in options:
        param = option['name']
        del option['name']
        parser.add_option(*param, **option)
    start = timer()
    options, arguments = parser.parse_args()
    main(options, arguments)
'''

from timeit import default_timer as timer  # assumed import; not shown in this excerpt

if __name__ == "__main__":
    prepared = Preparation()
    servant = Servant()
    opt = prepared.get_options()
    options, args = prepared.read_options(opt)
    start = timer()
    main(options, args, prepared, servant)

    #exit()
    end_analysis = timer()

    servant.extractCSV()

    start_fv = timer()
    data_set, feature_vector = servant.getInputData(prepared)
    end_fv = timer()
    conf = 'model_training.config'
Code example #7
def getting_variables_from_preparation(self):
    # Preparation().main() returns all of the game setup values in one call
    self.max_number_of_players, self.distance_between_starts, self.players, \
        self.rules, self.dice = Preparation().main()
Code example #8
File: analysis.py Project: WhiteWalker72/PoliFLW
def __init__(self):
    self.preparation = Preparation()
    self.tfidf = TFIDF()
Code example #9
import os
import sys

def run(zipPath):
    folderName = ZipExtract().get_zip_file(zipPath)
    data = Preparation(folderName).getInstance()
    data.train_test_move_to_dirs(data.train_test_separation(
        data.train_test_make_dirs()))

def read_word_dict(word_dict_path):
    # reconstructed header: the original snippet is truncated and resumes
    # mid-function here; the loop maps each line's second whitespace-separated
    # field to its first
    word_dict = {}
    for line in open(word_dict_path):
        r = line.strip().split()
        word_dict[r[1]] = r[0]
    return word_dict

if __name__ == '__main__':
    if len(sys.argv) < 3:  # the script reads two params, so argv needs 3 entries
        print('please input params: data_name search_field_label')
        sys.exit(1)
    data_name = sys.argv[1]  # udc or ms_v2
    search_field_label = sys.argv[2]  # body or title

    basedir = '../../data/' + data_name + '/ModelInput/'
    cur_data_dir = basedir + 'dmn_prf_model_input_' + search_field_label + '/'
    if not os.path.exists(cur_data_dir):
        os.makedirs(cur_data_dir)
    prepare = Preparation()  # train_rexpand_ctab_titleText.txt
    train_file = 'train_rexpand_ctab_' + search_field_label + 'Text.txt'
    valid_file = 'valid_rexpand_ctab_' + search_field_label + 'Text.txt'
    test_file = 'test_rexpand_ctab_' + search_field_label + 'Text.txt'

    corpus, rels_train, rels_valid, rels_test = prepare.run_with_train_valid_test_corpus_dmn(
        basedir + train_file, basedir + valid_file,
        basedir + test_file)
    for data_part in ['train', 'valid', 'test']:
        if data_part == 'train':
            rels = rels_train
        elif data_part == 'valid':
            rels = rels_valid
        else:
            rels = rels_test
        print('total relations in', data_part, len(rels))