def main():
    # Parse and print arguments
    args = make_args_parser()
    print_args(args)
    # Load input file as well as the timestamps
    X, ts = util.load_file(args.input)
    # Load and compile model
    model = util.load_and_compile_model(MODEL_PATH)
    # Print model summary
    print("------------------------- Model Summary -------------------------")
    model.summary()
    # Predict model output
    y_pred = model.predict(X, batch_size=32)
    # Load actual values
    y_true, _ = util.load_file(ACTUAL_PATH)
    # Compute MAE, MAPE and MSE
    mae = util.compute_mae(X, y_true, model)
    mape = util.compute_mape(y_true, y_pred, model)
    mse = util.compute_mse(X, y_true, model)
    # If the output file already exists, delete it
    if os.path.exists(PREDICTED_PATH):
        os.remove(PREDICTED_PATH)
    # Open the file and write it from scratch
    with open(PREDICTED_PATH, 'a') as file:
        file.write('MAE: {:.4f}\tMAPE: {:.4f}\tMSE: {:.4f}\n'.format(
            mae, mape, mse))
        df = pd.concat([ts, pd.DataFrame(y_pred)], axis=1)
        df.to_csv(file, index=False, header=False, sep='\t', encoding='utf-8')
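The util helpers used above (load_file, load_and_compile_model, the metric functions) are not shown on this page. As a rough orientation only, a minimal load_file consistent with how X and ts are used here might look like the sketch below; the tab separator and column layout are assumptions, not the project's actual implementation.

# Hypothetical sketch of a load_file returning (features, timestamps); the
# real helper and its file format are not shown on this page.
import pandas as pd

def load_file(path):
    """Return (X, ts) from a tab-separated file whose first column is a timestamp."""
    df = pd.read_csv(path, sep='\t', header=None)
    ts = df.iloc[:, 0]            # timestamp column, kept as a Series
    X = df.iloc[:, 1:].values     # remaining columns as the model input
    return X, ts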
def save_output_file(path_name, file_name):
    """Reads all of the data from a single run and saves to a .mat file."""

    converted_data = load_file('%s%s.p'%(path_name, file_name))
    filtered_data = load_file('%s%s_filtered.p'%(path_name, file_name))
    force_data = load_file('%s%s_forces.p'%(path_name, file_name))

    # Save all the data in a useful way.
    if force_data['dim'] == 2:
        converted_data['z'] = []
    output_data = {'config': filtered_data['q'],
                   'velocity': filtered_data['v'],
                   'acceleration': filtered_data['a'],
                   'reference_config': filtered_data['ref'],
                   'dimension': force_data['dim'],
                   'time': force_data['t'],
                   'link_length': filtered_data['link_length'],
                   'contact_point': filtered_data['cp'],
                   'reaction_forces': {'static': force_data['static'],
                                       'dynamic': force_data['dyn'], 
                                       'discrete': force_data['discrete']},
                   'sampled_points': {'x': converted_data['x'],
                                      'y': converted_data['y'],
                                      'z': converted_data['z']}}
    sio.savemat('%s%s_output.mat' %(path_name, file_name), output_data)
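The three load_file calls above imply a fixed naming scheme: for a run stored under path_name with base name file_name, the function expects <file_name>.p, <file_name>_filtered.p and <file_name>_forces.p in that directory and writes <file_name>_output.mat next to them. A hypothetical call (the paths are illustrative):

# Illustrative only: expects ./trial01/run1.p, ./trial01/run1_filtered.p and
# ./trial01/run1_forces.p to exist; writes ./trial01/run1_output.mat.
save_output_file('./trial01/', 'run1')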
Example #3
def test_alm_merge_basic(info):
    if info.verbose:
        print("")  # fixes display for later...
    info.run_merge()
    exp = util.load_file(info.expected)
    res = util.load_file(info.result)
    util.file_compare(exp, res)
    info.cleanup()
Example #4
def test_calfactors_basic(info):
    # if not stopped:
    #     import pdb; pdb.set_trace()
    #     global stopped
    #     stopped = True
    info.run_merge()
    exp = util.load_file(info.expected)
    res = util.load_file(info.result)
    compare(exp, res)
    info.cleanup()
Example #5
def kdw_reset_model_and_deck(col):
    util.remove_model_and_deck(col, KDW_MODEL, KDW_DECK, log)
    deck_id = col.decks.id(KDW_DECK)
    model = col.models.new(KDW_MODEL)
    model['did'] = deck_id
    model['css'] = util.load_file('kdw.css', log) or model['css']
    col.models.add(model)

    col.models.addField(model, col.models.newField('Kanji'))
    col.models.addField(model, col.models.newField('Furigana'))
    col.models.addField(model, col.models.newField('Meaning'))
    col.models.addField(model, col.models.newField('Examples'))

    tmpl_read = col.models.newTemplate('Read')
    tmpl_read['qfmt'] = util.load_template_file('kdw', 'read', 'front',
                                                log) or ''
    tmpl_read['afmt'] = util.load_template_file('kdw', 'read', 'back',
                                                log) or ''
    tmpl_read['bqfmt'] = tmpl_read['qfmt']
    tmpl_read['bafmt'] = tmpl_read['afmt']
    col.models.addTemplate(model, tmpl_read)

    tmpl_meaning = col.models.newTemplate('Meaning')
    tmpl_meaning['qfmt'] = util.load_template_file('kdw', 'meaning', 'front',
                                                   log) or ''
    tmpl_meaning['afmt'] = util.load_template_file('kdw', 'meaning', 'back',
                                                   log) or ''
    tmpl_meaning['bqfmt'] = tmpl_meaning['qfmt']
    tmpl_meaning['bafmt'] = tmpl_meaning['afmt']
    col.models.addTemplate(model, tmpl_meaning)
    col.save()

    return model, col.decks.get(deck_id)
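This helper expects an open Anki collection. In an add-on context the collection is usually the one exposed by the running application; a hedged usage sketch, assuming the standard Anki add-on environment (mw.col is provided by the GUI, not by this module):

# Hedged usage sketch inside an Anki add-on: mw.col is the currently open
# collection provided by the Anki GUI; nothing here is part of the module above.
from aqt import mw

model, deck = kdw_reset_model_and_deck(mw.col)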
Example #6
	def on_btnGetCols_clicked(self,widget):
		print '''Fetch column information using SQL'''
		self.tbColEng.set_text('')
		self.tbColHan.set_text('')

		bounds = self.tbSql.get_selection_bounds();
		if( len(bounds) < 2):
			print "no text is selected"
			self.log("no text is selected\n")
			tb = self.tbSql
			sql = self.tbSql.get_text(tb.get_start_iter(), tb.get_end_iter())
		else:
			sql = self.tbSql.get_text(bounds[0],bounds[1]);
		sql = self.rmSqlComment(sql)
		self.log('SQL : ' + sql + '\n');
		print sql

		try:
			dbCols = DbCols()
			dbCols.con_str = self._get_con_str()
			cols = dbCols.get_cols(sql)
			
			tb = self.tbColEng
			assert isinstance(tb , gtk.TextBuffer)
			tb.set_text(cols)
			
			mydict = util.load_file("dict.txt")
			names = cols.split(',')
			hannames = util.eng_to_han(mydict, names)
			self.tbColHan.set_text(hannames)
		except :
			tb = traceback.format_exc().decode('euckr')
			self.log("********* ERROR ********* \n")
			self.log(tb)
			raise
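Several of the GTK handlers on this page call util.load_file("dict.txt") and then util.eng_to_han(...) to turn English column names into Korean labels, which suggests dict.txt is a simple English-to-Korean lookup table. A minimal sketch of such a loader, purely as a guess at the unshown util module:

# Assumed shape of util.load_file("dict.txt") in these GTK snippets: a dict
# mapping English column names to localized labels, one "eng<TAB>label" pair
# per line. This is an assumption, not the real helper.
def load_file(path):
    mapping = {}
    with open(path) as f:
        for line in f:
            parts = line.strip().split('\t')
            if len(parts) == 2:
                mapping[parts[0]] = parts[1]
    return mapping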
Example #7
def plot_all_loss(folders):
    for folder in folders:
        # Load each run's loss history and plot it on shared axes.
        with open('{}/loss'.format(folder), 'rb') as f:
            loss = pickle.load(f)
        plt.plot(loss, label=folder)
    plt.legend()
    plt.show()
Example #8
def plot_loss(folders):
    for (dir_name, file_name) in folders:
        loss = util.load_file(dir_name, file_name)
        plt.plot(loss, label=labels[dir_name])
    plt.xlabel('Number of epochs')
    plt.ylabel('Training Loss')
    #plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.legend()
    plt.show()
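plot_loss assumes a module-level labels dict keyed by directory name; folder names like the ones used in the perturbation analysis further down this page ('baseline_fc_plots', 'ladder_fc_plots') fit that shape. A hypothetical call:

# Hypothetical call; the labels dict is assumed to live at module scope.
labels = {'baseline_fc_plots': 'Baseline', 'ladder_fc_plots': 'Ladder network'}
plot_loss([('baseline_fc_plots', 'loss'), ('ladder_fc_plots', 'loss')])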
Example #9
def plot_accuracy(folders):
    for (dir_name, file_name) in folders:
        accs = util.load_file(dir_name, file_name)
        plt.plot(accs, label=labels[dir_name])
    plt.xlabel('Number of epochs')
    plt.ylabel('SVHN Test Accuracy')
    #plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.legend(loc=4)
    #plt.show()
    plt.savefig('fc_accs.png')
Example #10
def info(path):
    n_sample, sign, density, double_occ, sweep, n_sweep = \
        util.load_file(path, "meas_eqlt/n_sample", "meas_eqlt/sign",
                             "meas_eqlt/density", "meas_eqlt/double_occ",
                             "state/sweep", "params/n_sweep")
    print(f"n_sample={n_sample}, sweep={sweep}/{n_sweep}")
    if n_sample > 0:
        print(f"<sign>={(sign/n_sample)}")
        print(f"<n>={(density/sign)}")
        print(f"<m_z^2>={((density-2*double_occ)/sign)}")
Example #11
    def __init__(self, projdir):
        for file in os.listdir(projdir):
            if file.endswith('.all'):
                df = util.load_file(projdir, file)

                errors = self.get_errors(df)

                # write file
                output = file[:-4] + '_errors.csv'
                util.write_file(errors, projdir, output,
                                cols=TError.get_columns())
Example #12
	def get_vofields(self,sql):
		cols = self._get_cols(sql)
		
		mydict = util.load_file("dict.txt")
		
		s = ''
		for col in cols:
			(name, t,precision,scale) = col[0],col[1], col[4],col[5]
			tstr = self.get_type_str(t,precision,scale)
			camelstr = util.underscore_to_camel(name) + ' ;'
			hanname = util.eng_to_han_one(mydict, name)
			s += "private %-12s %-30s // %s\n" % (tstr, camelstr, hanname)
		return s;
Example #13
	def on_btnGenSql_clicked(self,widget):
		'''Generate SQL.'''
		tb = self.tbColEng
		mydict = util.load_file("dict.txt")
		assert isinstance(tb , gtk.TextBuffer)
		s = tb.get_text(tb.get_start_iter(), tb.get_end_iter())
		cols = util.split(s)
		#self.log(crud.mk_insert(cols))
		#self.log('\n\n')
		self.log(crud.mk_insert2(mydict, cols))
		self.log('\n\n')
		self.log(crud.mk_update(mydict, cols))
		self.log('\n\n')
		self.log(crud.mk_select(mydict, cols))
		self.log('\n\n')
Example #14
	def on_btnGenChkCode_clicked(self,widget):
		'''Handle the check-code generation button.'''
		mydict = util.load_file("dict.txt")
		tb = self.tbColHan
		assert isinstance(tb , gtk.TextBuffer)
		s = tb.get_text(tb.get_start_iter(), tb.get_end_iter())
		o = bindItemToVar.BindItemVar(s,mydict)
		self.log(o.getVarStr())
		self.log('\n\n')
		self.log(o.getChkStr())
		self.log('\n\n')
		self.log(o.getChkStr2())
		self.log('\n\n')
		self.log(o.getChkStr3())
		self.log('\n\n')
Example #15
    def __init__(self, date, news_limit=5, net_limit=50):
        self.section = util.load_file("section.txt")
        self.date = date
        self.news_limit = news_limit
        self.net_limit = net_limit
        self.refer = 0

        self.mecab = Mecab()
        self.exp = re.compile("NN|XR|VA|VV|MAG|VX")

        self.temp_net = {}
        self.temp_list = {}
        self.word_net = []  # relative word and its frequency
        self.word_list = []  # total word and its frequency (using for PMI)
        self.news = []  # top # of news
        self.sentiment = [0, 0]  # [neg, pos]
        self.counter = [0 for i in range(16)]
Example #16
	def __init__( self, date, news_limit = 5, net_limit = 50 ):
		self.section = util.load_file("section.txt")
		self.date = date
		self.news_limit = news_limit
		self.net_limit = net_limit
		self.refer = 0

		self.mecab = Mecab()
		self.exp = re.compile("NN|XR|VA|VV|MAG|VX")
		
		self.temp_net = {}
		self.temp_list = {}
		self.word_net = []	   # relative word and its frequency
		self.word_list = []	   # total word and its frequency (using for PMI)
		self.news = []		   # top # of news
		self.sentiment = [0, 0] # [neg, pos]
		self.counter = [ 0 for i in range(16) ]
Example #17
def make_cfg():
    print ' * * * TShockweb configurator * * * '
    print ''
    print 'TShockweb requires a configuration file called "%s" to function. This file was not found, but we can make a configuration file for you. I just need some information about your environment, and I will generate a %s file. If you want to run this configurator again later, simply remove the %s file from the tshockweb folder.' % (props_file_name, props_file_name, props_file_name)
    for q in configs:
        print '\n\n************************************'
        print q.token
        print 'Description: %s \n' % (q.desc)
        ans = raw_input('Enter a value, or leave blank to accept the default of "%s" >>> ' % (q.default))
        ans = ans.replace('\\', '/').strip()
        if ans is not None and len(ans) > 0:
            q.value = ans

    print '\n\nGenerating tshockweb.properties...'
    default_config = util.load_file(default_config_file)
    tokens = make_cfg_dict(configs)
    modified_config = Template(default_config).safe_substitute(tokens)
    print modified_config
    print 'Done!'
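safe_substitute fills ${token} placeholders in the default configuration and, unlike substitute, leaves unknown placeholders in place instead of raising. A tiny standalone illustration (the token names below are made up, not the real tshockweb.properties keys):

# Minimal illustration of the substitution step used above.
from string import Template

template = Template('server_path=${server_path}\nport=${port}\n')
print(template.safe_substitute({'server_path': '/opt/tshock'}))
# -> server_path=/opt/tshock
#    port=${port}        (unknown tokens are left untouched)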
Example #18
def plot_all_kshot_for_exp(folder, kshots, subfolder, exp_type, num_epochs=None):
    plt.clf()
    acc = util.load_file(folder, 'inter_accuracy44')
    finals = list()
    for k in kshots:
        x_acc = [a[k] for a in acc]
        chunked = chunk(x_acc, SMOOTHING)
        finals.append(chunked)
         
    step_size = 25 * SMOOTHING  # Validation accuracy calculated every STEP_SIZE epochs.
    x = np.arange(1, len(finals[0]) * step_size, step_size)
    lsubfolder = subfolder.lower()
    if num_epochs:
        comb = list(filter(lambda y: y[0] <= num_epochs, list(zip(x, acc))))
        x = [c[0] for c in comb]
        acc = [c[1] for c in comb]
        assert len(x) == len(acc)
    for (i, f) in enumerate(finals):
        plt.plot(x, f, label=labels[kshots[i] + 1], linewidth=2, marker='.', \
                 markersize=6)
    #plt.plot(x, [baselines[lsubfolder][0]] * len(acc), label='Baseline 1st Instance' ,\
    #         linewidth=2) 
    #plt.plot(x, [baselines[subfolder][1]] * len(acc), label='Baseline 2nd Instance') 
    #plt.plot(x, [baselines[subfolder][4]] * len(acc), label='Baseline 5th Instance') 
    #plt.plot(x, [baselines[lsubfolder][6]] * len(acc), label='Baseline 7th Instance', \
    #         linewidth=2) 
    plt.ylim(0, 1.1)
    plt.xlabel('Number of epochs')
    plt.ylabel('% Accurate Test Set')
    """
    if subfolder == 'vgg':
        plt.title('Pre-trained VGG19 Controller')
    else:
        plt.title('Default Controller (No encoder)')
    """
    plt.title(subfolder)
    plt.legend(loc=2)
    #plt.show()
    plt.savefig('plots/{}_{}.png'.format(exp_type, subfolder))
Example #19
def make_cfg():
    print ' * * * TShockweb configurator * * * '
    print ''
    print 'TShockweb requires a configuration file called "%s" to function. This file was not found, but we can make a configuration file for you. I just need some information about your environment, and I will generate a %s file. If you want to run this configurator again later, simply remove the %s file from the tshockweb folder.' % (
        props_file_name, props_file_name, props_file_name)
    for q in configs:
        print '\n\n************************************'
        print q.token
        print 'Description: %s \n' % (q.desc)
        ans = raw_input(
            'Enter a value, or leave blank to accept the default of "%s" >>> '
            % (q.default))
        ans = ans.replace('\\', '/').strip()
        if ans is not None and len(ans) > 0:
            q.value = ans

    print '\n\nGenerating tshockweb.properties...'
    default_config = util.load_file(default_config_file)
    tokens = make_cfg_dict(configs)
    modified_config = Template(default_config).safe_substitute(tokens)
    print modified_config
    print 'Done!'
Example #20
def main():
    # parse and print arguments
    args = make_args_parser()
    print_args(args)
    # load input file as well as the timestamps
    X, ts = util.load_file(args.input)
    # load and compile model
    model = util.load_and_compile_model(MODEL_PATH)
    # print model summary
    print("------------------------- Model Summary -------------------------")
    model.summary()
    # Build a new model whose output is the first hidden layer of the pretrained model
    new_model = util.get_intermediate_layer_model(model, 'dense_1')
    # print model summary
    print("--------------- Intermediate Layer Model Summary ----------------")
    new_model.summary()
    # predict new model output
    y_pred = new_model.predict(X, batch_size=32)
    # Open the file and write it from scratch
    with open(OUTPUT_PATH, 'w') as file:
        df = pd.concat([ts, pd.DataFrame(y_pred)], axis=1)
        df.to_csv(file, index=False, header=False, sep='\t', encoding='utf-8')
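util.get_intermediate_layer_model is not shown; in Keras, a model exposing a named hidden layer's activations is normally built from the trained model's own graph. One plausible implementation, offered as a sketch rather than the project's actual helper:

# Plausible sketch using the Keras functional API: reuse the trained model's
# input and cut the graph at the named layer.
from keras.models import Model

def get_intermediate_layer_model(model, layer_name):
    return Model(inputs=model.input,
                 outputs=model.get_layer(layer_name).output)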
Example #21
	def on_btnDefMap_clicked(self,widget):
		mydict = util.load_file("dict.txt")
		tb = self.tbColHan
		assert isinstance(tb , gtk.TextBuffer)
		s = tb.get_text(tb.get_start_iter(), tb.get_end_iter())
		a = s.splitlines()
		if( len(a) <2 ):
			a = util.split(s)
		msg = 'SQL의 SELECT구문을 이용하기 바랍니다\n\n'
		msg += 'BufMap defValMap = new BufMap();\n'
		msg += 'defValMap\n'
		for l in a:
			l = re.sub('/\*.*\*/','',l)
			l = l.strip()
			t = self.get_token(l)
			engname = t
			t = '"' + t + '"'
			hanname = util.eng_to_han_one(mydict,engname )
			msg += '\t\t.put(%-30s,"0")\t\t// %s\n' %(t,hanname)
		msg += '\t\t;\n'
		self.log("\n\n")
		self.log(msg)
		self.log("\n\n")
Example #22
def plot_single_kshot_for_exps(kshot, folders, subfolders, exp_type, num_epochs=None):
    plt.clf()
    step_size = 25
    for (subfolder, folder) in zip(subfolders, folders):
        print(kshot, subfolder)
        acc = util.load_file(folder, 'inter_accuracy44')
        x = np.arange(1, len(acc) * step_size, step_size * SMOOTHING)
        if subfolder=='VGG19' and kshot == 9:
            k_acc = [a[6] for a in acc]
        elif subfolder=='VGG19' and kshot == 7:
            k_acc = [a[5] for a in acc]
        else:
            k_acc = [a[kshot] for a in acc]
        chunked = chunk(k_acc, SMOOTHING)
        if num_epochs:
            comb = list(filter(lambda y: y[0] <= num_epochs, list(zip(x, chunked))))
            x = [c[0] for c in comb]
            chunked = [c[1] for c in comb]
            assert len(x) == len(chunked)
        if subfolder in labels:
            label = labels[subfolder]
        else:
            label = subfolder
        plt.plot(x, chunked, label=label, linewidth=2, marker='.', markersize=6)
    if exp_type == 'difficulty':
        plt.plot(x, [baselines[subfolder][kshot]] * len(chunked), label='Baseline')
    else:
        x = np.arange(1, num_epochs, step_size * SMOOTHING)
        #  set_trace()
        plt.plot(x, [baselines['all'][kshot]] * len(chunked), label='Baseline')
    plt.ylim(0.0, 0.8)
    plt.xlabel('Number of epochs')
    plt.ylabel('% Accurate Test Set')
    plt.title("{}-Shot Accuracy".format(kshot))
    plt.legend(loc=2)
    #plt.show()
    plt.savefig('plots/{}_{}.png'.format(exp_type, kshot))
Example #23
	def on_btnSqlComment_clicked(self,widget):
		'''Generate SQL using the table comments.'''
		tb = self.tbSql
		mydict = util.load_file("dict.txt")
		assert isinstance(tb , gtk.TextBuffer)
		s = tb.get_text(tb.get_start_iter(), tb.get_end_iter())
		s = s.strip().upper()
		prefix = None
		if s.find('.') >= 0:
			(prefix, s ) = s.split('.')
		constr = self._get_con_str()
		self.log('연결문자열:' + constr + '\n')
		self.log('테이블명  :' + s + '\n')
		m = utildb.get_comment_info(constr, s.upper())
		#self.log(crud.mk_insert(cols))
		#self.log('\n\n')
		self.log(crud2.mk_insert2(mydict,m['dic'],m['cols']))
		self.log('\n\n')
		self.log(crud2.mk_update(mydict, m['dic'],m['cols']))
		self.log('\n\n')
		self.log(crud2.mk_select(mydict, m['dic'],m['cols'],prefix))
		self.log('\n\n')
		self.log(crud2.mk_select_insert(mydict, m['dic'],m['cols']))
		self.log('\n\n')
Example #24
    cleaned_text = re.sub(remove, '', cleaned_text)
    tokens = cleaned_text.split()

    tokens = [word.lower() for word in tokens if word != '' and word.isalpha()]

    return tokens


def sequence_tokens(tokens, sequence_length=50):
    length = sequence_length + 1
    sequences = []
    token_length = len(tokens)
    for i in range(length, token_length):
        sequence = tokens[i - length:i]

        sequences.append(' '.join(sequence))

    return sequences


def save_sequences(sequences, filename):
    data = '\n'.join(sequences)
    with open(filename, 'w') as file:
        file.write(data)


if __name__ == "__main__":
    filename = 'plato.txt'
    text = load_file(filename)
    save_sequences(sequence_tokens(clean_tokenize_text(text)), 'sequences.txt')
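sequence_tokens keeps sequence_length + 1 tokens per line so that, at training time, each line can be split into 50 input tokens and 1 target token. A short sketch of that consumer side (the original training script is not shown):

# Sketch of how the saved sequences are typically consumed: each line holds
# sequence_length + 1 tokens, so the last token is the prediction target.
def split_sequences(filename):
    inputs, targets = [], []
    with open(filename) as f:
        for line in f:
            tokens = line.split()
            inputs.append(tokens[:-1])   # first 50 tokens as input
            targets.append(tokens[-1])   # 51st token to predict
    return inputs, targets

X_tokens, y_tokens = split_sequences('sequences.txt')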
Example #25
np.random.seed(231)
################################################################################
# Load CV Data
################################################################################
print("\nReading data ...")
DATA_DIR = 'data/CVSplits/'
num_folds = 5

data_cv = []
label_cv = []
max_col_prior = 0

# First find the maximum number of features across all folds
for i in range(num_folds):
    _, _, max_col_prior = util.load_file(
        DATA_DIR + 'training0' + str(i) + '.data', max_col_prior)

#print(max_col_prior)

for i in range(num_folds):
    data_fold, label_fold, max_col_prior = util.load_file(
        DATA_DIR + 'training0' + str(i) + '.data', max_col_prior)
    data_cv.append(data_fold)
    label_cv.append(label_fold)

################################################################################
# Load Train and Test Data
################################################################################
DATA_DIR = 'data/'
data_tr, label_tr, max_col_prior = util.load_file(DATA_DIR + 'train.liblinear',
                                                  max_col_prior)
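With the five folds held in data_cv / label_cv, cross-validation is normally run by holding one fold out for evaluation and concatenating the rest for training. A hedged sketch of that loop, assuming each fold is a NumPy array; train_and_eval is a placeholder for whatever classifier the script actually trains:

# Hedged cross-validation sketch over the folds loaded above.
import numpy as np

for held_out in range(num_folds):
    X_train = np.concatenate([data_cv[i] for i in range(num_folds) if i != held_out])
    y_train = np.concatenate([label_cv[i] for i in range(num_folds) if i != held_out])
    X_val, y_val = data_cv[held_out], label_cv[held_out]
    # train_and_eval(X_train, y_train, X_val, y_val)  # placeholder classifier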

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--src_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    parser.add_argument('--conceptnet_path', type=str, required=True)

    args = parser.parse_args()
    src_path = args.src_path
    output_path = args.output_path
    conceptnet_path = args.conceptnet_path

    print("Loading src and output files from ", src_path, output_path)
    src = load_file(src_path)
    output = load_file(output_path)
    assert len(src) == len(output)

    # load conceptnet
    print("loading conceptnet from {0}...".format(conceptnet_path))
    CN = load_pickle(conceptnet_path)
    CN_concepts = set(get_all_entities(CN))
    CN_concept_pairs = set([(h, t) for h, r, t, w in CN])
    print("number of conceptnet triplets: {0}".format(len(CN)))
    print("number of conceptnet entities: {0}".format(len(CN_concepts)))

    # load stopwords
    stopwords = load_pickle("./data/KB/stopwords.pkl")

    print()
    # emotion_lexicon = load_pickle(emotion_lexicon_file)
    CN = load_pickle(conceptnet_path) # the augmented conceptnet
    associated_concepts = defaultdict(list)
    for h,r,t,w in tqdm(CN):
        associated_concepts[h].append((r,t,w))
    # vocab_embedding = torch.load(dataset_vocab_embedding_path) # (vocab, emb_dim)

    # load vocab
    print("Loading dataset vocab from ", dataset_vocab_path)
    vocab_ckpt = torch.load(dataset_vocab_path)
    word2id = vocab_ckpt["src"].base_field.vocab.stoi
    id2word = vocab_ckpt["src"].base_field.vocab.itos
    print("dataset vocab size: ", len(word2id))
    all_words = list(word2id.keys())

    src_concepts = load_file(src_concept_file)

    new_word_ids = []
    new_word_scores = []
    new_word_VAD_scores = []
    unemotional_words_count = []
    for sent_word_ids, sent_word_scores, sent_word_VAD_scores, concepts, emotion in \
        tqdm(zip(concept_words[0], concept_words[1], concept_words[2], src_concepts, emotions), total=len(concept_words[0])):
        
        qualified = []
        emotions_to_exclude = [e for e in id2emotion if e != id2emotion[emotion]]
        neighbor_hood_size = 1
        while len(set([t for r,t,w in qualified])) < num_unemotional_words:
            if neighbor_hood_size == 4:
                # print("only {0} concepts".format(len(set([t for r,t,w in qualified]))))
                break
Example #28
    print("loading conceptnet from {0}...".format(conceptnet_path))
    CN = load_pickle(conceptnet_path)
    CN_concepts = set(get_all_entities(CN))
    print("number of conceptnet triplets: {0}".format(len(CN)))
    print("number of conceptnet entities: {0}".format(len(CN_concepts)))

    # load stopwords
    stopwords = load_pickle("./data/KB/stopwords.pkl")

    # load data
    for split in ["train", "valid", "test"]:
        if dataset == "reddit":
            src_path = "./data/Reddit/{0}-src{1}.txt".format(
                split, "-smaller" if smaller else "")
        elif dataset == "twitter":
            src_path = "./data/Twitter/{0}-src.txt".format(split)

        print("Loading data from {0}".format(src_path))
        src = load_file(src_path)

        print("extracting concepts from dataset...")
        src_concepts = [
            get_concepts(line, CN_concepts, stopwords) for line in tqdm(src)
        ]

        # save augmented conceptnet
        src_concepts_path = src_path.replace(".txt", "-concepts.txt")
        print("saving src concepts to {0}...".format(src_concepts_path))
        with open(src_concepts_path, "w") as f:
            for concepts in src_concepts:
                f.write(", ".join(concepts) + "\n")
Example #29
def analyze_perturbations():
    baseline = util.load_file('baseline_fc_plots', 'baseline_aps')
    ladder = util.load_file('ladder_fc_plots', 'ladder_aps')

    for (i, c) in enumerate(['src', 'target']):
        for l in range(7):
            ladder[c][l] = 1.0 / (ladder[c][l]**2)
            baseline[c][l] = 1.0 / (baseline[c][l]**2)
        ladder[c][7] = i
        baseline[c][7] = i

    for l in range(7):
        ladder_denom = ladder['src'][l] + ladder['target'][l]
        baseline_denom = baseline['src'][l] + baseline['target'][l]
        ladder['src'][l] = ladder['src'][l] / ladder_denom
        ladder['target'][l] = ladder['target'][l] / ladder_denom
        baseline['src'][l] = baseline['src'][l] / baseline_denom
        baseline['target'][l] = baseline['target'][l] / baseline_denom

    data = 5 * np.ones((16, 16))

    set_trace()

    eprint(ladder)

    for (i, c) in enumerate(['src', 'target']):
        for l in range(1, 8):
            x_start = 2 * (8 - l) + 1
            y_start = 9 * i + 2
            data[x_start, y_start:y_start + 3] = ladder[c][l] * np.ones((1, 3))

    light_color = [sns.light_palette((210, 90, 60), input="husl").as_hex()[0]]
    cmap = ListedColormap(
        sns.color_palette("Blues", 10).as_hex() + ['#000000'])
    clipped_cmap = ListedColormap(sns.color_palette("Blues", 10).as_hex())
    cbar_kws = {'ticks': np.arange(0, 1.2, 0.2)}
    ax = sns.heatmap(data,
                     vmin=0,
                     vmax=1.2,
                     cmap=cmap,
                     xticklabels=False,
                     yticklabels=False,
                     cbar_kws=cbar_kws)
    plt.xlabel('Source                                    Target')
    plt.savefig('ladder_aps.png')

    plt.clf()

    data = 5 * np.ones((16, 16))

    eprint(baseline)

    for (i, c) in enumerate(['src', 'target']):
        for l in range(1, 8):
            x_start = 2 * (8 - l) + 1
            y_start = 9 * i + 2
            data[x_start, y_start:y_start + 3] = baseline[c][l] * np.ones(
                (1, 3))

    light_color = [sns.light_palette((210, 90, 60), input="husl").as_hex()[0]]
    cmap = ListedColormap(
        sns.color_palette("Blues", 10).as_hex() + ['#000000'])
    clipped_cmap = ListedColormap(sns.color_palette("Blues", 10).as_hex())
    cbar_kws = {'ticks': np.arange(0, 1.2, 0.2)}
    ax = sns.heatmap(data,
                     vmin=0,
                     vmax=1.2,
                     cmap=cmap,
                     xticklabels=False,
                     yticklabels=False,
                     cbar_kws=cbar_kws)
    plt.xlabel('Source                                    Target')
    plt.savefig('baseline_aps.png')
Example #30

pre_process = False
if "--pre" in sys.argv:
    pre_process = True
is_train = False
if "--train" in sys.argv:
    is_train = True
elif "--test" in sys.argv:
    is_train = False

if pre_process:
    util.pre_process(is_train)
print('pre-process done!!')

images, labels = util.load_file(is_train)

init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())

xs = tf.placeholder(tf.float32, shape = [util.BATCH_SIZE, util.IMAGE_HEIGHT, util.IMAGE_WIDTH, 3])
ys = tf.placeholder(tf.float32, shape = [util.BATCH_SIZE, My_Network.OUTPUT_SIZE])

my_net = My_Network.Mynetwork()
prediction = my_net.model(xs)
loss = my_net.loss_func(prediction, ys)
correct = my_net.count_correct(prediction, ys)
training = my_net.train(LEARNING_RATE, loss)

# run training
with tf.Session() as sess:
Example #31
"""
Classes and functions for uploading to the Small Family archive
"""

import json

import boto3

from util import load_file, handle_datetime

UPLOAD_POLICY = load_file('archive_upload_policy.json')
SESSION_DURATION = 900


class TokenRetriever(object):
    def __init__(self, stsClient):
        self.stsClient = stsClient

    def getToken(self, name):
        """
        Gets a federation token as described at
        https://boto3.readthedocs.org/en/latest/reference/services/sts.html#STS.Client.get_federation_token.

        The result of a successful API call is returned as JSON.
        """
        return json.dumps(self.stsClient.get_federation_token(Name=name,
                                                              Policy=UPLOAD_POLICY,
                                                              DurationSeconds=SESSION_DURATION),
                          default=handle_datetime)
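A hedged usage sketch; the client construction and the token name are illustrative and not part of the module above:

# Illustrative wiring of TokenRetriever to a real STS client; the name passed
# to getToken is arbitrary.
import boto3

retriever = TokenRetriever(boto3.client('sts'))
print(retriever.getToken('archive-uploader'))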

Example #32
    concept_pairs_path = args.concept_pairs_path

    two_step_PPMI = True

    # load data
    if dataset == "reddit":
        src_path = "./data/Reddit/train-src{0}.txt".format("-smaller" if smaller else "")
        tgt_path = "./data/Reddit/train-tgt{0}.txt".format("-smaller" if smaller else "")
        tgt_emotion_path = "./data/Reddit/train-tgt{0}-emotions.txt".format("-smaller" if smaller else "")
    elif dataset == "twitter":
        src_path = "./data/Twitter/train-src.txt"
        tgt_path = "./data/Twitter/train-tgt.txt"
        tgt_emotion_path = "./data/Twitter/train-tgt-emotions.txt"
    
    print("Loading data from {0}".format([src_path, tgt_path, tgt_emotion_path]))
    src = load_file(src_path)
    tgt = load_file(tgt_path)
    tgt_emotions = load_file(tgt_emotion_path)
    tgt_emotions = [id2emotion[int(emo[0])] for emo in tgt_emotions]
    assert len(src) == len(tgt) and len(src) == len(tgt_emotions)
    print("number of conversation pairs: {0}".format(len(src)))
    
    # load conceptnet
    print("loading conceptnet from {0}...".format(conceptnet_path))
    CN = load_pickle(conceptnet_path)
    CN_concepts = set(get_all_entities(CN))
    print("number of conceptnet triplets: {0}".format(len(CN)))
    print("number of conceptnet entities: {0}".format(len(CN_concepts)))

    # load stopwords
    stopwords = load_pickle("./data/KB/stopwords.pkl")
Example #33
 def load_dataset(self, date):
     return load_file(date.year, date.month)
Example #34
def main():
    """ 
    Runs the entire process of computing reaction forces from raw input to 
    final output. The file name containing the raw data should be given 
    as the only argument. The various options are read from the configuration
    file. Since the dimension might be changed often, you can override the
    dimension on the command line.
    """
    print 20*'+'+'TRACKING FORCES'+21*'+'

    # Get the input file(s) with the raw data. 
    parser = argparse.ArgumentParser(description='Compute reaction forces from\
            whisker tracking images.')
    parser.add_argument('data_file', help=".mat file with tracked image data")
    parser.add_argument('--dim', type=int, default=-1)

    args = parser.parse_args()

    if os.path.splitext(args.data_file)[1]:
        data_file = os.path.splitext(args.data_file)[0]  
    else:
        data_file = args.data_file

    # Read the options from the configuration file. If no configuration file,
    # use default.
    file_name = None
    for files in os.listdir('.'):
        if files.endswith('.cfg'):
            file_name = files
    if file_name is None: 
        file_name = resource_filename(Requirement.parse("tracking_forces"),
                                     'tracking_forces/tracking_forces.cfg')
    config = ConfigParser.SafeConfigParser()
    config.read([file_name])
    
    # Create a folder to save the outputs.
    output_dir = './%s/' %data_file
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dim = args.dim
    if dim < 0:
        dim = config.getint('general', 'dimension')
    mm_per_pixel = config.getfloat('general', 'mm_per_pixel')
    scale = mm_per_pixel/1000 
    dt = config.getfloat('general', 'dt')     

    # Load the raw data, sample the images based on the number of links and 
    # convert to configurations in terms of angles.
    if dim == 2:
        try:
            variable_names = {'x': config.get('convert', 'mat_xname_2d'),
                              'y': config.get('convert', 'mat_yname_2d'),
                              'z': config.get('convert', 'mat_zname_2d'),
                              'cp': config.get('convert', 'mat_cpname_2d')}
        except:
            variable_names = {'x': 'xw', 'y': 'yw', 'z': None, 'cp': 'CP'}

    else:
        try:
            variable_names = {'x': config.get('convert', 'mat_xname_3d'),
                              'y': config.get('convert', 'mat_yname_3d'),
                              'z': config.get('convert', 'mat_zname_3d'),
                              'cp': config.get('convert', 'mat_cpname_3d')}
        except:
            variable_names = {'x': 'xw3d', 'y': 'yw3d', 'z': 'zw3d', 'cp': 'CP'}

    conversion_args = (data_file+'.mat', scale, dim,
                       config.getint('convert', 'N'),
                       config.getint('convert','start'),
                       config.getint('convert','stop'),
                       config.getint('convert','convert_k'),
                       config.getfloat('convert','convert_s'),
                       config.get('convert', 'rx0_method'),
                       True, output_dir, variable_names)
    
    # Perform a new conversion.
    if config.getboolean('convert', 'new_conversion'):
        converted_data = convert_frames(*conversion_args)

    # Load previously converted data.
    else:
        try:
            converted_data = load_file('%s%s.p'%(output_dir,data_file))
        except:
            print "WARNING: Converted data not found. Performing new conversion."
            converted_data = convert_frames(*conversion_args)

    # Get the reference shape (index of frame where whisker is undeformed).
    ref = config.getint('filter', 'ref_index')
    if  ref < 0:
        ref = get_reference_frame(converted_data['CP'])   

    # Filter the trajectories.
    filter_args = (data_file+'.p', output_dir, dt, dim, ref,
                   config.get('filter', 'filter_type'),
                   config._sections['filter'])
    filtered_data = filter_data(*filter_args)

    # Build the whisker system in trep.
    whisker = make_whisker(dim, filtered_data['ref'], 
                           converted_data['link_length'],
                           config.getfloat('whisker', 'rbase'),
                           config.getfloat('whisker', 'taper'),
                           config.getfloat('whisker', 'damping_ratio'),
                           config.getfloat('whisker', 'rho'),
                           config.getfloat('whisker', 'E'))

    # Compute the reaction forces and moments.
    forces_args = (whisker, filtered_data['q'], 
                            filtered_data['v'],
                            filtered_data['a'],
                            filtered_data['cp'], dt,
                            data_file, output_dir,
                            config.getboolean('forces','overwrite'))
    force_data = calc_forces(dim, *forces_args)

    # Plot the results.
    if config.getboolean('plot', 'run_plot'):
        plot_args = ('%s_forces.p' %(data_file), output_dir, './%s.mat' %data_file, 
                     config.getboolean('plot', 'show_plots'))
        plot_and_save(dim, *plot_args)

    # Animate the whisker's motion.
    if config.getboolean('animate', 'run_animation'):
        animate_args = (data_file, output_dir, dt, 
                        config.getboolean('animate', 'show_animation'),
                        config.getboolean('animate', 'save_animation'),
                        config.getboolean('animate', 'debug_conversion'))
        animate_whisker(dim, *animate_args)

    save_output_file(output_dir, data_file)        
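main() reads its options with ConfigParser from the first *.cfg file found in the working directory. A minimal example of such a file, limited to keys the code above actually reads; every value is a placeholder for illustration, not a recommended setting:

# Hypothetical tracking_forces.cfg with placeholder values; the optional
# mat_*name_* keys are omitted, so the defaults in main() would be used.
SAMPLE_CFG = """
[general]
dimension = 2
mm_per_pixel = 0.1
dt = 0.002

[convert]
new_conversion = True
N = 20
start = 0
stop = 1000
convert_k = 3
convert_s = 0.0
rx0_method = fixed

[filter]
ref_index = -1
filter_type = butterworth

[whisker]
rbase = 0.1
taper = 15.0
damping_ratio = 0.02
rho = 1000.0
E = 3.3e9

[forces]
overwrite = True

[plot]
run_plot = True
show_plots = False

[animate]
run_animation = False
show_animation = False
save_animation = False
debug_conversion = False
"""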
Example #35
 def load_sim_file(self):
     self.simi_data = util.load_file(save_file)
Example #36
pre_process = False
if "--pre" in sys.argv:
    pre_process = True
is_train = False
if "--train" in sys.argv:
    is_train = True
elif "--test" in sys.argv:
    is_train = False

if pre_process:
    image_folder_list, label_folder_list = util.produce_path(is_train)
    util.pre_process(image_folder_list, label_folder_list)
print('pre-process done!!')

images, labels = util.load_file()

init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())

my_net = My_Network.Mynetwork()
training = my_net.train(images, labels)

# run training
with tf.Session() as sess:
    sess.run(init_op)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    if is_train:
        for i in range(ITERATION):
Example #37
	  <BindItem id="item11" compid="Edit08" propid="value" datasetid="ds_detail" columnid="FXNUM"/>
	  <BindItem id="item12" compid="edt_acnutNo" propid="value" datasetid="ds_detail" columnid="ACNUT_NO"/>
	  <BindItem id="item13" compid="edt_acnutOwnerNm" propid="value" datasetid="ds_detail" columnid="ACNUT_OWNER_NM"/>
	  <BindItem id="item14" compid="cbo_bank" propid="value" datasetid="ds_detail" columnid="DELNG_BANK_CODE"/>
	  <BindItem id="item15" compid="Edit06" propid="value" datasetid="ds_detail" columnid="TLPHON_NO"/>
	  <BindItem id="item16" compid="Edit07" propid="value" datasetid="ds_detail" columnid="MOBLPHON_NO"/>
	  <BindItem id="item17" compid="Edit03" propid="value" datasetid="ds_detail" columnid="ADRES"/>
	</Bind>
'''
	s = '''
				<Cell col="1" text="bind:BUF_GRAD_CODE_NM" editlimit="-1"/>
				<Cell col="2" displaytype="number" edittype="masknumber" text="bind:MCT_POCTPAYM__RATE" mask="99%" editlimit="2" editlimitbymask="both"/>
				<Cell col="3" displaytype="number" edittype="masknumber" text="bind:MCT_DRYGRASS_SILAGE_WGHTVAL" mask="9.9" editlimit="3" editlimitbymask="both" suppress="0"/>
				<Cell col="4" displaytype="number" edittype="masknumber" text="bind:MCT_SPORT_TON_AMOUNT" mask="99,999" editlimit="5" editlimitbymask="both" combodisplayrowcount="5"/>
			  </Band>
'''

	print ''
	mydict = util.load_file("dict.txt")
	o = BindItemVar(s, mydict)
	print o.getVarStr();
	print ''
	print ''
	print o.getChkStr()
	print ''
	print o.getChkStr2()
	print ''
	print o.getChkStr3()


		
Example #38
selection: """

# lines read when the file was first loaded, so the decipher can be restarted
# without re-reading the file
file_contents = []
# list to be manipulated
cipher_text_list = []
# keep a history of changes for further analysis later
history = History()
substitution = Substitution()
poly_alphabetic = PolyAlphabetic()
selection = ""
while selection != "x":
    selection = input(menu)
    if selection == "1":
        file_contents = util.load_file(history)
        cipher_text_list = file_contents
        print("Done")
    if selection == "2":
        # reset everything but history
        cipher_text_list = file_contents
        history.append("resetting cipher to", str(cipher_text_list))
        substitution = Substitution()
        print("Done")
    if selection == "3":
        history.display()
    if selection == "4":
        history.save(input("output file to save to: "))
    if selection == "5":
        util.find_pattern(cipher_text_list, history)
    if selection == "6":
Example #39
def test_reportsettings_merge_basic(info):
    info.run_merge()
    exp = util.load_file(info.expected)
    res = util.load_file(info.result)
    util.file_compare(exp, res)
    info.cleanup()
Example #40
import util
from keyword_anaylze import keyword_anaylze

date = "2015-12-02"
topic_loc = "../Resources/topic/"
result_loc = "../Resources/result/"

if __name__ == "__main__":
    global topic_loc, result_loc
    global date

    topic = util.load_file(topic_loc + date)

    for tp in topic:
        print(tp)

        ka = keyword_anaylze(date)
        senti, news, network, counter = ka.anaylze(tp)

        f = open(result_loc + tp, "w")
        f.write("{0:.1f}% (p:{1}, n:{2})\n\n".format(
            senti[1] / (senti[1] + senti[0]) * 100, senti[1], senti[0]))

        # write counter of each community
        f.write("positive\n")
        for i in range(8):
            f.write("{0}. {1}\n".format(i, counter[2 * i + 1]))
        f.write("negative\n")
        for i in range(8):
            f.write("{0}. {1}\n".format(i, counter[2 * i]))
Example #41
def make_stereogram(depthmap, pattern, color):
    dm_img = util.load_file(depthmap)
    dm_img = util.redistribute_grays(dm_img, FORCE_DEPTH)

    pattern_width = int((dm_img.size[0] / PATTERN_FRACTION))
    canvas_img = im.new(mode="RGB",
                        size=(dm_img.size[0] + pattern_width, dm_img.size[1]),
                        color=color)
    pattern_strip_img = im.new(mode="RGB",
                               size=(pattern_width, dm_img.size[1]),
                               color=(0, 0, 0))
    pattern_raw_img = util.load_file(pattern)
    p_w = pattern_raw_img.size[0]
    p_h = pattern_raw_img.size[1]

    pattern_raw_img = pattern_raw_img.resize(
        (pattern_width, (int)((pattern_width * 1.0 / p_w) * p_h)), im.LANCZOS)
    region = pattern_raw_img.crop(
        (0, 0, pattern_raw_img.size[0], pattern_raw_img.size[1]))
    y = 0

    while y < pattern_strip_img.size[1]:
        pattern_strip_img.paste(
            region,
            (0, y, pattern_raw_img.size[0], y + pattern_raw_img.size[1]))
        y += pattern_raw_img.size[1]

    # Oversample. Smoother results.
    dm_img = dm_img.resize(((int)(dm_img.size[0] * OVERSAMPLE),
                            (int)(dm_img.size[1] * OVERSAMPLE)))
    canvas_img = canvas_img.resize(((int)(canvas_img.size[0] * OVERSAMPLE),
                                    (int)(canvas_img.size[1] * OVERSAMPLE)))
    pattern_strip_img = pattern_strip_img.resize(
        ((int)(pattern_strip_img.size[0] * OVERSAMPLE),
         (int)(pattern_strip_img.size[1] * OVERSAMPLE)))
    pattern_width = pattern_strip_img.size[0]

    def shift_pixels(dm_start_x, depthmap_image_object, canvas_image_object,
                     direction):
        depth_factor = pattern_width * SHIFT_RATIO
        cv_pixels = canvas_image_object.load()
        while 0 <= dm_start_x < dm_img.size[0]:
            for dm_y in range(depthmap_image_object.size[1]):
                constrained_end = max(
                    0,
                    min(dm_img.size[0] - 1,
                        dm_start_x + direction * pattern_width))
                for dm_x in range(int(dm_start_x), int(constrained_end),
                                  direction):
                    dm_pix = dm_img.getpixel((dm_x, dm_y))
                    px_shift = int(dm_pix / 255.0 * depth_factor *
                                   (1 if WALL else -1)) * direction
                    if direction == 1:
                        cv_pixels[dm_x + pattern_width,
                                  dm_y] = canvas_img.getpixel(
                                      (px_shift + dm_x, dm_y))
                    if direction == -1:
                        cv_pixels[dm_x, dm_y] = canvas_img.getpixel(
                            (dm_x + pattern_width + px_shift, dm_y))

            dm_start_x += direction * pattern_strip_img.size[0]

    dm_center_x = dm_img.size[0] / 2
    canvas_img.paste(pattern_strip_img,
                     (int(dm_center_x), 0, int(dm_center_x + pattern_width),
                      canvas_img.size[1]))
    if not WALL:
        canvas_img.paste(pattern_strip_img,
                         (int(dm_center_x - pattern_width), 0,
                          int(dm_center_x), canvas_img.size[1]))
    shift_pixels(dm_center_x, dm_img, canvas_img, 1)
    shift_pixels(dm_center_x + pattern_width, dm_img, canvas_img, -1)

    if pattern:
        canvas_img = canvas_img.resize(
            ((int)(canvas_img.size[0] / OVERSAMPLE),
             (int)(canvas_img.size[1] / OVERSAMPLE)),
            im.LANCZOS)  # NEAREST, BILINEAR, BICUBIC, LANCZOS

    return canvas_img
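make_stereogram returns a PIL image, so a typical call just passes the two source image paths and saves the result; the file names and fill color below are illustrative:

# Illustrative call: depth map and tile pattern paths are placeholders.
result = make_stereogram('depthmap.png', 'pattern.png', color=(128, 128, 128))
result.save('stereogram.png')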
Example #42
import util
from keyword_anaylze import keyword_anaylze

date  = "2015-12-02"
topic_loc = "../Resources/topic/"
result_loc = "../Resources/result/"

if __name__ == "__main__":
	global topic_loc, result_loc
	global date

	topic = util.load_file(topic_loc+date)

	for tp in topic:
		print(tp)

		ka = keyword_anaylze(date)
		senti, news, network, counter = ka.anaylze(tp)
		
		f = open(result_loc+tp, "w")
		f.write("{0:.1f}% (p:{1}, n:{2})\n\n" .format(senti[1]/(senti[1]+senti[0])*100, senti[1], senti[0]))
		
		# write counter of each community
		f.write("positive\n")
		for i in range(8):
			f.write("{0}. {1}\n" .format(i, counter[2*i+1]))
		f.write("negative\n")
		for i in range(8):
			f.write("{0}. {1}\n" .format(i, counter[2*i]))

		for n in news:
Example #43
# -*- coding: utf-8 -*-

import re
from util import load_random_substring, load_file

random_file_content = load_random_substring(load_file(), 5000)

lines = random_file_content.split('\n')[1:-1]
words = re.compile(r'\s').split(random_file_content)

print([(len(w), w) for w in words])
Example #44
def test_sv_merge_basic(info):
    info.run_merge()
    exp = util.load_file(info.expected)
    res = util.load_file(info.result)
    util.file_compare(exp, res, sv_flattenify)
    info.cleanup()
Example #45
def test_lv_parse(ff):
    exp = util.load_file(ff)
    LV = fromfn(ff)
    res = LV.toxml()
    util.file_compare(exp, res)
Example #46
limit_max = 80

np.random.seed(231)
################################################################################
# Load CV Data
################################################################################
DATA_DIR = 'dataset/CVSplits/'
num_folds = 5

data_cv = []
label_cv = []
max_col_prior = 0

# First find the maximum number of features across all folds
for i in range(num_folds):
    _, _, max_col_prior = util.load_file(
        DATA_DIR + 'training0' + str(i) + '.data', max_col_prior)

#print(max_col_prior)

for i in range(num_folds):
    data_fold, label_fold, max_col_prior = util.load_file(
        DATA_DIR + 'training0' + str(i) + '.data', max_col_prior)
    data_cv.append(data_fold)
    label_cv.append(label_fold)

################################################################################
# Load Train, Dev and Test Data
################################################################################
DATA_DIR = 'dataset/'
data_tr, label_tr, max_col_prior = util.load_file(DATA_DIR + 'diabetes.train',
                                                  max_col_prior)
Example #47
    if (n == m):
        print(res)
    else:
        print(res[n - m:m - n])


def tester_acorr(dt, n, m):

    A = dt_in[0:n]

    res = auto_corr_opt(A, n, m)
    print(res)

    res = np.correlate(A, A, "full")
    if (n == m):
        print(res)
    else:
        print(res[n - m:m - n])


if __name__ == '__main__':
    fn = "crosscorr.in"
    dt_in = util.load_file(fn)
    dt_in = [x + y * 1j for x, y in dt_in]

    #tester_xcorr(dt_in, 5, 3)
    #tester_xcorr(dt_in, 16, 16)
    #tester_xcorr(dt_in, 32, 32)
    #tester_xcorr(dt_in, 32, 16)
    #tester_xcorr(dt_in, 39, 25)
    tester_acorr(dt_in, 16, 16)
Example #48
import argparse

from util import load_file

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--path',
                        required=True,
                        type=str,
                        help='The path to the output file')

    args = parser.parse_args()
    path = args.path

    lines = load_file(path)

    avg_length = len([w for l in lines for w in l]) / len(lines)

    print("Average sentence length: {0}".format(avg_length))