Example #1
def main(settings_file: str):
    """
    Calls the routine that downloads and checks all files,
    then runs the data-analysis routine.
    Parameters
    ----------
    settings_file : str
        The name of the settings file
        which is obtained from the command line
    Returns
    -------
    None.
    """
    class_error.student_info()
    class_error.condition_info()
    class_error.working_out()
    try:
        print('ini ' + settings_file + ' : ', end="")
        ini = load_data.load_ini(settings_file)
    except (OSError, ValueError):
        raise class_error.SettingsFileError()
    data = class_information.Information()
    load_data.load_file(data, ini[0]['csv'], ini[0]['json'],
                        ini[0]['encoding'])
    data.data_analysis(ini[1]['fname'], ini[1]['encoding'])
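
The docstring above says the settings file name comes from the command line. A minimal, hypothetical wiring for that entry point could look like the sketch below; the argument handling and usage message are assumptions, not part of the original project.

import sys

if __name__ == '__main__':
    # Hypothetical command-line wiring for main(); the script name in the
    # usage message is only a placeholder.
    if len(sys.argv) != 2:
        print('usage: python <script>.py <settings_file>')
        sys.exit(1)
    main(sys.argv[1])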
Example #2
def update_plot_raw_signal():
    # Reload the file currently selected in the file menu and refresh the
    # raw-signal data source; x values are spaced by the sparseness factor.
    new_file_name = file_menu.value
    data = load_data.load_file(new_file_name, _WINDOW_SCALING)
    data_raw_signal.data = dict(x=np.arange(0,
                                            _SPARSNESS * len(data),
                                            step=_SPARSNESS),
                                y=data)
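
The callback reads file_menu.value, which suggests file_menu is a selection widget. A sketch of how it might be created and wired to this callback, assuming a Bokeh Select widget (the widget choice and title are assumptions; get_file_names() and _BASE_FILE_NAME appear in Example #8):

from bokeh.models import Select

# Assumed wiring: a Select listing the available files, re-running the
# callback whenever the selection changes.
file_menu = Select(title='File', options=load_data.get_file_names(),
                   value=_BASE_FILE_NAME)
file_menu.on_change('value', lambda attr, old, new: update_plot_raw_signal())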
Example #3
def main(method_flag):
    # load data
    source_df, target_df = ld.load_file()

    predicts, corrects = [], []

    random.seed(123)
    np.random.seed(123)

    # shuffle=False keeps the folds deterministic, so no random_state is needed
    kf = KFold(shuffle=False, n_splits=mc._FOLD_NUM)
    fold_num = 1
    for train, test in kf.split(target_df):
        print('{0}/{1}'.format(fold_num, mc._FOLD_NUM))
        target_train = target_df.iloc[train]
        target_test = target_df.iloc[test]

        idx, labels = transfer_model(source_df, target_train, target_test,
                                     method_flag, fold_num)
        predicts.extend(idx.tolist())
        corrects.extend(labels[0].tolist())
        fold_num += 1

    # save results: one row per sample with prediction, true label and error flag
    predicts = np.array(predicts)
    corrects = np.array(corrects)
    err = (predicts != corrects).astype(int)
    results = np.column_stack((predicts, corrects, err))
    save_data = pd.DataFrame(results)
    save_data.to_csv(mc._RESULT_FILE, index=False, header=False)
    # save_data.to_csv('../results/results.csv', index=False, header=False)

    # append the overall accuracy (%) as the last line of the result file
    with open(mc._RESULT_FILE, 'a') as fp:
        fp.write('%f\n' % ((1.0 - np.mean(err)) * 100.0))
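
This example depends on a configuration module imported as mc, which is not shown. A minimal stand-in with hypothetical values might look like the following; only _FOLD_NUM and _RESULT_FILE are actually referenced, and the path simply follows the commented-out alternative above.

# mc.py -- hypothetical configuration stand-in for Example #3
_FOLD_NUM = 5                            # number of cross-validation folds (assumed)
_RESULT_FILE = '../results/results.csv'  # matches the commented-out path above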
Example #4
    def load_file(self, filename):
        '''
        Load a file into the network; currently only TSV is supported.
        '''
        import load_data
        load_data.load_file(self, filename)
Example #5
import load_data as ld

h, w, names, coordinates, distances = ld.get_data_to_dict(ld.load_file("./metro.gr"))
ld.draw_subway(h, w, coordinates, distances)
p, d = ld.dijkstra(distances, 0, 350)
print(p)
print(list(map(lambda x: names[x], p)))
ld.draw_subway(h, w, coordinates, distances, path=p, file_name="trajet.svg")
Example #8
        spectrum_source.data = dict(f=f, y=spectrum)
    spectrum_plot.x_range.end = freq.value * 0.001

    # for each bin level, light the first ceil(x) equalizer segments with a
    # fading alpha; segments above that level stay at zero
    alphas = []
    for x in bins:
        a = np.zeros_like(eq_range)
        n = int(ceil(x))
        a[:n] = (1 - eq_range[:n] * 0.05)
        alphas.append(a)
    eq_source.data['alpha'] = np.hstack(alphas)


_SPARSNESS = 3
_WINDOW_SCALING = 2
_BASE_FILE_NAME = load_data.get_file_names()[0]
_BASE_FILE = load_data.load_file(_BASE_FILE_NAME, _WINDOW_SCALING)
_BASE_FILE_RAW = load_data.load_raw_file(_BASE_FILE_NAME)
MAX_FREQ_KHZ = MAX_FREQ * 0.001
NUM_GRAMS = 800
GRAM_LENGTH = 512
TILE_WIDTH = 200
EQ_CLAMP = 20
PALETTE = [
    '#081d58', '#253494', '#225ea8', '#1d91c0', '#41b6c4', '#7fcdbb',
    '#c7e9b4', '#edf8b1', '#ffffd9'
]
PLOTARGS = dict(tools="", toolbar_location=None, outline_line_color='#595959')

data_raw_signal = ColumnDataSource(
    data=dict(x=np.arange(0, _SPARSNESS * len(_BASE_FILE), step=_SPARSNESS),
              y=_BASE_FILE))
Example #9
from flask import Flask
from flask import request, jsonify
import load_data

app = Flask(__name__)

l = load_data.load_file()  # load the data set and store it in l


def find(l, d):
    'Search the data set and return the room number, or -1 if not found'
    for i in l:
        if i['id'] == d['id'] and i['data'] == d['data']:
            return i['num']
    return -1


@app.route('/found', methods=['POST'])
def index():
    print('Received data:', request.json)
    rec_l = request.json
    return_list = [int(find(l, i)) for i in rec_l]
    return jsonify(return_list)


app.run()
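
A minimal client-side sketch for exercising the /found endpoint above; the host, port, and the exact record fields are assumptions inferred from find(), not taken from the original project.

import requests

# Hypothetical payload: a list of records with 'id' and 'data' keys, matching
# the fields that find() compares against.
payload = [{'id': 1, 'data': '2021-01-01'}]
resp = requests.post('http://127.0.0.1:5000/found', json=payload)
print(resp.json())  # list of room numbers, -1 for records with no match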
Example #10
import load_data
import gale_shapley

team_file = "teams.tsv"
app_file = "applicants.tsv"
out_file = "out.csv"

if __name__ == "__main__":
    print("Starting...")
    teams = load_data.load_file(team_file, load_data.team)
    print("Loaded teams...")
    apps = load_data.load_file(app_file, load_data.app)
    print("Loaded applicants...")
    teams, rejects, iterations = gale_shapley.run(teams, apps)
    print("Process done in " + str(iterations) + " iterations...")
    with open(out_file, 'w') as f:
        sep_char = ","
        for team in teams:
            f.write("'==" + sep_char + team.serialize(sep_char) + '\n')
            for member in team.members:
                f.write(team.name + sep_char + member.serialize(sep_char) +
                        '\n')
        f.write("'==" + sep_char + "Teamless\n")
        for reject in rejects:
            f.write(reject.Data_read_from)
    print("File written")
Example #11
    def load_file(self, filename):
        '''
        Load a file into the network; currently only TSV is supported.
        '''
        import load_data
        load_data.load_file(self, filename)