Example #1
def infer(self, input):
    # load and preprocess each MRI modality
    inputAsNpArr1 = self._imageProcessor.loadAndPreprocess(
        input["t1"]["fileurl"], id="t1")
    inputAsNpArr2 = self._imageProcessor.loadAndPreprocess(
        input["t1c"]["fileurl"], id="t1c")
    inputAsNpArr3 = self._imageProcessor.loadAndPreprocess(
        input["t2"]["fileurl"], id="t2")
    inputAsNpArr4 = self._imageProcessor.loadAndPreprocess(
        input["flair"]["fileurl"], id="flair")
    print('loading done')
    # recover the affine transform from the reference volume
    affine = self._imageProcessor.returnAffine(input["t1"]["fileurl"])
    print('affine recovered')
    # assemble the four modalities into one dataset for the first U-Net
    dataset = preprocessForNet1(inputAsNpArr1, inputAsNpArr2,
                                inputAsNpArr3, inputAsNpArr4, affine)
    print('dataset assembled')
    print('first U-Net running')
    outNet1 = predict1(dataset, self._model1)
    print('preprocessing step 2')
    # feed the first network's output into the second U-Net
    dataset = preprocessForNet2(dataset, outNet1[0])
    print('second U-Net running')
    outNet2 = predict2(dataset, self._model2)
    #outNet2 = predict2(outNet1, self._model2)
    print('done, postprocessing now')
    output = self._imageProcessor.computeOutput(outNet2[0])
    return output
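For context, a minimal sketch of how this two-stage inference might be invoked; the file URLs and the model_instance wrapper are illustrative, not from the source:

# Hypothetical invocation of the cascaded U-Net inference above.
sample = {
    "t1":    {"fileurl": "data/patient01_t1.nii.gz"},
    "t1c":   {"fileurl": "data/patient01_t1c.nii.gz"},
    "t2":    {"fileurl": "data/patient01_t2.nii.gz"},
    "flair": {"fileurl": "data/patient01_flair.nii.gz"},
}
segmentation = model_instance.infer(sample)  # model_instance: instance of the class above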
def process(tag_name, tc, save_flag=False, try_times=2000):
    """ Repeat training and validation, and save the results to a file.
    """
    _dir = create_dir([tag_name])

    with open(tag_name + "/av_verify_report" + tag_name + ".csv", "w") as fw:
        for i in range(try_times):
            print("--try count: {0}/{1}--".format(i, try_times))

            # build the teacher (training) data
            verify_data, teacher_dates, x, y = None, None, None, None
            if save_flag:
                _save_name_teacher = tag_name + "/av_teaching_data" + tag_name + "_{0:05d}".format(i) + ".csv"  # used when saving teacher and verification data via tc.save_teacher(filename)
                _save_name_verify = tag_name + "/av_verify_data" + tag_name + "_{0:05d}".format(i) + ".csv"
                verify_data, teacher_dates, x, y = tc.save_teacher(save_name_teacher=_save_name_teacher, save_name_verify=_save_name_verify)  # the dataset contents change on every call
            else:
                verify_data, teacher_dates, x, y = tc.create_dataset()  # the dataset contents change on every call
            features_dict = tc.get_all_features()
            training_data = (x, y)

            # train and save
            clf, score = learning.learn(training_data)
            mc.save(clf, tag_name)
            fw.write("{0},{1}\n".format(i, score))

            # check for overfitting (not actually useful at the moment)
            dates = sorted(verify_data.keys())                   # dates not used for training
            if len(dates) == 0:
                continue
            result = predict.predict2(clf, dates, features_dict)  # validate on data not used for training
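Across these snippets, predict.predict2 evidently returns a dict mapping each date to a probability-like score in [0, 1] (later code binarizes result[date] against a threshold). A shape-compatible stub under that assumption; predict_proba stands in for whatever interface the real classifier exposes:

def predict2_stub(clf, dates, features_dict):
    """Illustrative stand-in for predict.predict2: {date: P(positive)}."""
    # features_dict[d] is (None, feature_vector, None), per the dummy-padding below
    return {d: float(clf.predict_proba([features_dict[d][1]])[0][1])
            for d in dates if d in features_dict}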
Example #3
    def handle(self):

        global sensor_data
        stream_bytes = b' '

        try:
            # stream video frames one by one
            while True:
                stream_bytes += self.rfile.read(1024)
                first = stream_bytes.find(b'\xff\xd8')
                last = stream_bytes.find(b'\xff\xd9')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    #gray = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
                    image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8),
                                         cv2.IMREAD_COLOR)
                    cv2.imshow('image', image)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        print("car stopped")
                        break
                    self.count += 1
                    if self.count > 50:
                        # after 50 frames, predict a steering direction and reset the counter
                        self.count = 0
                        direction = predict2(image)
                        self.car.steer(direction)

        finally:
            cv2.destroyAllWindows()
            sys.exit()
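The frame splitting above keys on the JPEG start-of-image (\xff\xd8) and end-of-image (\xff\xd9) markers. A minimal, socket-free sketch of the same framing logic (the function name is illustrative):

import cv2
import numpy as np

def extract_jpeg_frames(buffer):
    """Split an MJPEG byte buffer into decoded frames; return (frames, leftover)."""
    frames = []
    while True:
        first = buffer.find(b'\xff\xd8')  # JPEG start-of-image marker
        last = buffer.find(b'\xff\xd9')   # JPEG end-of-image marker
        if first == -1 or last == -1:
            break  # no complete frame yet; keep the remainder for the next read
        jpg, buffer = buffer[first:last + 2], buffer[last + 2:]
        frames.append(cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8),
                                   cv2.IMREAD_COLOR))
    return frames, buffer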
Example #4
def retail():
	if request.method == "POST":
		print(request.form)
		if request.form:
			try:
				steps = int(request.form.to_dict(flat=False)['steps'][0])
				print(steps)
				steps = min(steps, 12)  # cap the forecast horizon at 12 steps
				return render_template('retail.html', steps=steps,
				                       prediction1=predict(steps),
				                       prediction2=predict2(steps),
				                       prediction3=predict3(steps),
				                       prediction4=predict4(steps))
			except Exception:  # malformed form input falls through to the empty page
				pass
	# GET requests, empty forms, and parse errors all render the empty page
	return render_template('retail.html', prediction1=None, prediction2=None,
	                       prediction3=None, prediction4=None)
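One way to exercise this view, assuming the app runs locally on the default Flask port and the route is /retail (both are assumptions; neither is shown in the snippet):

import requests

# Hypothetical local test; the URL and route are illustrative.
resp = requests.post("http://127.0.0.1:5000/retail", data={"steps": 6})
print(resp.status_code)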
def sub_process(tag_name, target_dir, feature_generator, terms):
	""" Load the saved classifiers one after another and write their evaluation results to a file.
	"""
	flist = mc.get_path_list(target_dir)  # get the list of classifier files
	print(flist)
	if len(flist) == 0:
		print("0 files or dir found.")
		return

	# fetch the correct answers and the feature vectors
	data = read_correct_and_create_features(feature_generator, terms)

	# repack the feature vectors into a dict for evaluation
	dates = sorted(data.keys())
	features_dict = {}                                      # feature vectors keyed by date
	for _date in dates:
		features_dict[_date] = (None, data[_date][2], None)  # only the feature vector matters, so the other slots hold dummy values

	# collect the prediction results
	predicted_result_dict = {}
	for fpath in flist:
		# load and restore each classifier
		clf = mc.load(fpath)    # restore the object
		predicted_result = predict.predict2(clf, dates, features_dict)
		predicted_result_dict[fpath] = predicted_result

	# save the results to a file
	report_path = target_dir + "/learned_machine_report.csv"
	with open(report_path, "w") as fw:
		# aggregate while varying the threshold
		for th in numpy.arange(0.4, 0.9, 0.2):  # th: threshold
			result = {}                         # results binarized at the threshold
			correct = []
			for fpath in flist:
				predicted_result = predicted_result_dict[fpath]
				if fpath not in result:
					result[fpath] = []
				correct = []
				for _date in dates:
					if _date in predicted_result:
						c = data[_date][0]
						correct.append(c)
						val = float(c) - int((1.0 - th) + predicted_result[_date])
						result[fpath].append(val)

			# write the dates
			dates_arr = [str(x) for x in dates]
			_str = ",".join(dates_arr)
			fw.write(",date,")
			fw.write(_str)
			fw.write("\n")
			# write the correct answers
			correct = [str(c) for c in correct]
			_str = ",".join(correct)
			fw.write(",correct,")
			fw.write(_str)
			fw.write("\n")
			# write the results
			for fpath in flist:
				th_data = result[fpath]
				th_data = [str(x) for x in th_data]
				_str = ",".join(th_data)
				fw.write(fpath)          # keeps the columns separated when viewed in Excel
				fw.write(",")
				fw.write(str(th))
				fw.write(",")
				fw.write(_str)
				fw.write("\n")
	return report_path
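The expression int((1.0 - th) + p) used above binarizes a probability p at threshold th: for values in [0, 1], the sum reaches 1.0 exactly when p >= th, and int() truncates it to 0 or 1. (sub_process2 below adds 0.0001 to guard against floating-point rounding at the boundary.) A quick check with safe values:

def binarize(p, th):
    """Truncation-based thresholding, as in the report loops above."""
    return int((1.0 - th) + p)

for p, th in [(0.30, 0.5), (0.49, 0.5), (0.50, 0.5), (0.75, 0.5)]:
    print(p, th, binarize(p, th))  # -> 0, 0, 1, 1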
def sub_process2(tag_name, target_dir, feature_generator, terms):
	""" Load the saved classifiers one after another and write their evaluation results to a file.
	"""
	flist = mc.get_path_list(target_dir)  # get the list of classifier files
	print(flist)
	if len(flist) == 0:
		print("0 files or dir found.")
		return

	# fetch the correct answers and the feature vectors
	data = read_correct_and_create_features(feature_generator, terms)
	dates = sorted(data.keys())

	# count the correct labels and check that the computation makes sense
	correct = [int(data[_date][0]) for _date in dates]
	print("correct: ", correct)
	amount_of_positive = correct.count(1)
	amount_of_negative = correct.count(0)
	if amount_of_positive == 0 or amount_of_negative == 0:  # if either is 0, there is nothing to compute
		print("amount of positive/negative is 0. So, fin.")
		return

	# repack the feature vectors into a dict for evaluation
	features_dict = {}                                      # feature vectors keyed by date
	for _date in dates:
		features_dict[_date] = (None, data[_date][2], None)  # only the feature vector matters, so the other slots hold dummy values

	# collect the prediction results
	predicted_result_dict = {}
	for fpath in flist:
		# load and restore each classifier
		clf = mc.load(fpath)    # restore the object
		predicted_result = predict.predict2(clf, dates, features_dict, save=False, feature_display=False)  # stored in a variable as a reminder of what it returns
		predicted_result_dict[fpath] = predicted_result

	# compute the AUC and save the results to files
	roc_report = target_dir + "/learned_machine_report_roc.csv"
	auc_report = target_dir + "/learned_machine_report_auc.csv"
	auc_max = (0, "")
	with open(roc_report, "w") as fw_roc, open(auc_report, "w") as fw_auc:
		for fpath in flist:
			predicted_result = predicted_result_dict[fpath]
			# aggregate while varying the threshold
			tpr_array = []
			fpr_array = []
			for th in numpy.arange(0.1, 1.0, 0.05):  # th: threshold
				tp = 0  # true positives
				fp = 0  # false positives
				for _date in dates:
					c = int(data[_date][0])
					result = int((1.0 - th) + predicted_result[_date] + 0.0001)
					if c == 0 and result == 1:
						fp += 1
					elif c == 1 and result == 1:
						tp += 1
				tp_rate = tp / amount_of_positive
				fp_rate = fp / amount_of_negative
				tpr_array.append(tp_rate)
				fpr_array.append(fp_rate)

				fw_roc.write("{0},{1},{2},{3}\n".format(fpath, th, fp_rate, tp_rate))

			# compute the AUC (area under the ROC curve)
			tpr_array.append(0)
			fpr_array.append(0)
			_x, _y, auc = 1, 1, 0
			for x, y in zip(fpr_array, tpr_array):
				w = _x - x
				auc += (y + _y) * w / 2  # trapezoidal integration
				_x = x
				_y = y
			fw_auc.write("{0},{1}\n".format(fpath, auc))
			if auc_max[0] < auc:
				auc_max = (auc, fpath)
		fw_auc.write("AUC max:{0},{1}\n".format(auc_max[1], auc_max[0]))
		print("AUC max:", auc_max)
	return roc_report  # it is unclear what this return value was ever used for
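The hand-rolled trapezoidal sum can be cross-checked against numpy.trapz (renamed numpy.trapezoid in NumPy 2.0) on the same ROC points; the sample arrays below are illustrative:

import numpy

# Illustrative ROC points in the same orientation as the code above:
# thresholds increase, so (fpr, tpr) run from (1, 1) down toward (0, 0).
fpr_array = [1.0, 0.6, 0.3, 0.1, 0.0]
tpr_array = [1.0, 0.9, 0.8, 0.5, 0.0]

# numpy.trapz integrates y over x; reverse so x is increasing.
auc = numpy.trapz(tpr_array[::-1], fpr_array[::-1])
print(auc)  # area under this sample curve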
Example #8
# Load the data
data_transforms, image_datasets, dataloaders = predict.data_transforms()
# Build the model
model = predict.modelSetup(arch)
# Freeze the pretrained parameters
predict.frozeParameters(model)

# Resolve the compute device
device = torch.device("cuda:0" if device == 'gpu' else "cpu")

# Build and train the classifier
classifier, model, criterion, optimizer = predict.make_classifier(
    arch, model, device, hidden_units, dropout, learning_rate)
#predict.train_model(dataloaders,optimizer,model,device,epochs,criterion)

# Save checkpoint
#predict.saveCheckPoint(model,image_datasets,epochs,optimizer)
#print('Model Trained')

# Get the category names from --cat_fileName
cat_to_name = predict.label_mapping(cat_fileName)
#predict.validation_accuracy(dataloaders["test"],model,device,learning_rate)
image = predict.process_image(image_path)

probabilitys, classes = predict.predict2(image, model, device, topk)
#print(probabilitys)
#print(classes)
predict.check_sanity(image, probabilitys, classes, cat_to_name)
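For reference, a predict.predict2-style top-k step usually reduces to torch.topk over the softmaxed logits; a minimal sketch under that assumption (the real function may differ):

import torch

def topk_predict(image_tensor, model, device, topk=5):
    """Return (probabilities, class indices) for the top-k classes of one image."""
    model.to(device).eval()
    with torch.no_grad():
        logits = model(image_tensor.unsqueeze(0).to(device))  # add a batch dimension
        probs = torch.softmax(logits, dim=1)
        top_p, top_idx = probs.topk(topk, dim=1)
    return top_p.squeeze(0).tolist(), top_idx.squeeze(0).tolist()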
def process(tag_name, tc, feature_func, save_flag=False, try_times=2000):
	""" Repeat training and validation, and save the results to a file.
	"""
	create_dir([tag_name])

	with open(tag_name + "/av_verify_report" + tag_name + ".csv", "w") as fw:
		for i in range(try_times):
			print("--try count: {0}/{1}--".format(i, try_times))

			# build the teacher (training) data
			verify_data, teacher_dates, teacher_features, teacher_flags = None, None, None, None
			if save_flag:
				_save_name_teacher = tag_name + "/av_teaching_data" + tag_name + "_{0:05d}".format(i) + ".csv"  # used when saving teacher and verification data via tc.save_teacher(filename)
				_save_name_verify = tag_name + "/av_verify_data" + tag_name + "_{0:05d}".format(i) + ".csv"
				verify_data, teacher_dates, teacher_features, teacher_flags = tc.save_teacher(save_name_teacher=_save_name_teacher, save_name_verify=_save_name_verify)  # the dataset contents change on every call
			else:
				verify_data, teacher_dates, teacher_features, teacher_flags = tc.create_dataset()  # the dataset contents change on every call
			features_dict = tc.get_all_features()
			training_data = (teacher_features, teacher_flags)
			dates = sorted(verify_data.keys())                   # dates not used for training

			# train
			clf = learning.learn(training_data, tag_name + "/av_entry_temp{0}_{1:05d}.pickle".format(tag_name, i))
			result = predict.predict2(clf, dates, features_dict)  # validate on data not used for training

			# save the individual results too, if requested
			if save_flag:
				with open(tag_name + "/av_verify_data" + tag_name + "_{0:05d}".format(i) + "_result.csv", "w") as fw_result:
					for date in dates:
						if date in result:
							try:      # this try is probably unnecessary...
								fw_result.write(str(date))
								fw_result.write(",")
								fw_result.write(str(verify_data[date]))
								fw_result.write(",")
								fw_result.write(str(result[date]))
								fw_result.write("\n")
							except:
								pass

			# aggregate the results
			scale = 10
			zero = [0.000001] * scale  # seeded with 0.000001 because sum() is used as a denominator and would fail at 0
			one = [0.000001] * scale
			for date in dates:
				if date in result:
					try:              # this try is probably unnecessary...
						c = verify_data[date]
						val = c - result[date]
						if int(c) == 0:
							zero[abs(int(val * scale))] += 1
						elif int(c) == 1:
							one[abs(int(val * scale))] += 1
						#print(val)
					except:
						pass

			# save the final results to the summary file
			zero = [str(x / sum(zero)) for x in zero]  # normalize
			one = [str(x / sum(one)) for x in one]
			fw.write("{0},".format(i))
			fw.write(",".join(zero))
			fw.write(",,")  # keeps the columns separated when viewed in Excel
			fw.write(",".join(one))
			fw.write("\n")
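For clarity, the zero/one arrays above are 10-bin histograms of the absolute prediction error, seeded with a tiny epsilon so normalization never divides by zero. A standalone sketch of the same bucketing; the clamp on the last bin is an addition (in the original, an error of exactly 1.0 would index past the list and be swallowed by the try):

scale = 10
bins = [0.000001] * scale  # epsilon seed keeps sum(bins) > 0

# (correct label, predicted score) pairs; values are illustrative
for correct, predicted in [(0, 0.12), (0, 0.48), (1, 0.93), (1, 0.31)]:
    err = abs(correct - predicted)
    bins[min(int(err * scale), scale - 1)] += 1  # clamp keeps err == 1.0 in range

normalized = [x / sum(bins) for x in bins]
print(normalized)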
def sub_process(tag_name, target_dir, feature_func, raw_data, terms):
	""" Load the saved classifiers one after another and write their evaluation results to a file.
	"""
	flist = glob.glob(target_dir + "/*f" + tag_name + "*.pickle")  # get the list of classifier files
	print(flist)
	if len(flist) == 0:
		print("0 files found.")
		return

	# fetch the correct answers and the feature vectors
	data = read_correct_and_create_features(feature_func, raw_data, terms)

	# repack the feature vectors into a dict for evaluation
	dates = sorted(data.keys())
	features_dict = {}                                      # feature vectors keyed by date
	for _date in dates:
		features_dict[_date] = (None, data[_date][2], None)  # only the feature vector matters, so the other slots hold dummy values

	# collect the prediction results
	predicted_result_dict = {}
	for fname in flist:
		# load and restore each classifier
		clf = None
		with open(fname, 'rb') as f:
			clf = pickle.load(f)    # restore the object
		predicted_result = predict.predict2(clf, dates, features_dict)
		predicted_result_dict[fname] = predicted_result

	# save the results to a file
	report_path = target_dir + "/learned_machine_report.csv"
	with open(report_path, "w") as fw:
		# aggregate while varying the threshold
		for th in numpy.arange(0.5, 1.0, 0.1):  # th: threshold
			result = {}                         # results binarized at the threshold
			for fname in flist:
				predicted_result = predicted_result_dict[fname]
				if fname not in result:
					result[fname] = []
				for _date in dates:
					if _date in predicted_result:
						c = data[_date][0]
						val = float(c) - int((1.0 - th) + predicted_result[_date])
						result[fname].append(val)

			# write the dates
			dates_arr = [str(x) for x in dates]
			_str = ",".join(dates_arr)
			fw.write(",,")
			fw.write(_str)
			fw.write("\n")
			# write the results
			for fname in flist:
				th_data = result[fname]
				th_data = [str(x) for x in th_data]
				_str = ",".join(th_data)
				fw.write(fname)          # keeps the columns separated when viewed in Excel
				fw.write(",")
				fw.write(str(th))
				fw.write(",")
				fw.write(_str)
				fw.write("\n")
	return report_path