def setUp(self):
    data_handler.JsonParser = tests.fakers.FakeParser
    data_handler.MetadataService = tests.fakers.FakeProvider
    data_handler.LocalFile = tests.fakers.FakeProvider
    self._data_handler = data_handler.DataHandler()
    self._data_handler_meta = data_handler.DataHandler(
        provider="metadata-service")
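# setUp swaps the module-level collaborators for fakes, so DataHandler picks
# them up without any injection plumbing. A minimal sketch of what
# tests.fakers could contain; the method surface shown here is an assumption,
# not the project's actual fake API:
class FakeProvider:
    def fetch(self):
        # canned payload instead of hitting a local file or the metadata service
        return '{"dummy": true}'

class FakeParser:
    def parse(self, raw):
        return {"dummy": True}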
def test_generate_querents(self):
    expect_json = '''
    [
        { "日付": "2020-03-20", "小計": 100 },
        { "日付": "2020-03-21", "小計": 117 },
        { "日付": "2020-03-22", "小計": 99 },
        { "日付": "2020-03-23", "小計": 311 }
    ]
    '''.strip()
    null_data = self.__generate_null_data(datetime.datetime(2020, 3, 24))
    expect = json.loads(expect_json)
    expect.extend(null_data)
    dh = handler.DataHandler(
        patients_csvfile=self.patients_csvfile,
        data_summary_csvfile=self.data_summary_csvfile)
    result = dh.generate_querents()
    self.assertListEqual(result, expect)
def test_generate_patients_summary_by_date(self):
    expect_json = '''
    [
        { "日付": "2020-03-17", "小計": 1 },
        { "日付": "2020-03-18", "小計": 0 },
        { "日付": "2020-03-19", "小計": 2 },
        { "日付": "2020-03-20", "小計": 3 }
    ]
    '''.strip()
    # The test data covers only the dates above, so pad the expectation
    # with entries for every date from 2020-03-21 through today.
    null_data = self.__generate_null_data(datetime.datetime(2020, 3, 21))
    expect = json.loads(expect_json)
    expect.extend(null_data)
    dh = handler.DataHandler(
        patients_csvfile=self.patients_csvfile,
        data_summary_csvfile=self.data_summary_csvfile)
    result = dh.generate_patients_summary_by_date()
    self.assertListEqual(result, expect)
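# Both tests above pad their expectations with zero-count rows from a start
# date through today. A minimal sketch of what the __generate_null_data helper
# presumably does, inferred from how it is used (the real implementation lives
# in the test class):
def _generate_null_data(start_date):
    null_data = []
    date = start_date
    while date <= datetime.datetime.now():
        null_data.append({"日付": date.strftime("%Y-%m-%d"), "小計": 0})
        date += datetime.timedelta(days=1)
    return null_data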
def __init__(self):
    # initial placeholder dictionaries
    self.X = {}
    self.gt_bbox = {}
    self.im_dims = {}

    # for training
    self.X['train'] = tf.placeholder(tf.float32, [None, None, None, 3])  # [batch_size, height, width, channel]
    self.gt_bbox['train'] = tf.placeholder(tf.int32, [None, 5])
    self.im_dims['train'] = tf.placeholder(tf.int32, [None, 2])

    # for testing
    self.X['test'] = tf.placeholder(tf.float32, [None, None, None, 3])
    self.im_dims['test'] = tf.placeholder(tf.int32, [None, 2])

    # Define the network output
    self.cnn = {}
    self.rpn = {}
    self.roi = {}
    self.fastrcnn = {}

    # Anchor boxes will have dimensions scale * 16 * ratio in the image space
    self.anchor_scale = [8, 16, 32]
    self.num_steps = 100
    self.datahandler = data_handler.DataHandler()
    self.batch_size = 1
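# The anchor_scale comment above means each anchor covers roughly scale * 16
# pixels per side (16 being the feature stride), reshaped by an aspect ratio.
# A quick standalone illustration, assuming the usual 0.5/1/2 ratios
# (ratio = height/width, area kept roughly constant):
for scale in [8, 16, 32]:
    for ratio in [0.5, 1.0, 2.0]:
        side = scale * 16
        w = side / (ratio ** 0.5)
        h = side * (ratio ** 0.5)
        print("scale=%d ratio=%.1f -> ~%dx%d px" % (scale, ratio, w, h))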
def test_generate_main_summary(self):
    expect_json = '''
    {
        "attr": "累計",
        "value": 19,
        "children": [
            { "attr": "入院中", "value": 18 },
            { "attr": "死亡", "value": 0 },
            { "attr": "退院", "value": 1 }
        ]
    }
    '''.strip()
    dh = handler.DataHandler(
        patients_csvfile=self.patients_csvfile,
        data_summary_csvfile=self.data_summary_csvfile)
    result = dh.generate_main_summary()
    expect = json.loads(expect_json)
    expect["date"] = self.datetime_now_str
    self.assertDictEqual(result, expect)
def cve_filter():
    query_parameters = request.args
    cve_id = query_parameters.get('id')
    dh = data_handler.DataHandler()
    if not cve_id:
        return page_not_found(404)
    return dh.querry_cve(cve_id)
def test_generate_patients(self):
    expect_json = '''
    [
        { "リリース日": "2020-03-17T08:00:00", "居住地": "大分市", "年代": "10代", "性別": "女性", "退院": "", "date": "2020-03-17" },
        { "リリース日": "2020-03-19T08:00:00", "居住地": "臼杵市", "年代": "20代", "性別": "男性", "退院": "", "date": "2020-03-19" },
        { "リリース日": "2020-03-19T08:00:00", "居住地": "臼杵市", "年代": "30代", "性別": "女性", "退院": "", "date": "2020-03-19" },
        { "リリース日": "2020-03-20T08:00:00", "居住地": "大分市", "年代": "40代", "性別": "女性", "退院": "", "date": "2020-03-20" },
        { "リリース日": "2020-03-20T08:00:00", "居住地": "大分市", "年代": "60代", "性別": "女性", "退院": "", "date": "2020-03-20" },
        { "リリース日": "2020-03-20T08:00:00", "居住地": "大分市", "年代": "90代", "性別": "女性", "退院": "", "date": "2020-03-20" }
    ]
    '''.strip()
    expect = json.loads(expect_json)
    dh = handler.DataHandler(
        patients_csvfile=self.patients_csvfile,
        data_summary_csvfile=self.data_summary_csvfile)
    result = dh.generate_patients()
    self.assertListEqual(result, expect)
def test_last_update(self):
    expect = self.datetime_now_str
    dh = handler.DataHandler(
        patients_csvfile=self.patients_csvfile,
        data_summary_csvfile=self.data_summary_csvfile)
    result = dh.generate_data()["lastUpdate"]
    self.assertEqual(result, expect)
def __init__(self, master):
    self.selected_frame = tk.IntVar()

    # set handlers
    self.data_handler = data_handler.DataHandler()
    # self.data_handler.set_view_handler(self)
    self.action_handler = control_handler.MainControl()
    self.action_handler.set_data_handler(self.data_handler)

    self.container = tk.Frame(master)
    self._recv_data()
    self.container.grid()
    self.frame_setup()
def handle(self):
    global client_map
    _, self.socket_id = self.request.getpeername()
    self.data_handler = data_handler.DataHandler(self.socket_id)
    client_map[self.socket_id] = self.request

    data = b''  # default so the DISCONNECT handler has a payload even if the first recv() fails
    while True:
        try:
            data = self.request.recv(255).strip()
            message = Message(bytearray(data)[0])
            if message:
                self.handlers[message](data)
        except (ConnectionResetError, IndexError):
            self.handlers[Message.DISCONNECT](data)
            break
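# handle() reads the first byte of each packet as a Message opcode and
# dispatches through self.handlers. A minimal sketch of the pieces this
# assumes; only DISCONNECT appears above, the other members are illustrative:
import enum

class Message(enum.IntEnum):
    DISCONNECT = 0
    HELLO = 1
    DATA = 2

# self.handlers would then map opcodes to bound methods, e.g.:
# self.handlers = {
#     Message.DISCONNECT: self.on_disconnect,
#     Message.HELLO: self.on_hello,
#     Message.DATA: self.on_data,
# }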
def test_generate_sickbeds_summary(self):
    expect_json = '''
    {
        "入院患者数": 18,
        "病床数": 100
    }
    '''.strip()
    dh = handler.DataHandler(
        patients_csvfile=self.patients_csvfile,
        data_summary_csvfile=self.data_summary_csvfile)
    result = dh.generate_sickbeds_summary()
    expect = json.loads(expect_json)
    self.assertDictEqual(result, expect)
def test_generate_patients_summary_by_age(self):
    expect_json = '''
    {
        "10代以下": 1,
        "20代〜30代": 2,
        "40代〜50代": 1,
        "60代〜70代": 1,
        "80代以上": 1
    }
    '''.strip()
    dh = handler.DataHandler(
        patients_csvfile=self.patients_csvfile,
        data_summary_csvfile=self.data_summary_csvfile)
    result = dh.generate_patients_summary_by_age()
    expect = json.loads(expect_json)
    self.assertDictEqual(result, expect)
def cpe_filter():
    """
    Uses the localhost:5000/cpe?vendor=xx&product=yy syntax, as it is more
    flexible (it allows multiple queries at once, for instance). At least one
    of the two query parameters must be given, otherwise the endpoint
    returns 404.

    Query parameters:
        vendor (str): vendor name
        product (str): product name
    :return: list of CVEs
    """
    query_parameters = request.args
    vendor = query_parameters.get('vendor')
    product = query_parameters.get('product')
    dh = data_handler.DataHandler()
    if not (vendor or product):
        return page_not_found(404)
    return dh.querry_cpe(vendor, product)
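# Example request against the endpoint above (hypothetical vendor/product values):
#   $ curl 'http://localhost:5000/cpe?vendor=microsoft&product=windows_10'
# returns the CVE list produced by dh.querry_cpe("microsoft", "windows_10").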
# test_data_handler.py
import sys
sys.path.append('./pca_exp')
sys.path.append('./pca_exp/generate_samples')

import numpy as np
import matplotlib.pyplot as plt

import data_handler as dh
from pca_machine import PCAMachine
from kubo_toyabe import generateKT

dataHandler = dh.DataHandler()

stsp_lng = (10, 27)
prenum_lng = 'R202'
ext_lng = '.DAT'
loc_lng = './tests/ExData/'

stsp_ba = (56, 76)
prenum_ba = 'EMU585'
ext_ba = '.dat'
loc_ba = './tests/BaFe2Se2O/'
delimiter_ba = ','
skiprows_ba = 2

dataHandler.load_batch(stsp=stsp_lng, prenum=prenum_lng, ext=ext_lng, loc=loc_lng)
dataHandler.load_batch(stsp=stsp_ba, prenum=prenum_ba, ext=ext_ba, loc=loc_ba,
                       delimiter=delimiter_ba, skiprows=skiprows_ba)
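# Judging by its arguments, load_batch presumably builds one filename per run
# number in the stsp range. A sketch of that assumed naming scheme
# (zero-padding and whether the stop value is inclusive are guesses):
start, stop = stsp_lng
filenames = [loc_lng + prenum_lng + str(n) + ext_lng for n in range(start, stop)]
# -> ['./tests/ExData/R20210.DAT', ..., './tests/ExData/R20226.DAT']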
import tensorflow as tf  # needed below for ConfigProto / GPUOptions

import data_handler as data
import model_simple as model

NUM_EPOCHS = 10   # Number of epochs to train
BATCH = 1         # How many items per iteration on GPU?
ACCUM = 16        # Update gradients after how many iterations?
T = 1             # Load how many frames from video?
LR = 1e-3         # Initial learning rate
GAMMA = 1e-5      # Regularization weight (L2)

# Load data handlers
# PREFIX_RGB: Path to RGB frames (structured as PREFIX_RGB/video_id/frame_000001.jpg etc.)
# PREFIX_POSE: Path to pose files (PREFIX_POSE/video_id.npy)
# annotation: Annotation file, e.g., example.csv
hnd_train = data.DataHandler(PREFIX_RGB="/path/to/folder/", PREFIX_POSE="/path/to/folder/",
                             annotation="example.csv", T=T, is_test=False, do_resize=None)
hnd_test = data.DataHandler(PREFIX_RGB="/path/to/folder/", PREFIX_POSE="/path/to/folder/",
                            annotation="example.csv", T=T, is_test=True, do_resize=None)

num_train = hnd_train.num()
num_test = hnd_test.num()
C = hnd_train.num_classes()

# Build graph
graph = model.Graph(T, C)
graph.build_graph()

conf = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
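# With BATCH = 1 and ACCUM = 16, gradients are accumulated over 16 single-item
# iterations before one weight update (effective batch = BATCH * ACCUM). A
# generic TF1-style sketch of that pattern; `loss` and the optimizer choice
# are placeholders here, not names from model_simple:
opt = tf.train.AdamOptimizer(LR)
tvars = tf.trainable_variables()
accum_vars = [tf.Variable(tf.zeros_like(v), trainable=False) for v in tvars]
grads = tf.gradients(loss, tvars)
zero_ops = [a.assign(tf.zeros_like(a)) for a in accum_vars]
accum_ops = [a.assign_add(g) for a, g in zip(accum_vars, grads)]
apply_op = opt.apply_gradients([(a / ACCUM, v) for a, v in zip(accum_vars, tvars)])
# per step: run accum_ops; every ACCUM steps: run apply_op, then zero_ops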
# This script runs the program: it creates the objects for the three main
# classes (CLI, DataHandler and MethodHandler) and starts the cmdloop in
# which the user's commands are processed.
import cli
import data_handler as dh
import method_handler as mh
import sys

sys.stdout.write(
    'Welcome to FinData: a simple CLI for retrieving financial data from finnhub '
    + '(v. 1.0 - 14 feb 2021)' + '\n')

token = 'c0f7i1748v6snrib4ca0'

dataHandler = dh.DataHandler(token)
dataHandler.load_db()
methodHandler = mh.MethodHandler(dataHandler, token)
cli_app = cli.CLI(methodHandler)
cli_app.cmdloop()
plt.imshow(vis)
plt.show()

fcn_net = fcn8.FCN(num_classes=2, batch_size=BATCH_SIZE)

# construct model
images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_C])
labels = tf.placeholder(tf.int64, [BATCH_SIZE, IMAGE_H, IMAGE_W, num_classes])
fcn_net.build_seg_net(images)

# set gpu configuration
conf = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))

# load data
hnd_data_train = data_handler.DataHandler()
hnd_data_train.get_file_list('/home/fensi/nas/KITTI_ROAD/train.txt',
                             '/home/fensi/nas/KITTI_ROAD/label.txt')
hnd_data_test = data_handler.DataHandler()
hnd_data_test.get_file_list('/home/fensi/nas/KITTI_ROAD/test.txt')

# loss_op needs to be called before train_op; see the definition of train_op in fcn8.py
fcn_loss = fcn_net.loss_op(logits=fcn_net.result, labels=labels)
fcn_op = fcn_net.train_op()

num_steps = 200
with tf.Session(config=conf) as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    # training
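    # A sketch of the loop that presumably follows the cut above; the
    # next_batch(batch_size) loader is an assumed DataHandler API:
    for step in range(num_steps):
        batch_images, batch_labels = hnd_data_train.next_batch(BATCH_SIZE)
        _, loss_val = sess.run([fcn_op, fcn_loss],
                               feed_dict={images: batch_images, labels: batch_labels})
        if step % 10 == 0:
            print('step %d: loss = %f' % (step, loss_val))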
    contains_full_path = True
    model_name = 'sim_model.ckpt'
else:
    vec_spec = vehicle_spec.VehicleSpec(angle_norm=30, image_crop_vert=[220, 480])
    data_path = '/home/elschuer/data/LaneKeepingE2E/images_train_augmented/'
    desc_file = 'data_labels.csv'
    contains_full_path = True
    model_name = 'nvidia_model.h5'
    convert_image = False
    image_channels = 1

# Note: this rebinds the imported module names data_handler / data_analyzer.
data_handler = data_handler.DataHandler(
    data_path, desc_file, vehicle_spec=vec_spec,
    contains_full_path=contains_full_path,
    convert_image=convert_image, image_channels=1)
data_handler.read_data()

if analyze_data:
    data_analyzer = data_analyzer.DataAnalyzer()
    data_analyzer.showDataDistribution(data_handler.y_data)
    data_analyzer.print_samples_not_equal_zero(data_handler.y_data)

model_trainer = ModelTrainer(epochs=10, data_handler=data_handler, model_name=model_name)
model_trainer.train_model()
def __init__(self):
    super(MainWidget, self).__init__()
    self.handler = data_handler.DataHandler()
    self.init_ui()
def test_init(self):
    # Pass the callable and its kwargs to assert_raises instead of calling
    # DataHandler inline; otherwise the exception fires before the assertion runs.
    # The expected exception type is assumed here; adjust to what DataHandler raises.
    assert_raises(Exception, data_handler.DataHandler, provider="foo")
    assert_raises(Exception, data_handler.DataHandler, parser="bar")
#            [606, 179, 683, 230, 1],
#            [589, 178, 643, 218, 1],
#            [402, 178, 457, 220, 1],
#            [534, 175, 578, 204, 1]])
# label2 = np.array([1, 2, 3, 4])
# print(label_gt)
# image_dim = [[375, 1242]]
# print(label_gt, image_dim)

rpn = []
feat_stride = 16
anchor_scale = [8, 16, 32]

data_handler = data_handler.DataHandler()
data = "../../Datasets/kitti/train.txt"
label_file = "../../Datasets/kitti/label.txt"
data_handler.get_file_list(data, label_file)
data1, labels1, ims = data_handler.load_data(0, 1)
print(data1.shape, labels1.shape, ims.shape)
print(labels1, ims)

# rpn = region_proposal_network.RegionProposalNetwork(feature_vector=data1, ground_truth=labels1,
#                                                     im_dims=ims, anchor_scale=anchor_scale, Mode="train")
rpn_label, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = \
    anchor_target_layer.anchor_target_layer_python(
        rpn_cls_score=data1, gt_boxes=labels1, im_dims=ims,
        feat_strides=feat_stride, anchor_scales=anchor_scale)
print(rpn_label)
                      outputs={'prediction': cnn.layers['pred']})

    # TEST
    test_times.append(time.time())
    predictions = evaluate(epoch, test_length, test_batch_size, x_test, y_test)
    test_times[-1] = time.time() - test_times[-1]

    return y_test, np.array(predictions[1], dtype=np.uint8), accuracies, \
        best_result, training_time, test_times, out


train_files, test_files = get_files(options.root)
report_out += print_files(options.root)
db = dh.DataHandler(train_files, test_files)
report_out += print_data_info(db)

shuffled = np.random.permutation(range(db.train_size))
db.x_train = db.x_train[shuffled]
db.y_train = db.y_train[shuffled]

labels, predictions, accuracies, best_result, training_time, test_times, training_out = train_evaluate(
    db.x_train, db.y_train, db.x_test, db.y_test, db.vocab_size, db.max_len,
    db.classes, db.num_classes, options.architecture, options.activation_functions,
    options.widths, options.strides, options.dilations, options.feature_maps,
    get_optimizer(options.optimizer, options.learning_rate), options.l2,
    options.train_batch_size, options.test_batch_size, options.epochs, options.dropout)