Code Example #1
File: app.py Project: FieryDarkWraith/DefHacks
def login():
    error = None
    if request.method == 'POST':
        Username = request.form['username']
        Pass = request.form['password']
        g.db = connect_db()
        c1 = g.db.execute("SELECT pass FROM posts WHERE username ='%s'" % Username)
        c2 = g.db.execute("SELECT * FROM posts WHERE username ='%s'" % Username)
        passpull = c1.fetchone()
        if request.form['mode'] == "Register":
            return redirect(url_for('register'))
        elif passpull is None or passpull[0] != hashpass(Pass) or c2.fetchone()[0] != Username:
            error = "Invalid username or password"
        else:
            session['username'] = Username
            if userdata.getP(str(Username)) == []:
                p = Queue.PriorityQueue()
                main.initialize(p, Username)
                session["queue"] = main.pToLoL(p);
            else:
                p = main.LoLToP(userdata.getP(Username))
                session["queue"] = main.pToLoL(p);
            flash('You were logged in')
            session["justlogin"] = True
            return redirect(url_for('home'))
    return render_template('login.html', error=error)
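The string-interpolated queries in this example are open to SQL injection. A minimal sketch of the same lookups with bound parameters, assuming g.db is a DB-API connection such as sqlite3 (question-mark placeholder style):

        # values are bound by the driver, so they are never interpreted as SQL
        c1 = g.db.execute("SELECT pass FROM posts WHERE username = ?", (Username,))
        c2 = g.db.execute("SELECT * FROM posts WHERE username = ?", (Username,))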
Code Example #2
def evaluate_agent(loop_count, test_random):
    """Evaluate agent's performance after Q-learning"""
    global states, q_table

    # TODO: flag to test only random fish (action = 4)

    total_age, total_rewards, total_average_rewards_per_year = 0, 0, 0
    episodes = 100

    for i in range(episodes):
        print("Testing episode {0}".format(i))

        initialize(RL=True)

        rewards_in_episode = 0

        done = False
        state = INIT_STATE
        while not done:
            if test_random:
                action = ACTIONS_SPACE_LEN - 1
            else:
                action = get_best_action(state)
            done = env_step(action)

            # tuple: ([energy, hp, num of near small plankton, near big, far small, far big], reward, age, years_passed)
            state, reward, age, years_passed = get_RL_fish_state()
            # print(state, reward, age, years_passed)
            rewards_in_episode += reward

        # one year fewer has actually passed, because the count starts from 1
        years_passed -= 1

        total_age += age
        total_rewards += rewards_in_episode
        total_average_rewards_per_year += (rewards_in_episode / years_passed)

    with open("results", 'a') as text_file:
        if test_random:
            print('Results for random fish', file=text_file)

        print('Results in {} loop'.format(loop_count), file=text_file)
        print('Average age per episode: {}'.format(total_age / episodes), file=text_file)
        print('Average total rewards per episode: {}'.format(int(total_rewards / episodes)), file=text_file)
        print('Average of total average rewards per year: {}'.format(int(total_average_rewards_per_year / episodes)),
              file=text_file)

    if test_random:
        print('Results for random fish')
    print('Results in {} loop'.format(loop_count))
    print('Average age per episode: {}'.format(total_age / episodes))
    print('Average total rewards per episode: {}'.format(int(total_rewards / episodes)))
    print('Average of total average rewards per year: {}'.format(int(total_average_rewards_per_year / episodes)))
Code Example #4
def ocean_price():
    """
    :param:privatekey,pool address, did
    :return:token prize in comparision to the ocean token
    """
    if request.is_json:
        # a buy parameter that tells whether to actually buy the token (true or false)
        content = request.get_json()
        # private key here will be the consumers private key and not the publishers private key
        private_key = content["privatekey"]
        did = content["did"]
        token_address = content["token"]

        pool_address = content["pool_address"]
        ocean, wallet = initialize(private_key)
        data_token = ocean.get_data_token(token_address)
        # asset=ocean.assets.resolve(did)
        # print(asset.did)
        # service=asset.get_service(ServiceTypes.ASSET_ACCESS)
        OCEAN_address = "0x8967BCF84170c91B0d24D4302C2376283b0B3a07"
        # OCEAN_address=ocean.pool.ocean_address
        # print(OCEAN_address)
        # price_in_ocean=ocean.pool.calcInGivenOut(
        #     pool_address,OCEAN_address,token_address,token_out_amount=1.0
        # )
        # print(price_in_ocean)
        # if content["buy"]=="false":
        #     return {"pool_address":pool_address,"price":price_in_ocean} ,200
        # else:
        ocean.pool.buy_data_tokens(pool_address,
                                   amount=1.0,
                                   max_OCEAN_amount=10,
                                   from_wallet=wallet)
        return {"message": "ok"}, 200
Code Example #5
def datatoken():
    """
    :param: private_key of the publisher
    :return: token_name, token_address, token_hash
    """
    try:
        content = request.get_json()
        print(content["privatekey"])
        ocean, wallet = initialize(content["privatekey"])
        # service_attributes=return_service_attribute(wallet)
        print(wallet.address)
        # print(service_attributes)
        token_name = [
            random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(10)
        ]
        name1 = "DataToken" + str(random.randint(0, 1000000))
        token_name = "".join(token_name)
        print(token_name, name1)

        data_token, token_address = create_token(name1, token_name, wallet,
                                                 ocean)
        mint_hash = data_token.mint_tokens(wallet.address, 1000, wallet)

        return {
            "token_name": token_name,
            "token_hash": mint_hash,
            "token_address": token_address
        }, 200
    except Exception:
        return {"message": "error"}, 404
Code Example #6
def download():
    """
    :param: privatekey, did, numbers
    :return: the whole dataset, or the names for the requested numbers
    """
    if request.is_json:
        content = request.get_json()
        if content["service"] == "download":
            privatekey = content["privatekey"]
            ocean, wallet = initialize(privatekey)
            did = content["did"]
            asset = ocean.assets.resolve(did)
            service = asset.get_service(ServiceTypes.ASSET_ACCESS)
            print(service)
            quote = ocean.assets.order(asset.did,
                                       wallet.address,
                                       service_index=service.index)
            # putting the buy option here
            token_address = quote.data_token_address
            print(token_address)

            # OCEAN_address = ocean.pool.ocean_address
            # pool_address = content["pool_address"]
            # price_in_ocean = ocean.pool.calcInGivenOut(
            #     pool_address, OCEAN_address, token_address, token_out_amount=1.0
            # )
            # print(price_in_ocean)
            # data_token = ocean.pool.buy_data_tokens(pool_address, amount=1.0, max_OCEAN_amount=price_in_ocean,
            #                               from_wallet=wallet)
            # print(data_token)
            market_address = "0x8967BCF84170c91B0d24D4302C2376283b0B3a07"
            order_tx_id = ocean.assets.pay_for_service(
                quote.amount, quote.data_token_address, asset.did,
                service.index, market_address, wallet)
            print(order_tx_id)
            filepath = ocean.assets.download(asset.did,
                                             service.index,
                                             wallet,
                                             order_tx_id,
                                             destination="./files")
            if "numbers" not in content.keys():
                zip_filename = "data" + token_address + ".zip"
                zipf = zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED)
                zipdir(filepath, zipf)
                zipf.close()

                # the response from send_file must be returned, or the zip never reaches the client
                return send_file(zip_filename, as_attachment=True)
            else:
                numbers = content["numbers"]
                return_data = get_compute_back(did[7:], numbers)
                if len(return_data.keys()) == 0:
                    return_data["message"] = "data not found"
                    return return_data, 404
                return_data["message"] = "data found"
                return return_data, 200
        return {"message": "unsupported service"}, 400
    return {"message": "expected a JSON request body"}, 400
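zipdir is not shown in the snippet; a minimal sketch consistent with the call above (walks the downloaded directory and writes every file into the open ZipFile):

    import os
    import zipfile

    def zipdir(path, ziph):
        # ziph is an open zipfile.ZipFile handle
        for root, dirs, files in os.walk(path):
            for name in files:
                full = os.path.join(root, name)
                # store paths relative to the zipped directory
                ziph.write(full, os.path.relpath(full, path))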
Code Example #7
def initialTrain():
    state = main.getData()
    main.startGame()
    while not main.getDone():

        # Random action
        action = random.choice([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                                [0, 0, 0, 1]])

        # Get the rewards
        reward = main.getReward(action)
        # Look if the episode is finished
        done = main.getDone()

        # If we're dead
        if done:
            # We finished the episode
            next_state = np.zeros([5])

            # Add experience to memory
            memory.add((state, action, reward, next_state, done))

            # start the next episode from a fresh state
            state = main.getData()

        else:
            # Get the next state
            next_state = main.getData()

            # Add experience to memory
            memory.add((state, action, reward, next_state, done))

            # Our state is now the next_state
            state = next_state
    main.initialize()

    train()
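memory is not defined in the snippet; a minimal sketch of an experience-replay buffer consistent with the add(...) calls above:

    import random
    from collections import deque

    class Memory:
        def __init__(self, max_size=10000):
            # oldest experiences are dropped automatically once the buffer is full
            self.buffer = deque(maxlen=max_size)

        def add(self, experience):
            # experience is a (state, action, reward, next_state, done) tuple
            self.buffer.append(experience)

        def sample(self, batch_size):
            return random.sample(self.buffer, batch_size)

    memory = Memory()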
Code Example #8
def compute():
    """
    :param: in development, corresponding to the Ocean Protocol
    :return:
    """
    if request.is_json:
        content = request.get_json()
        did = content["did"]
        privatekey = content["privatekey"]
        ocean, wallet = initialize(privatekey)
        order_tx_id = content["order_txn"]
        nonce = 10000
        job_id = trying.trying_algorithm_run(did, wallet, order_tx_id, nonce)
        return {"job_id": job_id}, 200
Code Example #9
def test_initialize():
    config = Namespace(
        model="bert-base-uncased",
        model_dir="/tmp",
        dropout=0.3,
        n_fc=768,
        num_classes=1,
        learning_rate=1e-3,
        weight_decay=1e-4,
        num_iters_per_epoch=1,
        num_warmup_epochs=1,
        max_epochs=1,
    )
    model, optimizer, loss_fn, lr_scheduler = initialize(config)
    assert isinstance(model, nn.Module)
    assert isinstance(optimizer, optim.Optimizer)
    assert isinstance(loss_fn, nn.Module)
    assert isinstance(lr_scheduler, (_LRScheduler, ParamScheduler))
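The test assumes imports along these lines (inferred from the names used; the exact module paths are assumptions, and ParamScheduler in particular has moved between ignite.contrib.handlers and ignite.handlers across PyTorch-Ignite versions, while initialize is the project's own helper):

    from argparse import Namespace

    from torch import nn, optim
    from torch.optim.lr_scheduler import _LRScheduler
    from ignite.handlers import ParamScheduler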
Code Example #10
def publish():
    """
    :param: privatekey, token, url for the file
    :return: did for the published asset
    """
    content = request.get_json()
    token_address = content["token"]
    ocean, wallet = initialize(content["privatekey"])
    print(wallet.address)
    s = return_service_attribute(wallet.address)
    url = content["url"]
    metadata = get_meta_data(url)
    asset = create_asset(ocean=ocean,
                         wallet=wallet,
                         metadata=metadata,
                         service_attributes=s,
                         token_address=token_address)
    # print(asset)
    return {"token": token_address, "did": asset.did}
Code Example #11
def make_pool():
    """
    :param: private_key, token_address
    :return: returns the token address and the pool address
    """
    if request.is_json:
        content = request.get_json()
        private_key = content["privatekey"]
        token_address = content["token"]
        # data_amount=content["data_amount"]
        # ocean_amount=content["ocean_amount"]

        ocean, wallet = initialize(private_key=private_key)
        pool_address = makepool(ocean, wallet, token_address)
        return {
            "token_address": token_address,
            "pool_address": pool_address
        }, 200
    else:
        return {}, 404
Code Example #12
class TestModelLoader(unittest.TestCase):
    main.initialize(param_path)

    def test_initialize_model(self):
        main.initialize(param_path)
        self.assertIsNotNone(main.model, 'should return an initialized model object')

    def test_lambda_handler_makes_prediction(self):
        main.initialize(param_path)
        main.client.publish = MagicMock()

        event = {}
        event['filepath'] = './resources/img/blue_box_000001.jpg'
        response = main.lambda_handler(event, {})

        # Assert message published correctly
        self.assertEqual(response['prediction'][0][0], 0.0)
        main.client.publish.assert_called_with(topic='blog/infer/output', payload=json.dumps(response))

    def test_lambda_handler_noops_empty_filepath(self):
        event = {}
        response = main.lambda_handler(event, {})
        self.assertIsNone(response, 'Should return none if no filepath found')
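This test module assumes imports along these lines (inferred from usage; param_path is defined elsewhere in the module, and the placeholder value here is hypothetical):

    import json
    import unittest
    from unittest.mock import MagicMock  # from mock import MagicMock on Python 2

    import main

    param_path = "params.json"  # hypothetical; the real path comes from the project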
Code Example #13
File: dcr.py Project: ankitshah009/Object_detection
def train(config):
	eval_target = ["Vehicle", "Person", "Prop", "Push_Pulled_Object", "Bike"]
	eval_target = {one:1 for one in eval_target}

	# for weighted loss
	"""
	"BG":0,
	"Vehicle":1,
	"Person":2,
	"Parking_Meter":3,
	"Tree":4,
	"Skateboard":5,
	"Prop_Overshoulder":6,
	"Construction_Barrier":7,
	"Door":8,
	"Dumpster":9,
	"Push_Pulled_Object":10,
	"Construction_Vehicle":11,
	"Prop":12,
	"Bike":13,
	"Animal":14,
	"""
	# for weighted loss if used
	config.class_weights = {i:1.0 for i in xrange(config.num_class)}
	config.class_weights[10] = 2.0
	config.class_weights[12] = 2.0
	config.class_weights[13] = 2.0

	train_data = read_data(config, config.filelst, config.annopath, config.framepath, is_train=True)
	val_data = read_data(config, config.valfilelst, config.valannopath, config.valframepath, is_train=False)
	config.train_num_examples = train_data.num_examples

	# the total step (iteration) the model will run
	num_steps = int(math.ceil(train_data.num_examples/float(config.im_batch_size)))*config.num_epochs
	num_val_steps = int(math.ceil(val_data.num_examples/float(config.im_batch_size)))*1

	models = []
	gpuids = range(config.gpuid_start, config.gpuid_start+config.gpu)
	gpuids = gpuids * config.model_per_gpu 
	# example: model_per_gpu=2, gpu=2, gpuid_start=0 -> gpuids becomes [0,0,1,1] after sorting
	gpuids.sort()
	taskids = range(config.model_per_gpu) * config.gpu # [0,1,0,1]

	for i,j in zip(gpuids,taskids):
		models.append(get_model(config, gpuid=i, task=j, controller=config.controller))

	config.is_train=False
	models_eval = []
	for i,j in zip(gpuids,taskids):
		models_eval.append(get_model(config,gpuid=i,task=j,controller=config.controller))
	config.is_train=True

	trainer = Trainer(models,config)
	tester = Tester(models_eval,config) # need final box and stuff?

	saver = tf.train.Saver(max_to_keep=5) # how many model to keep
	bestsaver = tf.train.Saver(max_to_keep=5) # just for saving the best model

	# start training!
	# allow_soft_placement: TF will automatically pick another device if the one named in tf.device(*) is unavailable

	tfconfig = tf.ConfigProto(allow_soft_placement=True)#,log_device_placement=True)
	if not config.use_all_mem:
		tfconfig.gpu_options.allow_growth = True # this way it will only allocate the necessary GPU memory, not take it all

	tfconfig.gpu_options.visible_device_list = "%s"%(",".join(["%s"%i for i in range(config.gpuid_start, config.gpuid_start+config.gpu)])) # so only this gpu will be used
	# or you can set hard limit
	#tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.4
	with tf.Session(config=tfconfig) as sess:
		initialize(load=config.load, load_best=config.load_best, config=config, sess=sess)

		isStart = True

		best = (-1.0, 1, "AP_mul")
		loss_me, box_label_loss_me, wd_me, lr_me = [FIFO_ME(config.loss_me_step) for i in xrange(4)]
		for batch in tqdm(train_data.get_batches(config.im_batch_size, num_batches=num_steps),total=num_steps,ascii=True,smoothing=1):
			global_step = sess.run(models[0].global_step) + 1 # start from 0 or the previous step

			validation_performance = None
			if (global_step % config.save_period == 0) or (config.load and isStart and ((config.ignore_vars is None) or config.force_first_eval)): # time to save model
				tqdm.write("step:%s/%s (epoch:%.3f)"%(global_step,num_steps,(config.num_epochs*global_step/float(num_steps))))
				tqdm.write("\tsaving model %s..."%global_step)
				saver.save(sess,os.path.join(config.save_dir,"model"),global_step=global_step)
				tqdm.write("\tdone")
				if config.skip_first_eval and isStart:
					tqdm.write("skipped first eval...")
					validation_performance = config.best_first
					this_val_best_type = "null"
				else:
					e = {one:[] for one in eval_target.keys()}
					e_mul = {one:[] for one in eval_target.keys()} # this will be produced by dcr_prob * frcnn_prob
					for val_batch_ in tqdm(val_data.get_batches(config.im_batch_size, num_batches=num_val_steps, shuffle=False), total=num_val_steps, ascii=True, smoothing=1):
						batch_idx, val_batches = val_batch_
						this_batch_num = len(val_batches)
						# multiple image at a time for parallel inferencing with multiple gpu
						imgids = []
						for val_batch in val_batches:
							# load the image here and resize
							image = cv2.imread(val_batch.data['imgs'][0], cv2.IMREAD_COLOR)
							imgid = os.path.splitext(os.path.basename(val_batch.data['imgs'][0]))[0]
							imgids.append(imgid)
							assert image is not None, image
							image = image.astype("float32")
							val_batch.data['imgdata'] = [image]

							resized_image = resizeImage(image, config.short_edge_size, config.max_size)

							# remember the scale and original image
							ori_shape = image.shape[:2]
							#print image.shape, resized_image.shape
							# average H/h and W/w ?

							val_batch.data['resized_image'] = [resized_image]

						# since the val_batch['boxes'] could be (1000, 4), we need to break them down
						split_val_batches = split_batch_by_box_num(val_batch_, config.test_box_batch_size)

						ori_box_nums = [b.data['gt'][0]['boxes'].shape[0] for b in val_batches]

						outputs = [[] for _ in xrange(this_batch_num)]
						for split_val_batch in split_val_batches:
							this_outputs = tester.step(sess, split_val_batch)
							for i, this_output in enumerate(this_outputs):
								outputs[i].append(this_output[0]) # [K, num_class]

						# re-assemble the boxes
						for i in xrange(len(outputs)):
							outputs[i] = np.concatenate(outputs[i], axis=0)[:ori_box_nums[i], :]

						# post process this batch, also remember the ground truth
						for i in xrange(this_batch_num): # num gpu
							imgid = imgids[i]

							box_yp = outputs[i] # [K, num_class]
							
							val_batch = val_batches[i]

							anno = val_batch.data['gt'][0] # one val_batch is single image

							assert len(anno['boxes']) == len(anno['labels']) == len(box_yp)

							for eval_class in e:
								classIdx = targetClass2id[eval_class]

								# (K scores, K 1/0 labels)
								bin_labels = anno['labels'] == classIdx
								this_yp = box_yp[:, classIdx] # [K]
								# frcnn is [num_class-1, K]
								this_yp_mul = this_yp * anno['frcnn_probs'][classIdx-1, :]

								e[eval_class].extend(zip(this_yp, bin_labels))
								e_mul[eval_class].extend(zip(this_yp_mul, bin_labels))
					aps = []
					aps_mul = []
					for eval_class in e:
						AP = compute_AP(e[eval_class])
						aps.append((eval_class, AP))
						AP_mul = compute_AP(e_mul[eval_class])
						aps_mul.append((eval_class, AP_mul))
					average_ap = np.mean([ap for _, ap in aps])
					average_ap_mul = np.mean([ap for _, ap in aps_mul])

					validation_performance = max([average_ap_mul, average_ap])
					this_val_best_type = "AP_mul" if average_ap_mul >= average_ap else "AP"

					details = "|".join(["%s:%.5f"%(classname, ap) for classname, ap in aps])
					details_mul = "|".join(["%s:%.5f"%(classname, ap) for classname, ap in aps_mul])

					tqdm.write("\tval in %s at step %s, mean AP:%.5f, details: %s ---- mean AP_mul is %.5f, details: %s. ---- previous best at %s is %.5f, type: %s"%(num_val_steps, global_step, average_ap, details, average_ap_mul, details_mul, best[1], best[0], best[2]))

				if validation_performance > best[0]:
					tqdm.write("\tsaving best model %s..." % global_step)
					bestsaver.save(sess,os.path.join(config.save_dir_best, "model"), global_step=global_step)
					tqdm.write("\tdone")
					best = (validation_performance, global_step, this_val_best_type)

				isStart = False
			
			# skip incomplete batches (usually the last few)
			if len(batch[1]) != config.gpu:
				continue

			try:
				loss, wds, box_label_losses, lr = trainer.step(sess,batch)
			except Exception as e:
				print e
				bs = batch[1]
				print "trainer error, batch files:%s"%([b.data['imgs'] for b in bs])
				sys.exit()
			

			if math.isnan(loss):
				tqdm.write("warning, nan loss: loss:%s, box_label_loss:%s"%(loss, box_label_losses))
				print "batch:%s"%([b.data['imgs'] for b in batch[1]])
				sys.exit()

			# use moving average to compute loss

			loss_me.put(loss)
			lr_me.put(lr)
			for wd, box_label_loss in zip(wds, box_label_losses):
				wd_me.put(wd)
				box_label_loss_me.put(box_label_loss)

			if global_step % config.show_loss_period == 0:
				tqdm.write("step %s, moving average: learning_rate %.6f, loss %.6f, weight decay loss %.6f, box_label_loss %.6f" % (global_step, lr_me.me(), loss_me.me(), wd_me.me(), box_label_loss_me.me()))
Code Example #14
File: apps.py Project: onecityuni/luci-py
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.

"""Actual WSGI app instantiations used from app.yaml.

Function 'main.initialize' must be called from a separate module
not imported in tests.
"""

import main

html, endpoints, backend = main.initialize()
Code Example #15
File: app_testcase.py Project: utgwkk/le4-database
 def initialize(self):
     main.initialize()
Code Example #16
File: dcr.py Project: ankitshah009/Object_detection
def forward(config):
	# the annopath is the box output from the fastrcnn model
	# given the filelst, framepath and annopath, we compute a new classification score for each box, then do NMS, then write the final json output
	all_filenames = [os.path.splitext(os.path.basename(line.strip()))[0] for line in open(config.filelst, "r").readlines()]
	print "total image to test %s"%len(all_filenames)

	if not os.path.exists(config.outbasepath):
		os.makedirs(config.outbasepath)

	models = []
	for i in xrange(config.gpuid_start, config.gpuid_start+config.gpu):
		models.append(get_model(config, i, controller=config.controller))
	tester = Tester(models, config) 

	tfconfig = tf.ConfigProto(allow_soft_placement=True)
	if not config.use_all_mem:
		tfconfig.gpu_options.allow_growth = True # this way it will only allocate the necessary GPU memory, not take it all
	
	tfconfig.gpu_options.visible_device_list = "%s" % (",".join(["%s" % i for i in range(config.gpuid_start, config.gpuid_start+config.gpu)])) # so only this gpu will be used

	with tf.Session(config=tfconfig) as sess:

		initialize(load=True, load_best=config.load_best, config=config, sess=sess)
		# num_epoch should be 1
		assert config.num_epochs == 1

		for filenames in tqdm(grouper(all_filenames, config.im_batch_size),ascii=True):
			filenames = [filename for filename in filenames if filename is not None]
			this_batch_num = len(filenames)

			if this_batch_num != config.im_batch_size:
				need = config.im_batch_size - this_batch_num
				filenames.extend(all_filenames[:need])
			
			ori_probs = []
			ori_frcnn_boxes = []
			datas = [] # should be a list of Dataset obj
			ori_box_nums = []
			for i, filename in enumerate(filenames):
				data = {"imgs":[], "imgdata":[], "gt":[]}

				videoname = filename.split("_F_")[0]
				image = os.path.join(config.framepath, videoname, "%s.jpg"%filename)
				box_npz = os.path.join(config.annopath, "%s.npz"%filename)

				box_data = dict(np.load(box_npz))
				im = cv2.imread(image, cv2.IMREAD_COLOR)
				
				ori_shape = im.shape[:2]

				resized_image = resizeImage(im, config.short_edge_size, config.max_size)

				# [K, 4] 
				boxes = box_data['frcnn_boxes'].copy()
				
				data['imgs'].append(image)
				
				data['gt'].append({
					"boxes": boxes,
				})
				data = Dataset(data, add_gt=True)
				data.data['imgdata'] = [im]
				data.data['resized_image'] = [resized_image]

				datas.append(data)

				ori_box_nums.append(len(boxes))

				# [C, K]
				ori_probs.append(box_data['frcnn_probs'])
				# [K, 4]
				ori_frcnn_boxes.append(box_data['frcnn_boxes'])

			# data is num_gpu images, but each has multiple boxes,
			# so split into K jobs, each job is num_gpu images
			mini_datas = split_batch_by_box_num(([], datas), config.test_box_batch_size)
			
			outputs = [[] for _ in xrange(this_batch_num)] # num_gpu
			for mini_data in mini_datas:
				this_outputs = tester.step(sess, mini_data)
				for i in xrange(this_batch_num):
					outputs[i].append(this_outputs[i][0]) # [num_box <= test_box_batch_size, num_class]

			# re-assemble boxes
			for i in xrange(this_batch_num):
				outputs[i] = np.concatenate(outputs[i], axis=0)[:ori_box_nums[i], :]

			for i, output in enumerate(outputs): # num_gpu
				# [K, num_class]
				dcr_prob = output
				dcr_prob = dcr_prob[:, 1:] # [K, C]
				# [C, K]
				dcr_prob = np.transpose(dcr_prob, axes=[1, 0]) 

				C = dcr_prob.shape[0]
				# [C, K]
				# only use the dcr model output
				final_probs = dcr_prob
				if args.use_mul:
					ori_prob = ori_probs[i]
					final_probs = ori_prob * dcr_prob

				# [K, 4] for class agnostic
				ori_frcnn_box = ori_frcnn_boxes[i]
				if len(ori_frcnn_box.shape) == 2:
					ori_frcnn_box = np.tile(np.expand_dims(ori_frcnn_box, axis=0), [C, 1, 1])

				final_boxes, final_labels, final_probs = nms_wrapper(ori_frcnn_box, final_probs, config)

				pred = []

				for j,(box, prob, label) in enumerate(zip(final_boxes, final_probs, final_labels)):
					box[2] -= box[0]
					box[3] -= box[1] # produce x,y,w,h output

					cat_id = int(label)
					cat_name = targetid2class[cat_id]
					
					rle = None
					
					res = {
						"category_id": cat_id,
						"cat_name": cat_name, # [0-80]
						"score": float(round(prob, 4)),
						"bbox": list(map(lambda x:float(round(x,1)),box)),
						"segmentation":rle,
					}

					pred.append(res)

				# save the data
				filename = filenames[i]
				resultfile = os.path.join(config.outbasepath, "%s.json"%filename)
				with open(resultfile, "w") as f:
					json.dump(pred, f)
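grouper is not defined in the snippet; the standard itertools recipe matches the call above (it pads the last chunk with the fill value, which the loop then filters out):

    try:
        from itertools import zip_longest  # Python 3
    except ImportError:
        from itertools import izip_longest as zip_longest  # Python 2

    def grouper(iterable, n, fillvalue=None):
        # collect data into fixed-length chunks: grouper('ABCDEFG', 3) -> ABC DEF G..
        args = [iter(iterable)] * n
        return zip_longest(fillvalue=fillvalue, *args)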
Code Example #17
 def start(self):
     self.text_1.delete('1.0', END)
     self.text_1.insert(END, "Starting...\n")
     acc = mlp.initialize(self.layers, self.epoc, self.dset)
     self.text_1.insert(END, "Done.\nAccuracy: " + str(acc))
Code Example #18
def train_agent(loop_count):
    """Training the agent"""
    # TODO: check whether these globals are still used
    global q_table, states

    # Hyperparameters

    #  learning rate (0<α≤1)
    #  Just like in supervised learning settings, α is the extent to which our Q-values
    #  are being updated in every iteration
    alpha = 0.1

    # γ  (gamma) is the discount factor (0≤γ≤1)
    # determines how much importance we want to give to future rewards.
    # A high value for the discount factor (close to 1) captures the long-term effective reward,
    # whereas a discount factor of 0 makes our agent consider only the immediate reward, hence making it greedy.
    gamma = 0.6

    # We want to prevent the agent from always taking the same route, and possibly overfitting,
    # so we'll be introducing another parameter called ϵ "epsilon" to cater to this during training.
    epsilon = 0.1

    for i in range(1, 1001):
        # offset by loop_count because the agent was already trained in earlier loops

        real_episode_count = i + loop_count * 1000

        start = time.time()

        print("Training episode {0}".format(real_episode_count))
        initialize(RL=True)

        done = False
        state = INIT_STATE
        while not done:
            if random.uniform(0, 1) < epsilon:
                action = random.randint(0, ACTIONS_SPACE_LEN - 1)  # Explore action space
            else:
                action = get_best_action(state)  # Exploit learned values

            done = env_step(action)
            next_state, reward, _, _ = get_RL_fish_state()

            old_reward_value = get_reward_value(state, action)
            next_max = get_max_reward(next_state)

            new_reward_value = (1 - alpha) * old_reward_value + alpha * (reward + gamma * next_max)
            insert_to_lists(state, action, new_reward_value)

            state = next_state

        if i % 50 == 0:
            write_states_and_rewards_to_files(real_episode_count)
            # old_write_statues_rewards_to_file(real_episode_count)

        end = time.time()
        elapsed_time = end - start
        print("Episode training time: " + str(time.strftime("%H:%M:%S", time.gmtime(elapsed_time))))

    print("Training finished.\n")
Code Example #20
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Actual WSGI app instantiations used from app.yaml.

Needed to call 'main.initialize' from a separate module not imported in tests.
"""

import main

html_app, apis_app = main.initialize()
Code Example #21
 def buildMechanicalWorld(self):
     return vision.initialize()
Code Example #22
File: apps.py Project: eunchong/infra
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Actual WSGI app instantiations used from app.yaml.

Extracted to a separate module to avoid calling 'initialize' in unit tests
during module loading time.
"""

import main

endpoints_app, frontend_app, backend_app = main.initialize()
Code Example #23
    print("score (model 2): %.4f %.4f" %
          get_f1(golden,
                 all_generated_2,
                 is_file_content=True,
                 no_rel_name=generator.get_no_rel_name()))


if __name__ == "__main__":
    C, logger = get_config()
    #fitlog.debug()
    C.info += "-watch"

    #----- prepare data and some global variables -----
    data_train, data_test, data_valid, relations, rel_weights = load_data(
        C, logger)
    _, loss_func, generator = initialize(C, logger, relations, rel_weights)

    if C.watch_type == "train":
        data_watch = data_train
    if C.watch_type == "test":
        data_watch = data_test
    if C.watch_type == "valid":
        data_watch = data_valid

    #----- load model -----

    if not C.model_save or not C.model_save_2:
        raise "model information incomplete"

    with open(C.model_save, "rb") as fil:
        models_1 = pickle.load(fil)
Code Example #24
refs = ['data\\CZ_KKY_APK_dump.ref-1.pkl']

import pickle
import time
start = time.time()
from main import initialize, find_similar
print( " - doba importu:", time.time()-start)

n=1

for refx in refs:
    with open(refx, 'rb') as pkl_file:
        ref = pickle.load(pkl_file)
    start = time.time()
    print(refx)
    data = initialize("data\\CZ_KKY_APK_dump.txt", 'utf-8')
    print( " - doba načtení:", time.time()-start)
    chyb = 0
    start = time.time()
    for i in range(n):
        for word,dist in ref:
            similar = find_similar(word, data, distance=dist)
            chyb_local = ref[(word, dist)].symmetric_difference(similar)   # s.symmetric_difference(t): result is a new set with elements in either s or t but not both
            if len(chyb_local) > 0:
                print( "\nchyba    :", word, dist)
                print( "nalezeno :", similar)
                print( "správně  :", ref[(word,dist)])
            chyb += len(chyb_local)
    print( " - doba hledání prumer:", (time.time()-start)/n)
    print( 'errors:', chyb)
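From the usage above, the reference pickle appears to hold a dict mapping (word, distance) tuples to the expected set of similar words (an inference, not confirmed by the snippet; the sample values are hypothetical):

    # inferred structure of the reference data:
    # ref = {("word", 1): {"ward", "word", "wood"}, ...}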
Code Example #25
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Actual WSGI app instantiations used from app.yaml.

Function 'frontend.initialize' must be called from a separate module
not imported in tests.
"""

# Assert that "google.protobuf" imports.
import google.protobuf

import main

frontend, backend = main.initialize()
Code Example #26
File: start.py Project: sachitgupta25/MyBank
from env_settings import *
import os
from bank_opertions.operations import Operations
from main import show_welcome_screen, initialize, display_menu

if __name__ == "__main__":
    initialize()
    show_welcome_screen()
    # Runs only the first time, not afterwards; use file arguments
    display_menu()