def io():
	global serial_no
	ws = request.environ['wsgi.websocket'] 
	if not ws:
		abort(400)
	logger = utils.logger_init('%s/%s.json' % (LOGS_PATH, serial_no))
	results = utils.logger_init('%s/%s.json' % (RESULTS_PATH, serial_no))
	if UI_DEBUG:
		# Simulate each sub-test result with a short delay.
		for name in ('DI', 'ADC', 'UART', 'I2C', '9-Axis', 'Airpressure', 'Charger'):
			time.sleep(0.1)
			utils.websocket_send(ws, '{"tester":"%s","result":true}' % name, results)
	else:
		com = utils.command_open()
		tester.tester_io(com, logger, results, ws)
		utils.command_close(com)
	utils.logger_term(logger)
	utils.logger_term(results)
	return 'OK'
def rtc():
	ws = request.environ['wsgi.websocket'] 
	if not ws:
		abort(400)
	logger = utils.logger_init('%s/%s.json' % (LOGS_PATH, serial_no))
	results = utils.logger_init('%s/%s.json' % (RESULTS_PATH, serial_no))
	if UI_DEBUG:
		utils.websocket_send(ws, '{"tester":"RTC","result":true,"seconds":120}', results)
	else:
		com = utils.command_open()
		tester.tester_rtc(com, logger, results, ws)
		utils.command_close(com)
	utils.logger_term(results)
	utils.logger_term(logger)
	return 'OK'
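These handlers lean on a small `utils` module that is not shown here. A minimal sketch of what those helpers might look like, assuming `logger_init` opens a line-oriented JSON log and `websocket_send` mirrors each frame into it (names and signatures are inferred from the usage above, not taken from the real module):

def logger_init(path, mode='a'):
	# Hypothetical: open a line-oriented JSON log file and return the handle.
	return open(path, mode)

def logger_term(log):
	# Hypothetical: close a handle returned by logger_init.
	log.close()

def websocket_send(ws, msg, log):
	# Hypothetical: push a JSON string to the client and mirror it to the log.
	ws.send(msg)
	log.write(msg + '\n')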
    def __init__(self, output_dir):
        self.output_dir = output_dir
        os.makedirs(output_dir, exist_ok=True)
        filehandler, consolehandler = logger_init(output_dir, logging.DEBUG)

        self.setup_fretboard()
        self.tab_got()
    def __init__(self):

        # Logger
        self.logger = logger_init()
        # Device selection: use CUDA when requested and available.
        Config.cuda = True
        self.device = torch.device(
            'cuda' if Config.cuda and torch.cuda.is_available() else 'cpu')

        ################## Data ###################
        # Load Sparse Adjacency Matrix
        file_name = 'adj_input.pkl'
        (data, rows, columns, vocab_dict) = pd.read_pickle(file_name)
        id_word_map = {v: k for k, v in vocab_dict.items()}
        rel_list = ['ISA']
        num_entities = len(vocab_dict)
        num_relations = len(rel_list)

        # Add self-loops, build the dense adjacency matrix, and drop edges with frequency < 10.
        rows = rows + [i for i in range(num_entities)]
        columns = columns + [i for i in range(num_entities)]
        data = data + [1 for i in range(num_entities)]
        adjs = coo_matrix((data, (rows, columns)),
                          shape=(num_entities, num_entities)).toarray()
        # Only hyponym-hypernym candidate pairs observed at least 10 times are kept as edges of the noisy graph.
        adjs = np.where(adjs >= 10, 1, 0)
        self.adjs = torch.FloatTensor(adjs).to(device=self.device)
        del rows, columns, data

        # Use X as indices into the randomly initialized entity embeddings.
        self.X = torch.arange(num_entities).to(device=self.device)
        # Load the pre-trained word embeddings.
        self.word_embs = load_embeddings(vocab_dict).to(device=self.device)
        logging.info('Finished the preprocessing')

        ################## Model, Optimizer, LossFunction ###################
        self.model = GRAPH2TAXO(num_entities,
                                num_relations).to(device=self.device)
        self.opt = torch.optim.Adam(self.model.parameters(),
                                    lr=Config.learning_rate,
                                    weight_decay=Config.L2)
        self.f1_loss = F1_Loss().to(device=self.device)

        ################## Part of Hyperparameters ###################
        # Hyperparameters for the constraints
        self.lambda_A = 1.0
        self.c_A = 0.5
        self.tau_A = 1.0
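As a quick illustration of the frequency threshold applied above, a toy run (the counts here are made up):

import numpy as np
from scipy.sparse import coo_matrix

# Toy pair counts: only entries >= 10 survive as edges.
data, rows, cols = [12, 3, 10], [0, 1, 2], [1, 2, 0]
adj = coo_matrix((data, (rows, cols)), shape=(3, 3)).toarray()
print(np.where(adj >= 10, 1, 0))
# [[0 1 0]
#  [0 0 0]
#  [1 0 0]]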
Example #5
    def __init__(self, db_name, db_path=None, logger=None):
        if db_path is not None:
            # Hack: if db_path already contains the db name, strip it to avoid duplication.
            full_db_name = db_path.replace(db_name, '') + db_name
        else:
            full_db_name = db_name

        if logger is None:
            logger = logger_init('DB_log')

        self.logger = logger
        self.db_name = db_name
        # full name - with path
        self._full_db_name = full_db_name
        self._db_connector = None
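A hypothetical usage of this wrapper (the enclosing class name is not shown in the snippet, so `Database` is an assumed name):

# Hypothetical usage; `Database` is an assumed class name.
db = Database('app.db', db_path='/var/data/app.db')
db.logger.info('using database file %s', db._full_db_name)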
def power():
	global serial_no
	ws = request.environ['wsgi.websocket'] 
	if not ws:
		abort(400)
	ws.receive()	# wait for the client's initial message before starting
	logger = utils.logger_init('%s/%s.json' % (LOGS_PATH, serial_no), 'wb')
	results = utils.logger_init('%s/%s.json' % (RESULTS_PATH, serial_no))
	if UI_DEBUG:
		time.sleep(0.5)
		utils.websocket_send(ws, '{"tester":"Current","result":true}', results)
		time.sleep(0.5)
		utils.websocket_send(ws, '{"tester":"Voltage","result":true}', results)
	else:
		com = utils.command_open()
		if com:
			tz_power.check(com, ws, logger, results)
			utils.command_close(com)
		else:
			utils.websocket_send(ws, '{"tester":"Current","result":false}', results)
			utils.websocket_send(ws, '{"tester":"Voltage","result":false}', results)
	utils.logger_term(logger)
	utils.logger_term(results)
	return 'OK'
def ble():
	global serial_no
	ws = request.environ['wsgi.websocket'] 
	if not ws:
		abort(400)
	results = utils.logger_init('%s/%s.json' % (RESULTS_PATH, serial_no))
	if UI_DEBUG:
		time.sleep(1)
		utils.websocket_send(ws, '{"tester":"BLE","result":true,"RSSI":-50}', results)
	else:
		com = utils.command_open()
		tester.tester_ble(com, results, ws)
		utils.command_close(com)
	utils.logger_term(results)
	return 'OK'
def term():
	ws = request.environ['wsgi.websocket'] 
	if not ws:
		abort(400)
	results = utils.logger_init('%s/%s.json' % (RESULTS_PATH, serial_no))
	if not UI_DEBUG:
		com = utils.command_open()
		tester.tester_terminate(com)	# stop the tester firmware
		firm_writer.erase_tester(com)	# erase the tester firmware
		tz_power.off(com)		# turn off USB power
		utils.command_close(com)
	utils.websocket_send(ws, '{"tester":"Terminated","result":true}', results)
	utils.logger_term(results)

	return 'OK'
def tz1_firm():
	global serial_no
	ws = request.environ['wsgi.websocket'] 
	if not ws:
		abort(400)
	results = utils.logger_init('%s/%s.json' % (RESULTS_PATH, serial_no))
	if UI_DEBUG:
		utils.websocket_send(ws, '{"tester":"TZ1Firm","result":true}', results)
	else:
		com = utils.command_open()
		if firm_writer.write_tester(com):
			utils.websocket_send(ws, '{"tester":"TZ1Firm","result":true}', results)
		else:
			utils.websocket_send(ws, '{"tester":"TZ1Firm","result":false}', results)
		utils.command_close(com)
	utils.logger_term(results)
	return 'OK'
def start():
	global serial_no
	ws = request.environ['wsgi.websocket'] 
	msg = ''
	if not ws:
		abort(400)

	serial_no = ws.receive()
	if serial_no.startswith('TZ1'):
		msg = '{"tester":"Start","result":true, "serial_no":"%s"}' % serial_no
	else:
		serial_no = 'invalid_serial'
		msg = '{"tester":"Start","result":false, "serial_no":"%s"}' % serial_no
	
	results = utils.logger_init('%s/%s.json' % (RESULTS_PATH, serial_no), 'wb')
	utils.websocket_send(ws, msg, results)
	utils.logger_term(results)
	return 'OK'
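For reference, a minimal client-side exchange against `start()`, assuming the handlers are served over gevent-websocket (host, port, and URL path here are assumptions):

# Minimal client sketch; host, port, and path are assumptions.
from websocket import create_connection  # pip install websocket-client

ws = create_connection('ws://localhost:8080/start')
ws.send('TZ1000-0001')   # serial number; must start with 'TZ1'
print(ws.recv())         # -> {"tester":"Start","result":true, ...}
ws.close()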
import logging
import os

import cv2
import numpy as np

from utils import logger_init


def viewImage(image):
    cv2.namedWindow('Display', cv2.WINDOW_NORMAL)
    cv2.imshow('Display', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


if __name__ == '__main__':

    output_dir = 'output'
    os.makedirs(output_dir, exist_ok=True)
    fileHandler, consoleHandler = logger_init(output_dir, logging.DEBUG)

    ## Read
    # img_path = 'assets/leaf.jpeg'
    img_path = 'data/guitar/test/2019-05-28-085835_1.jpg'
    img_name = os.path.basename(img_path).split('.')[0]
    img_gbr = cv2.imread(img_path)
    cv2.imwrite(os.path.join(output_dir, img_name + '_0' + '.jpg'), img_gbr)

    hsv_img = cv2.cvtColor(img_gbr, cv2.COLOR_BGR2HSV)
    cv2.imwrite(os.path.join(output_dir, img_name + '_1' + '.jpg'), hsv_img)

    green_low = np.array([45, 100, 50])
    green_high = np.array([75, 255, 255])
    curr_mask = cv2.inRange(hsv_img, green_low, green_high)
    hsv_img[curr_mask > 0] = (75, 255, 200)
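One possible continuation, saving and displaying the recoloured result with the `viewImage` helper defined above (a sketch, not part of the original script):

    # Save the recoloured image and inspect it interactively.
    cv2.imwrite(os.path.join(output_dir, img_name + '_2' + '.jpg'), hsv_img)
    viewImage(cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR))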
Example #12
def main(params, args, arch):

    # set number of threads in pytorch
    torch.set_num_threads(6)

    # initialize logging
    logger_init(args)

    # set gpu
    if args.GPU:
        torch.cuda.set_device(args.gpu)

    # apply the default hyperparameter settings for the corresponding dataset
    args = default_search_hyper(args)


    # load data
    # read nary data
    if args.n_arity > 2:
        d = nary_dataloader(args.task_dir)

        entity_idxs = {d.entities[i]: i for i in range(len(d.entities))}
        relation_idxs = {d.relations[i]: i for i in range(len(d.relations))}
        n_ent, n_rel = len(entity_idxs), len(relation_idxs)
        print("Number of train:{}, valid:{}, test:{}.".format(
            len(d.train_data), len(d.valid_data), len(d.test_data)))

        train_data = torch.LongTensor(
            get_data_idxs(d.train_data, entity_idxs, relation_idxs))
        valid_data = torch.LongTensor(
            get_data_idxs(d.valid_data, entity_idxs, relation_idxs))
        test_data = torch.LongTensor(
            get_data_idxs(d.test_data, entity_idxs, relation_idxs))

        e1_sp, e2_sp, e3_sp = n_ary_heads(train_data, valid_data, test_data)

    else:
        loader = DataLoader(args.task_dir)
        n_ent, n_rel = loader.graph_size()
        train_data = loader.load_data('train')
        valid_data = loader.load_data('valid')
        test_data = loader.load_data('test')
        print("Number of train:{}, valid:{}, test:{}.".format(
            len(train_data[0]), len(valid_data[0]), len(test_data[0])))

        heads, tails = loader.heads_tails()

        train_data = torch.LongTensor(train_data).transpose(0, 1)
        valid_data = torch.LongTensor(valid_data).transpose(0, 1)
        test_data = torch.LongTensor(test_data).transpose(0, 1)

    file_path = "search_nary" + "_" + str(args.num_blocks)
    directory = os.path.join("results", args.dataset, file_path)
    args.out_dir = directory
    if not os.path.exists(directory):
        os.makedirs(directory)
    os.environ["OMP_NUM_THREADS"] = "4"
    os.environ["MKL_NUM_THREADS"] = "4"
    args.perf_file = os.path.join(
        directory, args.dataset + '_search_nCP_nary_' + str(args.num_blocks) +
        "_" + str(args.trial) + '.txt')

    print('output file name:', args.perf_file)

    args.lr = params["lr"]
    args.decay_rate = params["decay_rate"]
    args.n_batch = params["n_batch"]
    #args.n_dim = params["n_dim"]
    args.input_dropout = params["input_dropout"]
    args.hidden_dropout = params["hidden_dropout"]
    #args.lamb = params["lamb"]

    plot_config(args)

    def tester_val(facts=None, arch=None):
        if args.n_arity == 2:
            return model.test_link(test_data=valid_data,
                                   n_ent=n_ent,
                                   heads=heads,
                                   tails=tails,
                                   filt=args.filter,
                                   arch=arch)

        elif args.n_arity > 2:
            return model.evaluate(valid_data, e1_sp, e2_sp, e3_sp, arch)

    def tester_tst():
        if args.n_arity == 2:
            return model.test_link(test_data=test_data,
                                   n_ent=n_ent,
                                   heads=heads,
                                   tails=tails,
                                   filt=args.filter)
        elif args.n_arity > 2:
            return model.evaluate(test_data, e1_sp, e2_sp, e3_sp)

    tester_trip_class = None
    model = BaseModel(n_ent, n_rel, args, arch)
    mrr = model.train(train_data, valid_data, tester_val, tester_tst,
                      tester_trip_class)

    return mrr
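`main` returns the validation MRR, so a search driver can feed it hyperparameter samples; a hypothetical invocation (parameter values are placeholders, and `args`/`arch` come from the surrounding search code):

# Hypothetical driver; parameter values are placeholders.
params = {'lr': 1e-3, 'decay_rate': 0.995, 'n_batch': 256,
          'input_dropout': 0.2, 'hidden_dropout': 0.3}
mrr = main(params, args, arch)
print('validation MRR:', mrr)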
Example #13
from data_process.dnn_DataLoader import LoadData
from data_process.bert_DataLoader import BertDataGenerator
from data_process.siamesebert_DataLoader import SiameseDataGenerator
from model import SiameseCnnModel, SiameseRnnModel, SiameseBertModel, BertModel
from utils import logger_init, Evaluator, cal_acc

from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import load_model, Model
import pandas as pd
import argparse

from bert4keras.backend import set_gelu

# initialize logging
logger = logger_init()

MODEL_CLASS = {
    "siamese_CNN": SiameseCnnModel,
    "siamese_RNN": SiameseRnnModel,
    "siamese_bert": SiameseBertModel,
    "albert": BertModel
}
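The mapping lets the training code pick a model class by name, e.g.:

# Look up a model class by its configured name.
model_cls = MODEL_CLASS["siamese_CNN"]  # -> SiameseCnnModel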


def train(args):
    if "bert" in args.model_type:
        set_gelu("tanh")  # switch the gelu implementation

        # Step1: Load Data
        data_generator = None
Example #14
    torch.set_num_threads(5)

    dataset = args.task_dir.split('/')[-1]
    directory = os.path.join('results', args.model)
    if not os.path.exists(directory):
        os.makedirs(directory)

    args.out_dir = directory
    args.perf_file = os.path.join(
        directory, '_'.join([dataset, args.sample, args.update]) +
        args.out_file_info + '.txt')
    args.stat_file = os.path.join(
        directory, '_'.join([dataset, args.sample, args.update]) + '.stat')
    print('output file name:', args.perf_file, args.stat_file)

    logger_init(args)

    task_dir = args.task_dir
    loader = DataLoader(task_dir, args.N_1)

    n_ent, n_rel = loader.graph_size()

    train_data = loader.load_data('train')
    valid_data = loader.load_data('valid')
    test_data = loader.load_data('test')
    args.n_train = len(train_data[0])
    print("Number of train:{}, valid:{}, test:{}.".format(
        len(train_data[0]), len(valid_data[0]), len(test_data[0])))

    plot_config(args)