Example #1
def main():
    # parse config
    config_file = sys.argv[1]
    config = Config(config_file)

    # setup logger
    setup_logging(config.working_dir)

    # encoding func
    encoding_func = ENCODING_METHOD_MAP[config.encoding_method]
    encoding_func2 = ENCODING_METHOD_MAP[config.encoding_method2]
    log_to_file('Encoding method2', config.encoding_method2)

    data_provider = []
    for p in range(config.base_model_count):
        temp_provider = DataProvider(
             encoding_func,
             encoding_func2,
             config.data_file,
             config.test_file,
             config.batch_size,
             max_len_hla=config.max_len_hla,
             max_len_pep=config.max_len_pep,
             model_count=config.model_count
        )
        data_provider.append(temp_provider)
 
    log_to_file('max_len_hla', data_provider[0].max_len_hla)
    log_to_file('max_len_pep', data_provider[0].max_len_pep)
    
    test(config, data_provider[0])
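Examples 1, 5, 7, and 16 all assume an ENCODING_METHOD_MAP registry whose entries take a sequence and a maximum length and return an encoded tensor plus a padding mask (see the calls in Example 16). A minimal sketch of that assumed shape, with a placeholder one-hot encoder (the project's real one-hot/BLOSUM encoders are not shown in these excerpts):

import torch

# Hypothetical sketch of the assumed encoder registry.
# Each entry maps (sequence, max_len) -> (tensor, mask).
AMINO_ACIDS = 'ACDEFGHIKLMNPQRSTVWY'

def encode_one_hot(seq, max_len):
    enc = torch.zeros(max_len, len(AMINO_ACIDS))
    mask = torch.zeros(max_len)
    for i, aa in enumerate(seq[:max_len]):
        enc[i, AMINO_ACIDS.index(aa)] = 1.0
        mask[i] = 1.0
    return enc, mask

ENCODING_METHOD_MAP = {'one_hot': encode_one_hot}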
Example #2
def __init_brand_list():
    config_file = r"D:\Users\Achelics\liu_project\mutil_device_recongition\handle_brand_information\database_config.ini"
    section = "MyDataBase"
    db_config = Config(config_file, section)

    url = db_config.get("url")
    user = db_config.get("user")
    pawd = db_config.get("pawd")
    database = db_config.get("database")

    try:
        # open the database connection
        db = MySQLdb.connect(url, user, pawd, database)
        # get a cursor for executing statements
        cursor = db.cursor()
        # select distinct brand names, longest first
        sql_default = "SELECT DISTINCT(en_name) FROM iie_brand ORDER BY LENGTH(en_name) DESC"
        # execute the SQL statement
        cursor.execute(sql_default)
        # fetch all records
        default_results = cursor.fetchall()
        for row in default_results:
            __BRAND_LIST.append(str(row[0]).upper())
        # close the database connection
        db.close()
    except MySQLdb.Error as e:
        print("MySQL Error: %s" % str(e))
Example #3
def insert_brand():
    config_file = r"D:\Users\Achelics\liu_project\mutil_device_recongition\handle_brand_information\database_config.ini"
    section = "MyDataBase"
    db_config = Config(config_file, section)

    url = db_config.get("url")
    user = db_config.get("user")
    pawd = db_config.get("pawd")
    database = db_config.get("database")

    try:
        # open the database connection
        conn = MySQLdb.connect(url, user, pawd, database)
        # get a cursor for executing statements
        cursor = conn.cursor()
        for brand_type in __RAW_BRAND_LIST:
            if str(brand_type[0]).strip().upper() not in __BRAND_LIST:
                print(brand_type)
                # parameterized insert avoids quoting bugs and SQL injection
                default_sql = "insert into iie_brand(en_name, product_type) values(%s, %s)"
                cursor.execute(default_sql, (brand_type[0], brand_type[1]))
        # commit the inserts
        conn.commit()
        # close the cursor
        cursor.close()
        # close the database connection
        conn.close()
    except MySQLdb.Error as e:
        print("MySQL Error: %s" % str(e))
Example #4
def main():
    config = Config()

    print("Processing text for \'%s\'." % (config.text_file))
    data = preprocess.preprocess(config.text_file, 'infer', config)
    dataloader = dataprocess.load_infer(data, config)

    G = Generator(config)
    G.load_state_dict(load_weights(config.checkpoint_file))
    G = set_device(G, config.device, config.use_cpu)
    G.eval()

    print("Generating spectrogram with \'%s\'." % (config.checkpoint_file))

    spec = []
    y_prev = torch.zeros(1, config.prev_length, config.fft_size // 2 + 1)
    for x in tqdm(dataloader, leave=False, ascii=True):
        x, y_prev = set_device((x, y_prev), config.device, config.use_cpu)

        y_gen = G(x, y_prev)
        y_gen = y_gen.squeeze(1)
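        # keep the last prev_length frames of this chunk as the
        # conditioning input y_prev for the next batch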
        y_prev = y_gen[:, -config.prev_length:, :]
        spec.append(y_gen.data)

    print("Generating audio with Griffin-Lim algorithm.")
    spec = torch.cat(spec, dim=1).transpose(1, 2)  # T x D -> D x T
    wave = dsp.inv_spectrogram(spec, config)

    savename = config.checkpoint_file.replace('.pt', '_') + os.path.basename(
        config.text_file).replace('.txt', '.wav')
    dsp.save(savename, wave, config.sample_rate)

    print("Audio saved to \'%s\'." % (savename))
Example #5
def main():
    # parse config
    config_file = sys.argv[1]
    config = Config(config_file)

    # setup logger
    setup_logging(config.working_dir)

    # encoding func
    encoding_func = ENCODING_METHOD_MAP[config.encoding_method]
    encoding_func2 = ENCODING_METHOD_MAP[config.encoding_method2]
    log_to_file('Encoding method2', config.encoding_method2)

    data_provider = []
    for p in range(config.base_model_count):
        temp_provider = DataProvider(encoding_func,
                                     encoding_func2,
                                     config.data_file,
                                     config.test_file,
                                     config.batch_size,
                                     max_len_hla=config.max_len_hla,
                                     max_len_pep=config.max_len_pep,
                                     model_count=config.model_count)
        data_provider.append(temp_provider)

    log_to_file('Training samples', len(data_provider[0].train_samples[0]))
    log_to_file('Val samples', len(data_provider[0].validation_samples[0]))
    log_to_file('Training steps', data_provider[0].train_steps())
    log_to_file('Val steps', data_provider[0].val_steps())
    log_to_file('Batch size', data_provider[0].batch_size)
    log_to_file('max_len_hla', data_provider[0].max_len_hla)
    log_to_file('max_len_pep', data_provider[0].max_len_pep)

    for p in range(config.base_model_count):
        train(config, data_provider[p], p)
Example #6
def insert_brand_model():
    config_file = r"D:\Users\Achelics\liu_project\mutil_device_recongition\handle_brand_information\database_config.ini"
    section = "MyDataBase"
    db_config = Config(config_file, section)

    url = db_config.get("url")
    user = db_config.get("user")
    pawd = db_config.get("pawd")
    database = db_config.get("database")

    try:
        # open the database connection
        conn = MySQLdb.connect(url, user, pawd, database)
        # get a cursor for executing statements
        cursor = conn.cursor()
        for brand_model in __BRAND_MODEL_LIST:
            # parameterized insert avoids quoting bugs and SQL injection
            default_sql = "insert into iie_brand_model(brand, model, category) values(%s, %s, %s)"
            cursor.execute(default_sql, (brand_model["Brand"], brand_model["Model"], brand_model["type"]))
        # commit the inserts
        conn.commit()
        # close the cursor
        cursor.close()
        # close the database connection
        conn.close()
    except MySQLdb.Error as e:
        print("MySQL Error: %s" % str(e))
        print("Failed to insert into the database")
Example #7
def main():
    # parse config
    config_file = sys.argv[1]

    config = Config(config_file)
    folder = config_file.split('/')[0]
    encoding_func = ENCODING_METHOD_MAP[config.encoding_method]
    encoding_func2 = ENCODING_METHOD_MAP[config.encoding_method2]
    data_provider = DataProvider(
        encoding_func,
        encoding_func2,
        config.data_file,
        config.test_file,
        config.batch_size,
        max_len_hla=config.max_len_hla,
        max_len_pep=config.max_len_pep,
        model_count=config.model_count
        )
    device = config.device
    models = config.model_count * config.base_model_count
    print(models)
    total_df = pd.DataFrame()
    for i in range(models):
        # load and prepare model
        path = folder + "/best_model_{}.pytorch".format(i)
        state_dict = torch.load(path)
        model = Model(config)
        model.load_state_dict(state_dict)
        model.to(device)
        model.eval()
        data_provider.new_epoch()

        for _ in range(data_provider.test_steps()):
            data = data_provider.batch_test()
            hla_a, hla_mask, hla_a2, hla_mask2, pep, pep_mask, pep2, pep_mask2, uid_list = data
            temp_attn = {}
            with torch.no_grad():
                temp = model.encoder_peptide2.conv_0(pep2.to(device))
                temp, att = model.encoder_peptide2.att_0(temp.to(device))

            # use a separate index so the outer model-loop variable i is not shadowed
            for j in range(config.batch_size):
                temp_attn[uid_list[j].split('-')[3]] = att[j].tolist()

            temp_df = pd.DataFrame.from_dict(temp_attn, orient="index")
            total_df = pd.concat([total_df, temp_df])

    avg_ = total_df.mean(axis=0)
    avg_ = pd.DataFrame({'position': avg_.index + 1, 'avg weight': avg_.values})
    avg_.to_csv(folder + "/" + "attn_weight.csv", index=False)
Example #8
def run():

    config = Config('configs.yaml')
    database = Database(config.database)

    controller = Controller()

    while True:
        controller.fill_wall_post_trigger(database)
        controller.fill_comment_trigger(database)

        controller.fill_wall_post_emotion(database)
        controller.fill_comment_emotion(database)

        sleep(60 * 60)
Example #9
def main():
    config = Config()
    config_basename = os.path.basename(config.configs[0])
    print("Configuration file: \'%s\'" % (config_basename))

    set_list = ['train', 'valid']
    file_list = {}

    # Creating Path for Features
    create_path(config.feature_path, action='overwrite', verbose=False)
    for set_type in set_list:
        path = os.path.join(config.feature_path, set_type)
        create_path(path, action='overwrite')

        list_file = set_type + '_list.txt'
        file_list[set_type] = read_file_list(
            os.path.join(config.dataset_path, list_file))

    # Extracting Features
    if config.num_proc > 1:
        if config.use_cpu is False:
            raise AssertionError("You cannot use the GPU with multiprocessing.")

        p = Pool(config.num_proc)
        for set_type in set_list:
            p.map(partial(preprocess, set_type=set_type, config=config),
                  file_list[set_type])
        # release the worker processes once all sets are mapped
        p.close()
        p.join()
    else:
        for set_type in set_list:
            for f in file_list[set_type]:
                preprocess(f, set_type=set_type, config=config)

    # Creating Files Indices
    for set_type in set_list:
        path = os.path.join(config.feature_path, set_type)
        file_indices = make_indices(path)
        torch.save(file_indices,
                   os.path.join(config.feature_path, set_type + '_indices.pt'))

    print("Feature saved to \'%s\'." % (config.feature_path))
Example #10
    def create_experiment_info(self):
        # create subject data directory
        data_dir = 'data/{}/'.format(self.subject_number.get())
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        else:
            files = glob.glob(data_dir + '*')
            for f in files:
                os.remove(f)

        # create config object
        config = Config(config_filename=self.config_filename.get())

        # create trial list and save to csv
        config.save_trial_list(path=data_dir)

        print "Trial list created as {}.".format(data_dir + 'trials.csv')

        # create experiment info and save to txt
        config.save_experiment_info(glove_hand=self.hand.get(), path=data_dir)

        print "Experiment info created as {}.".format(data_dir + 'exp_info.txt')
Example #11
def main():
    config = Config()
    config_basename = os.path.basename(config.configs[0])
    print("Configuration file: \'%s\'" % (config_basename))

    checkpoint_path = create_path(config.checkpoint_path,
                                  action=config.checkpoint_path_action)
    config.save(os.path.join(checkpoint_path, config_basename))
    logger = Logger(os.path.join(checkpoint_path, 'log'))

    dataloader = dataprocess.load_train(config)
    step_size = config.step_epoch * len(dataloader.train)

    G = Generator(config)
    D = Discriminator(config)
    G, D = set_device((G, D), config.device, config.use_cpu)

    criterionL1 = nn.L1Loss()
    optimizerG = torch.optim.Adam(G.parameters(),
                                  lr=config.learn_rate,
                                  betas=config.betas,
                                  weight_decay=config.weight_decay)
    optimizerD = torch.optim.Adam(D.parameters(),
                                  lr=config.learn_rate,
                                  betas=config.betas,
                                  weight_decay=config.weight_decay)
    schedulerG = StepLR(optimizerG,
                        step_size=step_size,
                        gamma=config.decay_factor)
    schedulerD = StepLR(optimizerD,
                        step_size=step_size,
                        gamma=config.decay_factor)

    k = 0.0
    M = AverageMeter()
    lossG_train = AverageMeter()
    lossG_valid = AverageMeter()
    lossD_train = AverageMeter()

    print('Training start')
    for epoch in range(config.stop_epoch + 1):
        # Training Loop
        G.train()
        D.train()
        for batch in tqdm(dataloader.train, leave=False, ascii=True):
            x, y_prev, y = set_device(batch, config.device, config.use_cpu)
            y = y.unsqueeze(1)

            optimizerG.zero_grad()
            y_gen = G(x, y_prev)
            lossL1 = criterionL1(y_gen, y)
            loss_advG = criterionAdv(D, y_gen)
            lossG = lossL1 + loss_advG
            lossG.backward()
            optimizerG.step()
            schedulerG.step()

            optimizerD.zero_grad()
            loss_real = criterionAdv(D, y)
            loss_fake = criterionAdv(D, y_gen.detach())
            loss_advD = loss_real - k * loss_fake
            loss_advD.backward()
            optimizerD.step()
            schedulerD.step()

            diff = torch.mean(config.gamma * loss_real - loss_fake)
            k = k + config.lambda_k * diff.item()
            k = min(max(k, 0), 1)

            measure = (loss_real + torch.abs(diff)).data
            M.step(measure, y.size(0))

            logger.log_train(lossL1, loss_advG, lossG, loss_real, loss_fake,
                             loss_advD, M.avg, k, lossG_train.steps)
            lossG_train.step(lossG.item(), y.size(0))
            lossD_train.step(loss_advD.item(), y.size(0))

        # Validation Loop
        G.eval()
        D.eval()
        for batch in tqdm(dataloader.valid, leave=False, ascii=True):
            x, y_prev, y = set_device(batch, config.device, config.use_cpu)
            y = y.unsqueeze(1)

            y_gen = G(x, y_prev)
            lossL1 = criterionL1(y_gen, y)
            loss_advG = criterionAdv(D, y_gen)
            lossG = lossL1 + loss_advG

            logger.log_valid(lossL1, loss_advG, lossG, lossG_valid.steps)
            lossG_valid.step(lossG.item(), y.size(0))

        for param_group in optimizerG.param_groups:
            learn_rate = param_group['lr']

        print(
            "[Epoch %d/%d] [loss G train: %.5f] [loss G valid: %.5f] [loss D train: %.5f] [lr: %.6f]"
            % (epoch, config.stop_epoch, lossG_train.avg, lossG_valid.avg,
               lossD_train.avg, learn_rate))

        lossG_train.reset()
        lossG_valid.reset()
        lossD_train.reset()

        savename = os.path.join(checkpoint_path, 'latest_')
        save_checkpoint(savename + 'G.pt', G, optimizerG, learn_rate,
                        lossG_train.steps)
        save_checkpoint(savename + 'D.pt', D, optimizerD, learn_rate,
                        lossD_train.steps)
        if epoch % config.save_epoch == 0:
            savename = os.path.join(checkpoint_path,
                                    'epoch' + str(epoch) + '_')
            save_checkpoint(savename + 'G.pt', G, optimizerG, learn_rate,
                            lossG_train.steps)
            save_checkpoint(savename + 'D.pt', D, optimizerD, learn_rate,
                            lossD_train.steps)

    print('Training finished')
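criterionAdv is used above but not defined in this excerpt. The k update and the convergence measure M follow the BEGAN recipe, which suggests the discriminator is an autoencoder whose adversarial term is its reconstruction error; a minimal sketch under that assumption:

import torch

# Hypothetical sketch only: criterionAdv is not shown in the excerpt.
# In BEGAN-style training the discriminator D is an autoencoder, and the
# adversarial "loss" on a batch y is its L1 reconstruction error.
def criterionAdv(D, y):
    return torch.mean(torch.abs(D(y) - y))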
Example #12
from time import sleep

from src.controller import Controller
from src.database import Database
from config_parser import Config

config = Config('configs.yaml')
database = Database(config.database)

controller = Controller()

while True:
    controller.fill_trigger(database)
    controller.fill_emotion(database)
    print("Update completed")
    sleep(60 * 60)
Example #13
    def __init__(self, environment=None):
        self.__config_obj = Config(environment)
        self.__oracle_runner = OracleRunner(self.__config_obj.db_user,
                                            self.__config_obj.db_password,
                                            self.__config_obj.db_sid, False,
                                            self.__config_obj.log_path)
Example #14
def run():

    config = Config('configs.yaml')

    vk = VK(config.vk)
    database = Database(config.database)
    controller = Controller(config.controller)
    group_urls_path = 'data/vk/group_short_url.txt'

    n = 0

    while True:

        offset = n * 100

        for url in get_data(group_urls_path):

            group_info = controller.get_group_info(vk, url)
            group_id = group_info[0]['id']

            try:

                wall_posts = controller.get_target_post(
                    vk, group_id, offset, url)

                for post in wall_posts:

                    post['Text'] = ' '.join(
                        list(filter(None, re.split(r'\W|\d', post['Text']))))
                    post['PublishDateTime'] = handle_date(
                        post['PublishDateTime'])

                    controller.save_data_to_bd(bd=database,
                                               table='WallPost',
                                               target_object=post)

                for post in wall_posts:

                    comments = controller.get_target_comments(
                        vk, group_id, post_id=post['OuterId'], offset=0)

                    for comment in comments:

                        comment['Text'] = ' '.join(
                            list(
                                filter(None, re.split(r'\W|\d',
                                                      comment['Text']))))
                        comment['PublishDateTime'] = handle_date(
                            comment['PublishDateTime'])

                        controller.save_data_to_bd(bd=database,
                                                   table='Comment',
                                                   target_object=comment)

                    for comment in comments:

                        user = controller.get_target_user(
                            vk, comment['OuterId'])

                        controller.save_data_to_bd(bd=database,
                                                   table='SocialNetworkUser',
                                                   target_object=user)

            except Exception as e:
                print(e)

        n += 1
Example #15
            cursor = connection.cursor()
            cursor.execute(sql_select_Query)
            records = cursor.fetchall()

            print("Total number of rows in Laptop is: ", cursor.rowcount)

            print("\nPrinting each laptop record")

            if db_name == "TriggerWord":
                return self.get_trigger_word_list(records)
            if db_name == "WallPost":
                return self.get_wall_post_list(records)
            if db_name == 'Comment':
                return self.get_comment_list(records)


        except Error as e:
            print("Error reading data from MySQL table", e)
        finally:
            if connection.is_connected():
                # close the cursor before the connection it belongs to
                cursor.close()
                connection.close()
                print("MySQL connection is closed")


if __name__ == "__main__":
    from config_parser import Config
    config = Config("../configs.yaml")
    db = Database(config.database)
    L = db.select('WallPost')
    print(L)
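This excerpt begins mid-method; judging from the __main__ block, it is the tail of a Database.select() method roughly shaped as below (a sketch: the connection setup and the per-table query are assumptions):

import mysql.connector
from mysql.connector import Error

class Database:
    # hypothetical shell around the excerpt above; only the shape is
    # implied by the snippet, the query and config keys are assumed
    def __init__(self, db_config):
        self._params = db_config

    def select(self, db_name):
        connection = None
        cursor = None
        try:
            connection = mysql.connector.connect(**self._params)
            cursor = connection.cursor()
            cursor.execute("SELECT * FROM " + db_name)  # assumed query
            return cursor.fetchall()
        except Error as e:
            print("Error reading data from MySQL table", e)
        finally:
            if connection is not None and connection.is_connected():
                cursor.close()
                connection.close()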
Example #16
def main():

    config = Config("dup_0/config.json")

    # get HLA and peptide
    HLA = sys.argv[1]
    peptide = sys.argv[2]
    if len(peptide) > 15:
        print("please input the peptide shorter than 16 amino acids.")
        return

    hla_seq = None
    # get the sequence of HLA
    hla_path = os.path.join(BASE_DIR, '..', 'dataset',
                            'mhc_i_protein_seq2.txt')
    with open(hla_path, 'r') as in_file:
        for line_num, line in enumerate(in_file):
            if line_num == 0:
                continue

            info = line.strip('\n').split(' ')
            if info[0] != HLA:
                continue
            hla_seq = info[1]
            break
    if hla_seq is None:
        print("The HLA is not included in the dataset.")
        return

    # encode the sequences

    encoding_func = ENCODING_METHOD_MAP["one_hot"]
    encoding_func2 = ENCODING_METHOD_MAP["blosum"]

    hla, hla_mask = encoding_func(hla_seq, 385)
    hla = torch.reshape(hla, (1, hla.size(0), hla.size(1)))

    pep, pep_mask = encoding_func(peptide, 15)
    pep = torch.reshape(pep, (1, pep.size(0), pep.size(1)))

    hla2, hla_mask2 = encoding_func2(hla_seq, 385)
    hla2 = torch.reshape(hla2, (1, hla2.size(0), hla2.size(1)))
    pep2, pep_mask2 = encoding_func2(peptide, 15)
    pep2 = torch.reshape(pep2, (1, pep2.size(0), pep2.size(1)))

    # load model
    device = config.device

    temp_list = []
    for p in range(config.base_model_count):
        for k in range(config.model_count):
            state_dict = torch.load(
                config.model_save_path(p * config.model_count + k))
            model = Model(config)
            model.load_state_dict(state_dict)
            model.to(device)
            model.eval()
            with torch.no_grad():
                pred_ic50 = model(hla.to(device), hla_mask.to(device),
                                  hla2.to(device), hla_mask2.to(device),
                                  pep.to(device), pep_mask.to(device),
                                  pep2.to(device), pep_mask2.to(device))
                pred_ic50 = math.pow(50000, 1 - pred_ic50)

            temp_list.append(pred_ic50)

    pred_ic50 = sum(temp_list) / len(temp_list)

    print("the predicted IC50 value is : {}".format(pred_ic50))
Example #17
import os
import sys
from github import Github
import argparse
from envdefault import EnvDefault
import checks
from config_parser import Config
import openpyxl

config = Config("config.yml")
row = 1


def parse_arguments():
    """
    Parse the command-line arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-t',
        '--token',
        action=EnvDefault,
        envvar='GIT_API_TOKEN',
        help="GitHub token: set using -t or setting env var 'GIT_API_TOKEN'")
    parser.add_argument('-e',
                        '--enterprise',
                        action='store_true',
                        help="Enable Github Enterprise")
    org_repo_group = parser.add_mutually_exclusive_group(required=True)
    org_repo_group.add_argument('-o',
                                '--organization',
Example #18
from config_parser import Config
import json
import viz
import vizact
import numpy as np
from vis_env import Room, BaseballGlove
from stimuli import Ball

config_filename = 'config/home.py'

config = Config(config_filename)
config.save_experiment_info(path='figures/')

# read experiment info (config dict)
with open('figures/exp_info.txt') as info_file:
    config = json.load(info_file)

room = Room(config)

#ball = Ball(room, size=0.15, position=[0,1.8,4], color=[1, 1, 1])
#ball.setContrastMode('constant_dark')
glove = viz.addChild('pole.wrl')


def rotate_glove():
    direction = glove.getMatrix().getForward()
    print(direction)
    print(glove.getQuat())
    #direction[0] = -direction[0]
    #glove.lookAt(direction, viz.REL_LOCAL)
    #print glove.getMatrix().getForward()