Example #1
def param_init(log_filename):

    global bio_ner_train_data, bio_ner_test_data, bio_ner_model, bio_ner_report

    bio_ner_train_data = data(
        RAW_DATA_PATH, PROCESSED_DATA_PATH, WORD_VEC_FILE, CHAR_VEC_FILE,
        POS_VEC_FILE, ADD_FEAT_VEC_FILE, CLASS_VEC_FILE,
        ADD_FEAT_FILE_SUBSTRING, CHAR_ONE_HOT_FILE_SUBSTRING,
        WORD_CONTEXT_LENGTH, NB_UNIQUE_CHARS, CHAR_VECTOR_SIZE,
        WORD_VECTOR_SIZE, NB_CLASSES, MAX_WORD_LENGTH, ADD_FEAT_VEC_SIZE,
        LIMITED_ADD_FEAT_VEC_SIZE, POS_TAG_VECTOR_SIZE)

    bio_ner_test_data = data(
        RAW_DATA_PATH, PROCESSED_DATA_PATH, TEST_word_vec_file_substring,
        CHAR_VEC_FILE, POS_VEC_FILE, ADD_FEAT_VEC_FILE,
        TEST_entity_vec_file_substring, TEST_add_feat_vec_file_substring,
        TEST_char_vec_file_substring, WORD_CONTEXT_LENGTH, NB_UNIQUE_CHARS,
        CHAR_VECTOR_SIZE, WORD_VECTOR_SIZE, NB_CLASSES, MAX_WORD_LENGTH,
        ADD_FEAT_VEC_SIZE, LIMITED_ADD_FEAT_VEC_SIZE, POS_TAG_VECTOR_SIZE)

    bio_ner_model = model(WORD_VECTOR_SIZE, WORD_CONTEXT_LENGTH,
                          NB_UNIQUE_CHARS, CHAR_VECTOR_SIZE, MAX_WORD_LENGTH,
                          EMBEDDING_OP_DIM, MAX_FEATURES, NB_CLASSES,
                          CHAR_FEATURE_OUTPUT, HIDDEN_SIZE, ADD_FEAT_VEC_SIZE,
                          POS_TAG_VECTOR_SIZE)

    bio_ner_report = report(REPORT_PATH, log_filename)
Example #2
    def read(self, fileName):
        """Reads a CSV file into the data set."""
        with open(fileName) as file:
            is_header = True
            for line in file:
                if len(line) <= 1:
                    continue  # skip blank lines
                if is_header:
                    self.header = data(line)
                    is_header = False
                else:
                    self.append(data(line))
Example #3
def BackupM():

    count = 0
    print('iniciando...')
    for a in range(1, len(nome) + 1):

        try:
            print(nome[a - 1])
            paf = '/home/%s/backup.tar.gz' % nome[a - 1]
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(ip[a - 1], username=nome[a - 1], password=senha[a - 1])
            sftp = ssh.open_sftp()
            #stdin, stdout, stderr = ssh.exec_command('tar -czvf /home/%s/backup.tar.gz /home/%s/Backup'%(nome[a-1],nome[a-1]))
            sftp.get(paf,
                     '/home/user208/%s%s.tar.gz' % (nome[a - 1], data.data()))
            sftp.close()

            paf = ("cp /home/user208/'%s%s.tar.gz' /home/user208/Dropbox" %
                   (nome[a - 1], data.data()))
            os.system(paf)
            print('\nEnvio concluído.\n')

        except Exception:
            count += 1
            falha.append(nome[a - 1])

    print('O backup foi finalizado. Das %d máquinas, %d não fizeram backup.' %
          (len(nome), count))

    smtp = smtplib.SMTP_SSL('smtp.mail.yahoo.com', 465)

    smtp.login('mail', 'password')

    de = 'sender'
    para = ['recipient']

    msg = """From: %s
    To: %s
    Subject: Backup

    Backup feito.""" % (de, ', '.join(para))

    smtp.sendmail(de, para, msg)

    smtp.quit()
    print('Email enviado.\n')
Example #4
    def __init__(self, params):
        #getting Params pickle file which was saved in persona.train()
        with open(params.decode_path + "/params.pickle", 'rb') as file:
            model_params = pickle.load(file)

        #Copying those parameters from model_params which are not in params
        for key in model_params.__dict__:
            if key not in params.__dict__:
                params.__dict__[key] = model_params.__dict__[key]

        self.params = params
        self.mode = "decoding"

        if self.params.PersonaMode:
            print("decoding in persona mode")
        else:
            print("decoding in non persona mode")

        self.Data = data(self.params)  # initializing EOT, EOS, beta and params

        self.lstm_source = lstm_source_(self.params)
        self.lstm_target = lstm_target_(self.params)
        self.softmax = softmax_(self.params)
        if self.params.use_GPU:
            self.lstm_source = self.lstm_source.cuda()
            self.lstm_target = self.lstm_target.cuda()
            self.softmax = self.softmax.cuda()

        self.readModel()  # loading the model (only parameters) of first iteration in training
        self.ReadDict()   # building a dictionary of words, with keys from 0 to len(dictionary.txt)
        self.read_dict()
Example #5
    async def birthday_command(self,
                               ctx,
                               user: discord.User = None,
                               birthday=""):
        """ get/set users birthday
        """

        logging.info(f'{ctx.author} tried to use the "birthday" command')

        if user is None:
            user = ctx.author  # default to the invoking author

        info = data(ctx.guild)

        if birthday == "":
            if info.get_birthday(user) is not None:
                await ctx.send(
                    f'{user.display_name}\'s birthday is `{info.get_birthday(user).strftime("%m/%d/%Y")}`'
                )
            else:
                await ctx.send(f'Please set {user.display_name}\'s birthday')
            return

        try:
            birthday = parser.parse(birthday)
        except parser.ParserError:
            await ctx.send("Incorrect birthday format, try `month-day-year`")
            return

        info.set_birthday(user, birthday)

        logging.info(f'{user} birthday is now {info.get_birthday(user)}')
        await ctx.send(
            f'Set {user.display_name}\'s birthday to `{info.get_birthday(user).strftime("%m/%d/%Y")}`'
        )
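The data(ctx.guild) object here only needs to expose get_birthday and set_birthday. A toy stand-in with that shape (an assumption for illustration; the bot's real storage layer is not shown on this page) could be:

# Toy guild-scoped birthday store; names and layout are invented.
class data:
    _store = {}  # guild.id -> {member.id: datetime}

    def __init__(self, guild):
        self._birthdays = self._store.setdefault(guild.id, {})

    def get_birthday(self, user):
        return self._birthdays.get(user.id)  # a datetime, or None if unset

    def set_birthday(self, user, birthday):
        self._birthdays[user.id] = birthday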
Example #6
    def __init__(self, *args, **kwargs):
        wx.Frame.__init__(self, *args, **kwargs)
        self.data = data.data()
        self.buttons = {}
        self.sizers = {}
        self.static_sizers = {}
        self.canvas = {}
        self.panels = {}
        self.grids = {}
        self.check_boxes = {}
        self.texts = {}
        self.spins = {}

        self.menu_bars = {}
        self.menus = {}
        self.menu_items = {}

        self.mouse = wx.MouseEvent()

        self.create_menu()
        self.create_ui()

        self.Show()

        # hide widgets whose stored state says they are inactive
        candidates = [key for key in self.data.GetList()
                      if self.data.CheckKey(key, 'active')]
        for widget in candidates:
            key = self.data.GetWidget(widget)
            if not key['active']:
                if self.data.GetValue(key['parent'], 'w_type') == 'sizer':
                    self.sizers[key['parent']].Hide(key['position'])
                elif self.data.GetValue(key['parent'], 'w_type') == 'static_sizer':
                    self.static_sizers[key['parent']].Hide(key['position'])
Example #7
    def dataElement_create(self, **kwargs):
        """
        Creates the 'data' element container.

        :param kwargs: 'root' = <location>
        :return:
        """
        s = self._stree
        SeriesFilesCount = 3
        str_root = '/'
        for key, val in kwargs.items():
            if key == 'root': str_root = val
            if key == 'SeriesFilesCount': SeriesFilesCount = val

        sample = data.data()
        sample.contents_build_1(SeriesFilesCount=SeriesFilesCount)
        # sample.dataComponent_pluginRun()

        s.cd(str_root)
        l_data = sample.contents.lstr_lsnode('/')
        if sample.contents.cd('/')['status']:
            for d in l_data:
                s.graft(sample.contents, '/%s' % d)

        return dict(sample.contents.snode_root)
Example #8
 def __init__(self, name, x, z):
     
     # id
     global ENEMY_ID        
     self.id = ENEMY_ID
     ENEMY_ID += 1        
     
     # set data        
     self.__name = name
     self.__data = enemies_data.get_data(name)
     self.__data.pos = data.data()
     self.__data.pos.x = x
     self.__data.pos.z = z
     self.__data.id = self.id
     self.__data.last_fire_time = timer.gtimer.current()
     self.__data.target = None
     
     self.hp = self.__data.hp
     
     # state machine
     self.__statem = state_manager.istate_manager()
     self.__statem.register(STATE_IDLE, state_idle(self.__data, self))
     self.__statem.register(STATE_RUN, state_run(self.__data, self))
     self.__statem.register(STATE_FIRE, state_fire(self.__data, self))
     self.__statem.register(STATE_BEATEN, state_beaten(self.__data, self))
     self.__statem.change_to(STATE_IDLE, None)
     
     # broadcast enemy born
     pkt = self.get_born_pkt()
     game.controller.gcontroller.broadcast(pkt)
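Here, and again in Examples #26, #38, #49, and #55, data.data(...) is used as a plain attribute bag: keyword arguments become attributes, and new attributes can be attached afterwards (self.__data.pos.x = x above). A minimal stand-in with that behavior, assuming that is all these game projects need from it:

# Sketch of an attribute-bag class; not the project's actual implementation.
class data(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

pos = data(x=1000, z=1000)  # as in Example #49
pos.y = 0                   # attributes may be added after construction
print(pos.x, pos.z)         # 1000 1000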
Example #10
    def __init__(self, params):
        self.Data = data(params)
        self.params = params

        self.lstm_source = lstm_source_(self.params)
        self.lstm_target = lstm_target_(self.params)
        self.lstm_source.apply(self.weights_init)
        self.lstm_target.apply(self.weights_init)
        embed = list(self.lstm_source.parameters())[0]
        embed[self.params.vocab_dummy].data.fill_(0)
        embed = list(self.lstm_target.parameters())[0]
        embed[self.params.vocab_dummy].data.fill_(0)
        if self.params.use_GPU:
            self.lstm_source = self.lstm_source.cuda()
            self.lstm_target = self.lstm_target.cuda()
        self.softmax = softmax_(self.params)
        self.softmax.apply(self.weights_init)
        if self.params.use_GPU:
            self.softmax = self.softmax.cuda()
        self.output = self.params.output_file
        if self.output != "":
            with open(self.output, "w") as selfoutput:
                selfoutput.write("")
        if self.params.PersonaMode:
            print("training in persona mode")
        else:
            print("training in non persona mode")
        self.ReadDict()
Example #11
def combinação():  # 15 lines
    print(' ', data.data())
    t = int(
        input(f'''
[1] Cálculo concentral inicial: poço 1A
[2] Cálculo do FIC

'''))
    if t == 1:
        C1 = float(input(f' Qual a concentração da solução estoque? '))
        print(f' A quantidade a ser retirada é de ')
    if t == 2:
        IC50AC = float(input(f' IC50 da droga A em combinação? '))
        IC50A = float(input(f' IC50 da droga A sozinha? '))
        IC50BC = float(input(f' IC50 da droga B em combinação? '))
        IC50B = float(input(f' IC50 da droga B sozinha? '))
        ΣFIC = (IC50AC / IC50A) + (IC50BC / IC50B)
        print(f''' 
ΣFIC = (IC50AC/IC50A) + (IC50BC/IC50B)
ΣFIC = ({IC50AC}/{IC50A}) + ({IC50BC}/{IC50B})
ΣFIC = ({round((IC50AC/IC50A),2)}) + ({round((IC50BC/IC50B),2)})
ΣFIC = {round(ΣFIC,2)}''')

        print()
        print(f'>>> O ΣFIC resultante é de {virgula.virgula(round(ΣFIC,2))}')
Example #12
def run(run_id=None,
        mode='normal',
        loaded_data=None,
        split_id=None,
        input_form=config.INPUT_FORM,
        label_form="outcome",
        hyperparameters=dict()):
    if run_id is None:
        run_id = int(datetime.utcnow().timestamp())
    if split_id is None:
        split_id = run_id

    if mode == 'normal':
        if loaded_data is None:
            # create the data objects
            training, validation, test = data(split_id,
                                              input_form=input_form,
                                              label_form=label_form)
        else:
            training, validation, test = loaded_data
        model_instance = model(input_form,
                               aux_size=training.features_size,
                               hyperparameters=hyperparameters)
        # return trained model
        return train(model_instance, training, validation, run_id, 'val_loss')
    elif mode == 'cross':
        # training, validation, test, holdout_test = loaded_data
        training, validation, test = loaded_data
        model_instance = model(input_form,
                               aux_size=training.features_size,
                               hyperparameters=hyperparameters)
        return train(model_instance, training, validation, run_id, 'val_loss')
Example #13
def panel_data(pdata=None):
    comReturn = comm.local()
    if comReturn: return comReturn
    import data
    dataObject = data.data()
    defs = ('setPs', 'getData', 'getFind', 'getKey')
    return publicObject(dataObject, defs, None, pdata)
Example #14
    def prepare(self):
        dataset = data()

        uf_num = 0
        self.userX = None
        self.uIDx = {}
        vf_num = 0
        self.itemX = None
        self.vIDx = {}

        self.graph = dataset.loadGraph(self.params['graph'],
                                       string.atoi(self.params['dim']))
        feature, f_num, fvIDx = dataset.loadFeature2(self.params['feature'])
        self.drawer = sampler_CA_BPR(self.graph, feature, fvIDx, self.Z, \
                                     lambda_setting=string.atoi(self.params['seed_num']), \
                                     thred=float(self.params['thred']))

        if (self.params.has_key('userX')):
            userX, uf_num, self.uIDx = dataset.loadFeature3(
                self.params['userX'], self.drawer.user_set)
            self.userX = dataset.constructSparseMat(userX, uf_num)
            # self.userX = numpy.matrix(self.userX)
        if (self.params.has_key('itemX')):
            itemX, vf_num, self.vIDx = dataset.loadFeature3(
                self.params['itemX'], self.drawer.item_set)
            # self.itemX = numpy.matrix(self.itemX)
            self.itemX = dataset.constructSparseMat(itemX, vf_num)

        #feature, f_num, fvIDx = dataset.loadFeature2(self.params['feature'])
        #feature = dataset.constructSparseMat(feature, f_num).todense()
        self.model = CA_BPR_Model(string.atoi(self.params['k']),
                                  self.drawer.user_set, self.drawer.item_set,
                                  uf_num, vf_num)
Example #15
    def __init__(self, params):
        self.Data = data(params)
        self.params = params

        self.lstm_source = lstm_source_(self.params)
        self.lstm_target = lstm_target_(self.params)
        self.lstm_source.apply(self.weights_init)  # weights_init is a member function
        self.lstm_target.apply(self.weights_init)  # apply: applies fn recursively to every submodule (as returned by .children()) as well as self

        embed = list(self.lstm_source.parameters())[0]  # embedding from lstm_source, 25010*512
        embed[self.params.vocab_dummy].data.fill_(0)
        embed = list(self.lstm_target.parameters())[0]  # embedding from lstm_target, 25010*512
        embed[self.params.vocab_dummy].data.fill_(0)

        if self.params.use_GPU:
            self.lstm_source = self.lstm_source.cuda()
            self.lstm_target = self.lstm_target.cuda()
        self.softmax = softmax_(self.params)
        self.softmax.apply(self.weights_init)

        if self.params.use_GPU:
            self.softmax = self.softmax.cuda()
        self.output = self.params.output_file  # save/testing/log or save/testing/non_persona/log

        # creating an output file if it doesn't exist
        if self.output != "":
            with open(self.output, "w") as selfoutput:
                selfoutput.write("")

        if self.params.PersonaMode:
            print("training in persona mode")
        else:
            print("training in non persona mode")

        self.ReadDict()
Example #16
def main():
    params = {}
    params['net_filename'] = "model96.pth"
    params['rf_size'] = 96
    params['rf_stride'] = 4
    params['gpu'] = 0
    params['num_epochs'] = 500
    params['num_batches'] = 30
    params['num_minibatches'] = 30
    params['minibatch_size'] = 150
    params['heatmap_decay'] = 0.75

    image_dir = ""
    mask_dir = ""
    heatmap_dir = ""

    data = d.data((params['rf_size'], params['rf_size']))
    data.load_from_directories(image_dir, mask_dir, heatmap_dir)

    # train

    # Create the network.
    net = Net()
    print(net)

    # load the weights
    if os.path.isfile(params['net_filename']):
        net.load_state_dict(torch.load(params['net_filename']))
    net.cuda(params['gpu'])

    train(net, data, params)
Example #17
def train_ideep(protein, features, save_path, seed=None):

    train, valid, test = data(protein, features, seed=seed)
    model = model_ideep(train[0], features, seed=seed)

    list_train = [train[0][feature] for feature in features]
    list_valid = [valid[0][feature] for feature in features]
    list_test = [test[0][feature] for feature in features]

    if seed is not None:
        np.random.seed(seed)

    earlystopper = EarlyStopping(monitor='val_loss', patience=5, verbose=0)
    print('model training')
    model.fit(list_train,
              train[1],
              batch_size=100,
              epochs=20,
              verbose=0,
              validation_data=(list_valid, valid[1]),
              callbacks=[earlystopper])

    predictions = model.predict_proba(list_test)
    auc = roc_auc_score(test[1], predictions)
    print("Test AUC: ", auc)
    dt = pd.DataFrame({"y_true": test[1], "y_pred_proba": predictions[:, 0]})
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    dt.to_csv(os.path.join(save_path, protein + ".csv"))

    return auc
Example #18
    def write(self,fileout):        
        # make data object
        d = data()
        
        # set data file headers
        d.title = "LAMMPS simple chain data file"
        d.headers["atoms"] = len(self.atoms)
        d.headers["bonds"] = len(self.bonds)
        d.headers["atom types"] = self.natype
        d.headers["bond types"] = self.nbtype
        d.headers["xlo xhi"] = (0, self.boxx)
        d.headers["ylo yhi"] = (0, self.boxy)
        d.headers["zlo zhi"] = (0, self.boxz)
        
        # set data file body
        lines = []
        for atom_type, mass in zip(self.atype, self.mass):
            lines.append("%d %d\n" % (atom_type, mass))
        d.sections["Masses"] = lines
        
        lines = []
        for i in self.atoms:
            lines.append("%d %d %d %f %f %f %f %f %f\n" % \
                        (i[0],i[1],i[2],i[3],i[4],i[5],i[6],i[7],i[8]))
        d.sections["Atoms"] = lines

        lines = []
        for i in self.bonds:
            lines.append("%d %d %d %d\n" % (i[0],i[1],i[2],i[3]))
        d.sections["Bonds"] = lines 
       
        # write to file
        d.write(fileout)
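The LAMMPS examples on this page (#18 here, and #32 and #53 below) all drive a pizza.py-style data container the same way: fill a headers dict and a sections dict of preformatted lines, then call write(). The following is only a sketch of that interface reconstructed from these call sites, not pizza.py itself (the real class can also read existing files, as Example #56 uses); note that in a LAMMPS data file each header value precedes its keyword (e.g. "100 atoms").

# Rough sketch of the assumed container interface (not pizza.py itself).
class data:
    def __init__(self):
        self.title = ""
        self.headers = {}   # e.g. {"atoms": 100, "xlo xhi": (0.0, 50.0)}
        self.sections = {}  # e.g. {"Atoms": ["1 1 1 0.0 0.0 0.0\n", ...]}

    def write(self, filename):
        with open(filename, "w") as f:
            f.write(self.title + "\n\n")
            for keyword, value in self.headers.items():
                if isinstance(value, tuple):
                    value = " ".join(str(v) for v in value)
                f.write("%s %s\n" % (value, keyword))
            for name, lines in self.sections.items():
                f.write("\n%s\n\n" % name)
                f.writelines(lines)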
Example #19
def albrecht():
    return data(name="albrecht",
                rows=[[
                    "$In", "$Out", "$Query", "$File", "$FpAdj", "$RawFPcounts",
                    "?AdjFp", "<Effort"
                ], [25, 150, 75, 60, 1, 1750, 1750, 102.4],
                      [193, 98, 70, 36, 1, 1902, 1902, 105.2],
                      [70, 27, 0, 12, 0.8, 535, 428, 11.1],
                      [40, 60, 20, 12, 1.15, 660, 759, 21.1],
                      [10, 69, 1, 9, 0.9, 478.89, 431, 28.8],
                      [13, 19, 0, 23, 0.75, 377.33, 283, 10],
                      [34, 14, 0, 5, 0.8, 256.25, 205, 8],
                      [17, 17, 15, 5, 1.1, 262.73, 289, 4.9],
                      [45, 64, 14, 16, 0.95, 715.79, 680, 12.9],
                      [40, 60, 20, 15, 1.15, 690.43, 794, 19],
                      [41, 27, 29, 5, 1.1, 465.45, 512, 10.8],
                      [33, 17, 8, 5, 0.75, 298.67, 224, 2.9],
                      [28, 41, 16, 11, 0.85, 490.59, 417, 7.5],
                      [43, 40, 20, 35, 0.85, 802.35, 682, 12],
                      [7, 12, 13, 8, 0.95, 220, 209, 4.1],
                      [28, 38, 24, 9, 1.05, 487.62, 512, 15.8],
                      [42, 57, 12, 5, 1.1, 550.91, 606, 18.3],
                      [27, 20, 24, 6, 1.1, 363.64, 400, 8.9],
                      [48, 66, 13, 50, 1.15, 1073.91, 1235, 38.1],
                      [69, 112, 21, 39, 1.2, 1310, 1572, 61.2],
                      [25, 28, 4, 22, 1.05, 476.19, 500, 3.6],
                      [61, 68, 0, 11, 1, 694, 694, 11.8],
                      [15, 15, 6, 3, 1.05, 189.52, 199, 0.5],
                      [12, 15, 0, 15, 0.95, 273.68, 260, 6.1]])
Example #20
 def __init__(self):
     self.minist = da.data().minist
     self.x = tf.placeholder(tf.float32, [None, 784])
     self.y = tf.placeholder(tf.float32, [None, 10])
     self.prediction, self.loss, self.train_step, self.accuracy = self.get_model()
     self.train_model()
Example #21
def main():

    again = 'y'
    while again == 'y':
        d16, d17, d18, d19, d20 = data_frames()
        train_data, train_labels = data(d16, d17, d18, d19,
                                        int(input('What is the points cutoff? ')))

        k = input('k (separate by space)? ').split()
        k = [int(x) for x in k]
        good = []
        for k_n in k:
            model = KNeighborsClassifier(n_neighbors=k_n)
            model.fit(train_data, train_labels)

            names = d19['full_name'].to_numpy()
            test_data = d19.drop('full_name', axis=1).to_numpy()
            predictions = model.predict(test_data)

            for i in range(len(names)):
                if predictions[i] == 1:
                    good.append(names[i])

        # keep only players flagged by more than one value of k
        great = []
        if len(k) == 1:
            great = good
        else:
            for name in good:
                if good.count(name) > 1 and name not in great:
                    great.append(name)

        #print(good)
        #print(great)

        results = d20.drop(['goals_scored', 'assists', 'total_points', 'minutes',
                            'goals_conceded', 'creativity', 'influence', 'threat',
                            'bonus', 'bps', 'ict_index', 'clean_sheets',
                            'red_cards', 'yellow_cards', 'selected_by_percent'],
                           axis=1)
        for i in results.index:
            if results['full_name'][i] not in great:
                results.drop(i, inplace=True)

        print(results)
        #results.to_csv('results.csv')

        price = int(float(input('Price cutoff? ')) * 10)
        for i in results.index:
            if results['now_cost'][i] > price:
                results.drop(i, inplace=True)

        print(results)

        again = input("Run again? (y/n) ")
Example #22
    def __init__(self, params):
        with open(path.join(params.model_folder, params.params_name),
                  'rb') as file:
            adapted_params = pickle.load(file)
        for key in vars(params):
            vars(adapted_params)[key] = vars(params)[key]
        adapted_params.dev_file = adapted_params.decode_file
        self.params = adapted_params
        if self.params.SpeakerMode:
            print("decoding in speaker mode")
        elif self.params.AddresseeMode:
            print("decoding in speaker-addressee mode")
        else:
            print("decoding in non persona mode")

        self.ReadDict()
        self.Data = data(self.params, self.voc)

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        if self.params.cpu:
            self.device = "cpu"

        self.Model = lstm_decoder(self.params, len(self.voc), self.Data.EOT)
        self.readModel(self.params.model_folder, self.params.model_name)
        self.Model.to(self.device)
        self.ReadDictDecode()

        self.output = path.join(self.params.output_folder,
                                self.params.log_file)
        if self.output != "":
            with open(self.output, "w") as selfoutput:
                selfoutput.write("")
Example #23
    async def add_command(self, ctx, *args):
        """  add users to the plan

        `$add[a] <@Name, Name, РђюName With SpacesРђЮ>'`

        Example: 
        $a
        $add "Chis Bot"
        $a chis unholydog106
        """
        await ctx.message.delete()
        match = data(ctx.guild)
        args = list(map(lambda user: closest_user(user, ctx.guild), args))
        if len(args) == 0:
            args = [ctx.author]

        logging.info(f'{ctx.author} tried to add {*args,} to the plan')

        for user in args:
            if match.people < match.spots:
                if not match.add_gamer(user):
                    await ctx.send(f'{user} is already a gamer.')
            else:
                await ctx.send(f'Cannot add {user}, too many gamers.')
        await update_message(ctx, self.match_messages, await self.match_message(ctx, match))
Example #24
    def __init__(self, params):
        self.params = params

        self.ReadDict()
        self.Data = data(params, self.voc)

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if params.cpu:
            self.device = "cpu"
        self.Model = lstm(params, len(self.voc), self.Data.EOT)
        self.Model.encoder.apply(self.weights_init)
        self.Model.decoder.apply(self.weights_init)
        self.Model.softlinear.apply(self.weights_init)
        self.Model.to(self.device)

        self.output = path.join(params.save_folder, params.output_file)
        if self.output != "":
            with open(self.output, "w") as selfoutput:
                selfoutput.write("")
        if self.params.SpeakerMode:
            print("training in speaker mode")
        elif self.params.AddresseeMode:
            print("training in speaker-addressee mode")
        else:
            print("training in non persona mode")
Example #25
def isA_extraction():
    '''
    S: sentences from a web corpus that match the Hearst patterns;
    the main entry point for isA extraction.

    --> set of isA pairs (x, y)
    '''
    # for sentence in S:
    #     syntactic_extraction(sentence)

    pool = Pool(20)
    tasks = []
    logging.info('loading the data......')
    for s in data():
        logging.info('getting {}'.format(s.index))
        tasks.append(s)
        # logging.info('added {}'.format(s[0]))
    logging.info('loading completed.')

    results = pool.map(generate_isa_pairs, tasks)
    pool.close()
    pool.join()
    logging.info('Complete!! There are {} pairs in total'.format(
        len([x for x in results if x is True])))
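generate_isa_pairs is defined elsewhere in that project. As a rough illustration of the Hearst-pattern idea the docstring refers to (a sketch only, not the project's implementation):

import re

# "Y such as X" is the classic Hearst pattern; real extractors use many more.
HEARST = re.compile(r'(\w+) such as (\w+)')

def generate_isa_pairs_sketch(sentence):
    # returns (x, y) pairs meaning "x isA y"
    return [(x, y) for y, x in HEARST.findall(sentence)]

print(generate_isa_pairs_sketch('animals such as dogs bark'))  # [('dogs', 'animals')]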
Example #26
def create_bullet(name, x, y, z, dir_x, dir_z, tt, damage):
    import constants
    
    if tt == constants.BulletMelee:
        # melee attack
        __melee_attack(name, x, z, dir_x, dir_z, damage)
        return
        
    import network, game
 
    global BULLET_ID
    BULLET_ID += 1
    bid = BULLET_ID
    
    # 1. broadcast
    bpkt = network.packet.packet(network.events.MSG_SC_OTHER_BULLET,
                                     id = bid,
                                     name = name, 
                                     x = x, y = y, z = z, 
                                     dir_x = dir_x, dir_z = dir_z,
                                     tt = tt, 
                                     velocity = constants.BulletVelocity)

    game.controller.gcontroller.broadcast(bpkt)
    
    # 2. save in server
    import data
    d = data.data(ply=name, id=bid, dir_x=dir_x, dir_z=dir_z, damage=damage)
    game.controller.gcontroller.Bullets[bid] = d
        
    return d
Example #27
 def train_model(self):
     init = tf.global_variables_initializer()
     batch_size = 20
     train_steps = 100000
     test_steps = 100
     data = da.data()
     train_data = data.train_data
     test_data = data.train_data  # note: evaluation below reuses the training split
     with tf.Session() as sess:
         sess.run(init)
         for i in range(train_steps):
             batch_data, batch_labels = train_data.next_batch(batch_size)
             loss_val, acc_val, _ = sess.run([self.loss, self.accuracy, self.train_op],
                                             feed_dict={self.x: batch_data, self.y: batch_labels})
             if (i + 1) % 500 == 0:
                 print('[Train] Step: %d, loss: %4.5f, acc: %4.5f' % (i + 1, loss_val, acc_val))
             if (i + 1) % 5000 == 0:
                 # test_data = CifarData(test_filenames, False)
                 all_test_acc_val = []
                 for j in range(test_steps):
                     test_batch_data, test_batch_labels = test_data.next_batch(batch_size)
                     test_acc_val = sess.run([self.accuracy],
                                             feed_dict={self.x: test_batch_data, self.y: test_batch_labels})
                     all_test_acc_val.append(test_acc_val)
                 test_acc = np.mean(all_test_acc_val)
                 print('[Test] Step: %d,acc: %4.5f' % (i + 1, test_acc))
Example #28
 def prepare(self):
     dataset = data()
     self.graph = dataset.loadGraph(self.params['graph'],
                                    string.atoi(self.params['dim']))
     self.drawer = sampler(self.graph, self.Z)
     self.model = BPR_model(self.Z, self.drawer.user_set,
                            self.drawer.item_set)
Example #29
def run_each_frame(file, topo):
    alld, pbc = sca.get_coord(file, topo)
    res_d = {}  # contains information for each frame

    avg_dict = {}
    count = 0

    for frame in alld:
        a, b, c = pbc[frame]
        d, links_h, links_o, hbonds, obonds = data.data(alld[frame], a, b, c)

        temp_d = rings.job(d, links_h, links_o, hbonds, obonds, a, b, c)

        # accumulate per-frame values into the running totals
        for i in temp_d:
            if i not in avg_dict:
                avg_dict[i] = temp_d[i]
            else:
                avg_dict[i] += temp_d[i]
        count += 1

        res_d[frame] = temp_d

    print file, 'Number of frames', len(res_d)
    for i in avg_dict:
        avg_dict[i] = avg_dict[i] / count

    print res_d
    print avg_dict

    np.save(file[:-6] + '_RING_perFrame.npy', res_d)
    np.save(file[:-6] + '_RING_avg.npy', avg_dict)
Example #30
def run_each_frame(file, topo):
    alld, pbc = sca.get_coord(file, topo)
    res_d = {}  # contains information for each frame

    avg_dict = {}
    count = 0

    for frame in alld:
        a, b, c = pbc[frame]
        d, links_h, links_o, hbonds, obonds = data.data(alld[frame], a, b, c)

        coord = np.array([d[i][1:] for i in d])

        temp_d = cluster.cluster(coord, file)

        # accumulate per-frame values into the running totals
        for i in temp_d:
            if i not in avg_dict:
                avg_dict[i] = temp_d[i]
            else:
                avg_dict[i] += temp_d[i]
        count += 1

        res_d[frame] = temp_d

    print file, 'Number of frames', len(res_d)
    for i in avg_dict:
        avg_dict[i] = avg_dict[i] / count

    np.save(file[:-6] + '_CMS_perFrame.npy', res_d)
    np.save(file[:-6] + '_CMS_avg.npy', avg_dict)
Example #31
def diluicao_droga():  # 11 lines
    C1 = float(input(' Conc. da solução estoque (µM): '))

    C2 = float(input(' Conc. da solução desejada (µM): '))

    V2 = float(input(' Volume de diluição (µL): '))
    print()

    V1 = round(((C2 * V2) / C1), 2)

    volume_de_meio = V2 - V1
    print()
    print(' ', data.data())
    print()
    print(f''' =============================
C1V1 = C2V2

{C1} x V1 = {C2} x {V2}
V1 = {C2} x {V2}/{C1}
V1 = {round((C2*V2/C1),2)} 
=============================''')
    print()

    print(f' Volume de droga = {virgula.virgula(V1)} µL')
    print()
    print(f' Volume de meio = {virgula.virgula(volume_de_meio)} µL')
    print()
Example #32
    def write_lammps_file(self, filename):
        d = data()
        d.title = "Lammps data file; Rough surface stats - " + self.spectrum.get_info()
        d.headers = {}
        d.sections = {}
        dd = 2 ** (1.0 / 6.0)  # LJ equilibrium spacing; explicit float division
        dx, dy = dd / 2, dd / 2
        atom_type = 1
        M, N = self.heights.shape
        Zs = self.heights.real
        xlo, ylo = -(M * dx) / 2, -(N * dy) / 2
        d.headers["zlo zhi"] = (np.min(Zs), np.max(Zs))
        d.headers["xlo xhi"] = (xlo, xlo + M * dx)
        d.headers["ylo yhi"] = (ylo, ylo + N * dy)
        d.headers["atoms"] = M * N
        d.headers["atom types"] = atom_type
        atom_lines = []
        velocity_lines = []
        atom_id = 1
        for i in range(M):
            for j in range(N):
                atom_lines.append("%d %d %d %g %g %g %d %d %d\n" % (
                    atom_id, 0, atom_type, xlo + i * dx, ylo + j * dy, Zs[i, j], 0, 0, 0))
                velocity_lines.append("%d 0 0 0\n" % atom_id)
                atom_id += 1
        d.sections["Atoms"] = atom_lines
        d.sections["Velocities"] = velocity_lines
        d.sections["Masses"] = ["%d %g\n" % (atom_type, 1)]
        d.write(filename)
Example #33
    async def addall_command(self, ctx, *args):
        """ add all users currently in the voice channel to the plan

        `$addall[aa]`

        Example: 
        $aa
        """
        await ctx.message.delete()
        voice_channels = ctx.guild.voice_channels
        match = data(ctx.guild)

        if ctx.author.voice is None:
            await ctx.send(f'{ctx.author} is not in a voice channel.')
            return

        for channel in voice_channels:
            if ctx.author.voice.channel is not None and ctx.author.voice.channel is channel:
                members = sorted(
                    channel.members, key=lambda user: self.activity_check(user), reverse=True)

                for user in members:
                    if match.people < match.spots:
                        if not match.add_gamer(user):
                            await ctx.send(f'{user} is already a gamer.')
                    else:
                        await ctx.send(f'Cannot add {user}, too many gamers.')
        await update_message(ctx, self.match_messages, await self.match_message(ctx, match))
Example #34
def run_each_frame(file, topo):
	print file
	alld, pbc = sca.get_coord(file, topo)
	res_d = {} # contains information for each frame

	avg_dict = {}
	count = 0

	for frame in alld:
		a,b,c = pbc[frame]
		d,links_h,links_o,hbonds,obonds=data.data(alld[frame], a, b, c)
		temp_d = hbtype.result(d,links_h,links_o,hbonds,obonds)
		#print temp_d
		for i in temp_d:
			temp_d[i] = len(temp_d[i])
			if i not in avg_dict:
				avg_dict[i] = temp_d[i]
			else:
				avg_dict[i] += temp_d[i]
		count += 1

		res_d[frame] = temp_d

	print file, 'Number of frames', len(res_d)
	for i in avg_dict:
		avg_dict[i] = avg_dict[i]/count

	#visual(avg_dict, file[:-6])

	np.save(file[:-6]+'_HB_perFrame.npy', res_d)
	np.save(file[:-6]+'_HB_avg.npy', avg_dict)
Example #35
def createDatasets():
    for featuresType in ['MFCC', 'Speaker_trait']:
        df_train, df_test = data.data(featuresType)

        df_train = df_train.fillna(0)  # all NaN replaced by 0
        if not onlyFrench:
            df_test = df_test.fillna(0)  # all NaN replaced by 0

        # df_train = df_train.sample(n=5000, random_state=17)
        # df_test = df_test.sample(n=10000, random_state=17)

        dataset = data.restructure_data(df_train)
        if not onlyFrench:
            dataset_test = data.restructure_data(df_test)

        if onlyFrench:
            dataset['data'] = dataset['data'].values.astype('float')
            dataset['target'] = dataset['target'].values.astype('str')
        else:
            dataset['data'] = np.concatenate([dataset['data'].values.astype('float'),
                                              dataset_test['data'].values.astype('float')])
            dataset['target'] = np.concatenate([dataset['target'].values.astype('str'),
                                                dataset_test['target'].values.astype('str')])
            dataset['language'] = np.concatenate([dataset['language'].values.astype('str'),
                                                  dataset_test['language'].values.astype('str')])
        print("Scaling")
        standard_scaler = StandardScaler()
        dataset['data'] = standard_scaler.fit_transform(dataset['data'])

        if onlyFrench:
            np.savez('UL/onlyFrench-' + featuresType + '-dataset.npz',
                     data=dataset['data'], target=dataset['target'],
                     language=dataset['language'])
        else:
            np.savez('UL/215832_frames-' + featuresType + '-dataset.npz',
                     data=dataset['data'], target=dataset['target'],
                     language=dataset['language'])
Example #36
def run(model, description):
    run_id = str(uuid4())

    model.run(run_id)
    K.clear_session()

    model_instance = evaluate.load(os.path.join(
        config.MODEL_DIR,
        "{}-{}.h5".format(run_id, model.MODEL_NAME),
        ))

    train, validation, test_data = data()
    train_data_stats = characterize_data(train)
    validation_data_stats = characterize_data(validation)
    test_data_stats = characterize_data(test_data)
    results = test(model_instance, train, validation, test_data)

    result = Result(
        model.MODEL_NAME,
        run_id,
        train_data_stats,
        validation_data_stats,
        test_data_stats,
        description,
        **results
        )
    db.session.add(result)
    db.session.commit()
Example #38
    def icalc_next(self):
        self.__last_time = timer.gtimer.current()

        curr_waypoint = self.__path[self.__pos - 1]
        next_waypoint = self.__path[self.__pos]

        delta_x = next_waypoint[0] - curr_waypoint[0]
        delta_z = next_waypoint[1] - curr_waypoint[1]

        # length of the step toward the next waypoint
        L = math.sqrt(delta_x ** 2 + delta_z ** 2)

        if L <= 0:
            self.__dir = data.data(x=delta_x, z=delta_z)
        else:
            self.__dir = data.data(x=delta_x / L, z=delta_z / L)
        self.__last_waypoint = data.data(x=curr_waypoint[0], z=curr_waypoint[1])
        self.__next_waypoint = data.data(x=next_waypoint[0], z=next_waypoint[1])
        self.__L = L
Example #39
def test_basic_parser():

    # data_file = '/Users/Zhe/Desktop/sat_july_2014/data_2014.07.15/test'
    data_file = subdir + '/' + 'empty'
    input_file = 'input.txt'

    original = data(data_file)
    a = basic_parser(subdir + '/' + input_file).parser()

    original.update(a, output='test_output')
Example #40
def test_sentence_parser():
    print subdir
    # data_file = '/Users/Zhe/Desktop/sat_july_2014/data_2014.07.15/test'
    data_file = subdir + '/' + 'empty'
    input_file = 'passage_test'

    original = data(data_file)
    a = sentence_parser(subdir + '/' + input_file).parser()

    original.update(a, output='test_output')
    print original
Example #41
    def problem1(self, test_docs, train_docs, mean_sent=2.5):
        print "Sentiment Analysis: <Problem 1> "
        # creates awesome data object for each paragraph in document
        data_list = []
        for doc in train_docs:
            for (par, rating) in doc.get_par_rating_tuples():
                # protecting against the chance of a failed parse
                if (par is not None and rating is not None):
                    data_list.append(data(par.lower(), rating, doc.filename, doc.author))
                else:
                    print "Found bad review by -> " + doc.author + " (this comes from: par = None)"
        self.complete(data_list)
Example #42
def importer(df, Si, Ii, Fi, colsY, nTests=0, test=False):
    df = ajoutDate(df)
    colsX, S, I, F = numerote(Si, Ii, Fi)
    X, y = prepare(df, colsX, colsY)
    X, y = featureScale(S, X, y)
    #X, y = decoupeJours(X, y, nTests, colsX)  # switch to one day at a time
    E, X, colsX = convertirE(F, X, colsX, test=True)
    d = data.data(X, y, colsX, Ii)
    if nTests != 0:
        idx = np.random.choice(np.arange(0, np.shape(X)[0]), nTests, replace=False)
        print(pd.DataFrame(X[idx], columns=colsX))
    return d
Example #43
def Backup(nomex, ipx, senhax):

    try:
        paf = '/home/%s/backup.tar.gz' % nomex
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(ipx, username=nomex, password=senhax)
        #stdin, stdout, stderr = ssh.exec_command('tar -czvf /home/%s/backup.tar.gz /home/%s/Backup'%(nomex,nomex))
        sftp = ssh.open_sftp()
        sftp.get(paf, '/home/user208/%s%s.tar.gz' % (nomex, data.data()))
        sftp.close()
#------------------------ Upload to Dropbox ----------------------------------
        paf = ("cp /home/user208/'%s%s.tar.gz' /home/user208/Dropbox" %
               (nomex, data.data()))
        os.system(paf)
        print('\nEnvio concluído.\n')

    except Exception:
        os.system('clear')
        print('Ops! Alguns dos parametros passados podem estar errados.')
        time.sleep(2)
        os.system('clear')
Example #44
    def load(self):
        # file objects are already line-iterable; no helper generator needed
        with open(self._file_name, 'r') as fp:
            for line in fp:
                entry = data(line, self._sep, self._key_pos)
                self._lns[entry.key()] = entry
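The loader above only requires that data(line, sep, key_pos) split a line and report its key via key(). A plausible minimal record type with that contract (an assumption; the project's real class is not shown here):

# Plausible record sketch: split on a separator, expose one field as the key.
class data:
    def __init__(self, line, sep, key_pos):
        self.fields = line.rstrip("\n").split(sep)
        self._key_pos = key_pos

    def key(self):
        return self.fields[self._key_pos]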
Example #45
def test_sentence_parser():

    subdir = '/Users/Zhe/Desktop/sat_july_2014/collector'
    # data_file = '/Users/Zhe/Desktop/sat_july_2014/data_2014.07.15/test'
    data_file = subdir + '/' + 'empty'
    input_file = 'mock_test'

    original = data(data_file, mode='normal')
    a = cr_practice_parser(subdir + '/' + input_file).parser()

    original.update(a, output='test_output')
    print original
Example #46
def test_mock_parser():

    # data_file = '/Users/Zhe/Desktop/sat_july_2014/data_2014.07.15/test'
    data_file = subdir + '/' + 'empty'
    input_file = 'mock_test'

    original = data(data_file)
    a = CRmock_parser(subdir + '/' + input_file).parser()

    original.update(a, output=subdir + '/' + 'test_output')
    print original
    #print_dict(empty)

#test_mock_parser()
Example #47
    def problem2(self, test_docs, train_docs):
        print "Sentiment Analysis: <Problem 2> "
        data_list = []
        # creates awesome data object for each document
        for doc in train_docs:
            #pars = [par.lower() for par in doc.get_pars() if par is not None]
            #ratings = [rating for rating in doc.get_ratings() if rating is not None]
            par = doc.get_pars()[3]
            rating = doc.get_ratings()[3]
            if (par is not None and rating is not None):
                data_list.append(data(par, rating, doc.filename, doc.author))
            else:
                print "Found bad review by -> " + doc.author + " (this comes from: par = None)"

        self.complete(data_list)
Example #48
def test(arguments):
    param_combo, params, features, shorthands = arguments
    params = dict(params)  # shallow copy so the caller's dict is not mutated
    params['subfolder name'] = '_'.join(
        '%s_%s' % (sh, pv) for sh, pv in zip(shorthands, param_combo))
    for pv, fe in zip(param_combo, features):
        params[fe] = pv
    classifiers = {'gcm': gcm, 'gnb': gnb, 'alcove': alcove, 'som': som}
    classifier = classifiers[params['classifier']]
    d = data(params)
    for simulation in range(params['n simulations']):
        c = classifier(d, params, simulation)
        time = 0
        while time < params['length simulation']:
            time += params['test interval']
            c.load(simulation, time)
            c.test()
Example #49
 def __init__(self, d, conn):
     self.name = d.name
     self.__conn = conn
     self.__state = constants.PLAYER_STATE_LOGIN
     conn.ply = self 
     self.bag = bag.bag(self)
     self.bag.add(constants.ItemBullet, 10000)
     self.dead = False
     self.pos = data.data(x=1000, z=1000)
     
     # data
     self.hero = "hero"
     self.weapon = constants.WeaponNormal
     self.run_distance = 0
     self.point = 0
Example #50
def train_and_test(arguments):
    param_combo, params, features, shorthands = arguments
    params = dict(params)  # shallow copy so the caller's dict is not mutated
    params['subfolder name'] = '_'.join(
        '%s_%s' % (sh, pv) for sh, pv in zip(shorthands, param_combo))
    for pv, fe in zip(param_combo, features):
        params[fe] = pv
    classifiers = {'gcm': gcm, 'gnb': gnb, 'alcove': alcove, 'som': som}
    classifier = classifiers[params['classifier']]
    dirname = '%s/%s' % (params['folder name'], params['subfolder name'])
    os.makedirs(dirname)
    with open('%s/parameters.p' % dirname, 'wb') as fh:
        pickle.dump(params, fh)

    d = data(params)
    for simulation in range(params['n simulations']):
        c = classifier(d, params, simulation)
        c.train(dump=False, test=True)
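Both test (Example #48) and train_and_test above consume a single (param_combo, params, features, shorthands) tuple, which suggests they are mapped over a parameter grid. A sketch of such a driver, with invented grid axes and parameter names:

# Hypothetical driver: build the argument tuples and map them over a pool.
# (Wrap the Pool call in an `if __name__ == "__main__":` guard under the
# spawn start method.)
from itertools import product
from multiprocessing import Pool

features = ['learning rate', 'n hidden']    # invented grid axes
shorthands = ['lr', 'nh']
grid = list(product([0.01, 0.1], [8, 16]))  # every (lr, nh) combination

base_params = {'classifier': 'som', 'n simulations': 10,
               'folder name': 'results'}    # placeholder values

jobs = [(combo, base_params, features, shorthands) for combo in grid]
Pool(4).map(train_and_test, jobs)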
Example #51
  def __init__( self, parent=None ):
    QtGui.QMainWindow.__init__( self, parent )
    self.setWindowTitle( 'Plotduino' )
    
    # the data acquisition
    self.data_source = data.data()
    self.data_source.selection = 0
    
    # time interval for the plot update
    self.timestep = 200
    
    # timer for updating the plot
    self.timer = QtCore.QTimer(self)

    #create the layout and connect the signals to the buttons
    self.set_layout()
    self.connectSignals()
Example #52
    def opn(self, etc, url):
        if self.up_win:
            self.temp_h = gtk.VBox()
            self.temp_l = gtk.Label()
            self.temp_l.set_markup(
                '<span color="green" size="16000">Loading....</span>')
            self.temp_h.pack_start(self.temp_l, True)
            self.vbox_one.pack_start(self.temp_h, False)
            self.temp_h.show()
            self.vbox_one.show_all()
            self.motion('')

        queue = Queue.Queue()
        self.loader_cont.show()
        dt = data()
        data_thread = threading.Thread(target=dt.get_data, args=(url, queue))
        data_thread.start()

        def idont(self):
            xor = queue.get()
            if xor == 1 or xor == 2:
                store = xor
            else:
                store = 0
            if store == 2:
                self.loader_cont.hide()
                self.temp_cont = gtk.HBox()
                self.temp_lab = gtk.Label()
                self.temp_lab.set_markup(
                    '<span size="20000" color="red">ERROR: Connection failed</span>')
                self.temp_cont.pack_start(self.temp_lab, True)
                self.view1.remove(self.text_view1)
                self.view1.add(self.temp_cont)
                self.view1.show_all()
                return False
            if store == 1:
                load()
                self.vbox_two.show_all()
                self.refresh()
                self.loader_cont.hide()
                self.temp_h.pack_end(self.temp_l)
                self.temp_h.hide()
                return False
            return True
        gobject.timeout_add(1, idont, self)
Example #53
File: chain.py Project: aaigner/LPP
  def write(self,file):
    if len(self.atoms) != self.n:
      raise StandardError,"%d monomers instead of requested %d" % \
                           (len(self.atoms),self.n)

    atypes = max(atom[2] for atom in self.atoms)

    btypes = 0
    if len(self.bonds):
      btypes = max(bond[1] for bond in self.bonds)

    # create the data file

    d = data()
    d.title = "LAMMPS FENE chain data file"
    d.headers["atoms"] = len(self.atoms)
    d.headers["bonds"] = len(self.bonds)
    d.headers["atom types"] = atypes
    d.headers["bond types"] = btypes
    d.headers["xlo xhi"] = (self.xlo,self.xhi)
    d.headers["ylo yhi"] = (self.ylo,self.yhi)
    d.headers["zlo zhi"] = (self.zlo,self.zhi)

    lines = []
    for i in range(atypes): lines.append("%d 1.0\n" % (i+1))
    d.sections["Masses"] = lines
    
    lines = []
    for atom in self.atoms:
      line = "%d %d %d %g %g %g %d %d %d\n" % \
             (atom[0], atom[1], atom[2], atom[3], atom[4], atom[5],
              atom[6], atom[7], atom[8])
      lines.append(line)
    d.sections["Atoms"] = lines
    
    lines = []
    for bond in self.bonds:
      line = "%d %d %d %d\n" % (bond[0], bond[1], bond[2], bond[3])
      lines.append(line)
    d.sections["Bonds"] = lines

    d.write(file)
Example #54
def test():
    test_sent = Sent(1, 'These algorithms include distance calculations, scan conversion, closest point determination, fast marching methods, bounding box creation, fast and incremental mesh extraction, numerical integration and narrow band techniques.', 'D')
    test_s = [test_sent]
    # isA_extraction(data())
    for s in data('B'):
        syntactic_extraction(s)
    # with open(OUTPUT_DIR+'equal.txt', 'w', encoding='utf-8') as f:
    #     for s in data():
    #         # syntactic_extraction(s)
    #         # logging.info(s.text)
    #         r = equal_extract(s.text)
    #         for eq in r:
    #             logging.info(s.text)
    #             logging.info(eq)
    #             # f.write(s.text.strip())
    #             # f.write('\n')
    #             f.write('\t'.join(eq))
    #             f.write('\n')
    #             f.flush()
    #             # stop()
    # return
    return
Example #55
def create_item(tt, x, z):
    import network, game
    global ITEM_ID
    ITEM_ID += 1
    bid = ITEM_ID
    
    # 1. save in server
    import data
    d = data.data(id=bid, tt=tt, x=x, z=z)
    game.controller.gcontroller.Items[bid] = d
        
    # 2. broadcast
    bpkt = network.packet.packet(network.events.MSG_SC_ITEM_BORN,
                                     id = bid,
                                     tt = tt,
                                     x = x, 
                                     z = z
                                )

    game.controller.gcontroller.broadcast(bpkt)
    
    return d
Example #56
def get_chain(datafile, chainID):
    """
    Get the atom and bond IDs in a polymer chain, as identified by LAMMPS
    molecule ID.

    Inputs:
        datafile: name of lammps data file. Needs second atom column to be type.
        chainID: molecule id for polymer chain.

    Outputs:
        tuple of ([atom IDs],[bond IDs])

    Derek Fujimoto
    July 2016
    """

    # some variables
    chain_atoms = []

    # get atom list of all atoms and bonds
    d = data(datafile)
    all_atoms = d.get("Atoms")
    all_bonds = d.get("Bonds")

    # find only atoms which belong to a given chain ID
    for atom in all_atoms:
        if atom[1] == chainID:
            chain_atoms.append(int(atom[0]))

    # make a list with bonds just from atoms in the atom list
    chain_bonds = []
    for bond in all_bonds:
        if bond[2] in chain_atoms or bond[3] in chain_atoms:
            chain_bonds.append([int(bond[2]), int(bond[3])])

    return (chain_atoms, chain_bonds)
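A hypothetical call of get_chain (the file name is a placeholder):

# Placeholder file name; any LAMMPS data file with molecule IDs works.
atoms, bonds = get_chain("polymer.data", chainID=1)
print(len(atoms), "atoms and", len(bonds), "bonds in chain 1")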
Example #57
#!/usr/bin/python

# Copyright (c) 2008, Institute for the Study of Learning and Expertise
# All rights reserved.
# For details, see the LICENSE file.

from ross_entities import *
from data import data
from search import *
import time

ross1 = data("./ross-sea-yr1.data")
#ross2 = data("./ross-sea-yr2.data")

data_files = [ross1]
#data_files = [ross1,ross2]

print "Start: ", time.strftime("%m/%d/%y %H:%M:%S", time.localtime())

(model, sse, r2, init_state) = exhaustive(lib, "root", data_files)
#(model, sse, r2, init_state) = beam_search(lib, "root", data_files, beam_width = 1, n_fs_restarts = 3,init_state_fit=1)

print "Best model:"
print model
print "SSE = %g, r2 = %g, init_state = %s\n" % (sse, r2, str(init_state))

model.fit_params(data_files, init_state = init_state, n_tf_restarts = 0, n_fs_restarts = 0)

print "End: ", time.strftime("%m/%d/%y %H:%M:%S", time.localtime())
print ""
Example #58
from theano.sandbox.cuda.dnn import dnn_conv

from PIL import Image
import pickle
from lib import activations
from lib import updates
from lib import inits
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, deconv, dropout, l2normalize
from lib.metrics import nnc_score, nnd_score
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data, center_crop, patch

from data import data

num_samples, train_set, test_set, val_set, tr_scheme, tr_stream, val_scheme, val_stream, test_scheme, test_stream = data()

def target_transform(X):
    return floatX(X).transpose(0, 3, 1, 2)/127.5 - 1.

def input_transform(X):
    return target_transform(X)

l2 = 1e-5         # l2 weight decay
nvis = 196        # # of samples to visualize during training
b1 = 0.5          # momentum term of adam
nc = 2            # # of channels in image
nbatch = 128      # # of examples in batch
npx = 64          # # of pixels width/height of images
batch_size = 128
nx = npx*npx*nc   # # of dimensions in X
Example #59
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        
    if 'debug' in sys.argv:
        nfolds = 1
        fold_j = 0 
        out_file = os.path.join(out_dir,'./debug.hdf5')
        #pdb.set_trace()
        
    else:
        nfolds = int(sys.argv[1])   # number of folds the genelist is split into (= number of jobs)
        fold_j = int(sys.argv[2])   # fold considered for this job
        out_file = os.path.join(out_dir,'%d_%d.hdf5'%(nfolds,fold_j))

    data = DATA.data(data)
    uKpop = data.getUKpop(center=center,normalize=False)
    
    n_genes = data.Y.shape[1]
    genes = SP.arange(n_genes)
    Icv = SP.floor(nfolds*SP.arange(n_genes)/n_genes)
    I = Icv==fold_j
    genes = genes[I]

    f = h5py.File(out_file,'w')

    for gene_i in genes:
        probeID = data.probeID[gene_i]
        geneID  = data.geneID[gene_i]
        print ""
        print "%s: %s" % (gene_i,probeID)
Example #60
def get_msd(rootfile,datafile,style,n_chains,mono_id=-1,draw=False):
    """
    Get the mean squared displacement for a number of different styles.

    Inputs:
        rootfile: name of root file with dump data
        datafile: name of data file used to set up simulation
        style: name of style with which to find the msd
        n_chains: number of chains present in the simulation
        mono_id: monomer id (starting from 0) to use in the COM calculation. Only for the ring_n options.
        draw: if true, draw to TCanvas
    Outputs:
        TGraph with msd from initial position.

    Style Options:
        'ua_ring_com': msd of phenyl ring center of mass for united atom model.
        'ua_ring_n_com': msd of nth ring com for united atom model

    Dependencies:
        pizza.py
        PyROOT

    Derek Fujimoto
    April 2016
    """

    # constants
    THIS = "get_msd"
    TREE_NAME = "dump_dat"
    STYLE_OPTIONS = ['ua_ring_com','ua_ring_n_com']
    OPTION_DICT = dict()

    # check style option input
    if style not in STYLE_OPTIONS:
        print THIS + ": style option not found. Possible options include: "
        for op in STYLE_OPTIONS:
            print op
        exit()

    # set some of the dictionary settings
    OPTION_DICT['n_chain'] = n_chains

    if style == STYLE_OPTIONS[0] or style == STYLE_OPTIONS[1]:
        OPTION_DICT['n_atom'] = 8
        OPTION_DICT['n_monomer'] = 10
        OPTION_DICT['n_cap'] = 1
        OPTION_DICT['n_skip'] = 2

    if style == STYLE_OPTIONS[1]:
        OPTION_DICT['mono_id'] = mono_id

    # indicies of things in snapshot TH2D
    ID = 0
    MOL = 1
    TYPE = 2
    XYZ = [3,4,5]

    # Open rootfile and tree
    rfile_id = TFile(rootfile,'READ')
    dump_dat = rfile_id.Get(TREE_NAME)

    # Get atom masses
    dfile_id = data.data(datafile)
    mass_list = dfile_id.get('Masses')

    # generate list of needed atoms in each object
    id_list = generate_atom_list(style,OPTION_DICT)

    # iterate over snapshots.
    time_list = []              # timestamps
    msd_list = []

    for ie,event in enumerate(dump_dat):
        snap = event.snapshot # TH2D

        # get COM
        com_list = []               # [[x1,y1,z1],[x2,y2,z2],...]
        for obj in id_list:
            totalMass = 0
            com = [0,0,0]
            for atom_id in obj:
                atom_mass = mass_list[int(snap.GetBinContent(atom_id-1,TYPE))-1][1]

                totalMass += atom_mass
                for i in range(3):
                    com[i] += snap.GetBinContent(atom_id-1,XYZ[i])*atom_mass

            for i in range(3):
                com[i] /= totalMass

            # add COM to list
            com_list.append(com)

        if ie == 0:
            com_list1 = com_list

        # get msd at this snapshot
        msd = 0
        for k,entry in enumerate(com_list):
            for j in range(3):
                msd += (com_list1[k][j] - entry[j])**2
        msd /= (len(com_list)*3)

        msd_list.append(msd)
        time_list.append(event.timestep)

    # make graph for output
    g = TGraph(len(msd_list),array.array('d',time_list),array.array('d',msd_list))
    g.SetTitle("")
    g.GetYaxis().SetTitle("Mean Squared Displacement (A^2)")
    g.GetXaxis().SetTitle("Time Step")

    if draw:
        g.Draw("AP")

    return g
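A hypothetical call of get_msd, with placeholder file names (pizza.py's data module and PyROOT must be importable, as the docstring notes):

# Placeholder file names; with draw=True the graph is drawn to the active TCanvas.
g = get_msd("dump.root", "polymer.data", "ua_ring_n_com",
            n_chains=40, mono_id=0, draw=True)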