Example #1
import sys

import submodels_module as modelbank  # assumed import: Example #2's comments place these classes in submodels_module.py


def main():

    toggle_no=int(sys.argv[1])

    a_models=['ridge','forest','svm','fnn']
    a=modelbank.seqandassay_to_yield_model([1,8,10],a_models[toggle_no],1)
    a.cross_validate_model()
    a.test_model()
    a.plot()
Example #2
def main():

    toggle_no=int(sys.argv[1])
    ## The integer command-line argument is stored in the toggle_no variable. The input
    ## must be between 0 and 3; any other input causes an error
    a_models=['ridge','forest','svm','fnn']
    ## A string list called a_models is created, with elements corresponding to the different
    ## regression models outlined in the model_architecture.py script
    a=modelbank.seqandassay_to_yield_model([1,8,10],a_models[toggle_no],1)
    ## Then a seqandassay_to_yield_model() object, defined in the submodels_module.py script, is created.
    ## It is instantiated with an integer list, a string, and a float. The integer list gives the
    ## assay scores used to build the regression model, the string is the toggle_no-th element
    ## of the a_models list, and the float is the sample fraction.
    a.cross_validate_model()
    ## cross_validate_model() selects the best hyperparameters for the chosen architecture
    a.test_model()
    ## test_model() trains the model with those hyperparameters and evaluates it on the test set
    a.plot()
    ## plot() generates the corresponding figure for this model
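
The comments above require the command-line toggle to be between 0 and 3. Below is a minimal sketch of that invocation pattern with an explicit guard before indexing a_models; the script name in the comment is a placeholder, not taken from the source.

import sys

a_models = ['ridge', 'forest', 'svm', 'fnn']

# e.g. `python run_seq_and_assay.py 2` would select the 'svm' architecture
toggle_no = int(sys.argv[1])
if not 0 <= toggle_no <= 3:
    sys.exit('toggle_no must be an integer in [0, 3], got %d' % toggle_no)
print('selected architecture:', a_models[toggle_no])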
Example #3
def main():
    '''
    Compare test performance when reducing the training sample size. This version is for the first paper, predicting yield from assays and one-hot encoded sequence.
    '''

    a = int(sys.argv[1])
    if a < 4:
        b = 0
    elif a < 8:
        a = a - 4
        b = 1
    elif a < 12:
        a = a - 8
        b = 2
    elif a == 12:
        b = 3
        a = a - 12
    else:
        print('incorrect toggle number')
        return  # b is undefined past this point, so exit main() early

    arch_list = ['ridge', 'svm', 'forest', 'fnn']

    # size_list=[0.055,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]
    size_list = [0.7, 0.8, 0.9, 1]

    for size in size_list:
        if b == 0:
            mdl = modelbank.seqandassay_to_yield_model([1, 8, 10],
                                                       arch_list[a], size)
        elif b == 1:  #1,5,9,12
            mdl = modelbank.assay_to_yield_model([1, 8, 10], arch_list[a],
                                                 size)
        elif b == 2:
            mdl = modelbank.seq_to_yield_model(arch_list[a], size)
        elif b == 3:
            mdl = modelbank.control_to_yield_model(arch_list[a], size)

        for seed in range(9):  # the default run without an explicit seed uses seed=42
            mdl.change_sample_seed(seed)
            mdl.cross_validate_model()
            mdl.limit_test_set([1, 8, 10])
            mdl.test_model()
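
The if/elif chain above maps a single toggle integer in [0, 12] onto an architecture index a in [0, 3] and a model-family index b in [0, 3]. A hedged sketch (not part of the original script) showing that divmod reproduces the same decoding, which makes the mapping easier to see at a glance:

# divmod(toggle, 4) matches the branching above for toggle in range(13):
# toggles 0-3 -> b=0 (sequence and assay), 4-7 -> b=1 (assay only),
# 8-11 -> b=2 (sequence only), 12 -> b=3, a=0 (control)
arch_list = ['ridge', 'svm', 'forest', 'fnn']
for toggle in range(13):
    b, a = divmod(toggle, 4)
    print(toggle, '-> family', b, ', arch', arch_list[a])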
Example #4
# if __name__ == '__main__':
#     main()

loss_per_model, std_per_model = [], []
arch_list = ['ridge', 'svm', 'forest', 'fnn']

for i in range(4):
    cv_loss, test_loss, test_std = np.inf, np.inf, 0
    for arch in arch_list:
        if i == 0:
            mdl = modelbank.assay_to_yield_model([1, 8, 10], arch, 1)
        elif i == 1:
            mdl = modelbank.weighted_assay_to_yield_model([1, 8, 10], arch, 1)
        elif i == 2:
            mdl = modelbank.seqandassay_to_yield_model([1, 8, 10], arch, 1)
        else:
            mdl = modelbank.seqandweightedassay_to_yield_model([1, 8, 10],
                                                               arch, 1)
        if mdl.model_stats['cv_avg_loss'] < cv_loss:
            cv_loss = mdl.model_stats['cv_avg_loss']
            test_loss = mdl.model_stats['test_avg_loss']
            test_std = mdl.model_stats['test_std_loss']
    loss_per_model.append(test_loss)
    std_per_model.append(test_std)

seq_model = modelbank.seq_to_yield_model('forest', 1)
seq_loss = seq_model.model_stats['test_avg_loss']
seq_std = seq_model.model_stats['test_std_loss']
x = [-0.3, 0.8]
seq_plus = [seq_loss + seq_std] * 2
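
For each model family, the loop above keeps the test statistics of whichever architecture achieved the lowest cross-validation loss. Here is a small sketch of that selection rule on toy numbers; the dictionary keys mirror model_stats, while the values are made-up placeholders, not real results.

# Pick the architecture with the lowest CV loss and report its test statistics.
def best_by_cv(stats_per_arch):
    best = min(stats_per_arch.values(), key=lambda s: s['cv_avg_loss'])
    return best['test_avg_loss'], best['test_std_loss']

stats_per_arch = {  # toy placeholder numbers
    'ridge':  {'cv_avg_loss': 0.45, 'test_avg_loss': 0.44, 'test_std_loss': 0.03},
    'forest': {'cv_avg_loss': 0.38, 'test_avg_loss': 0.41, 'test_std_loss': 0.02},
}
print(best_by_cv(stats_per_arch))  # -> (0.41, 0.02), taken from the 'forest' entry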
Example #5
    >>> d.fit([[1,2,3],[4,5,6],[7,8,9]], [1,2,3])
    >>> get_node_depths(d.tree_)
    array([0, 1, 1, 2, 2])
    """
    def get_node_depths_(current_node, current_depth, l, r, depths):
        depths += [current_depth]
        if l[current_node] != -1 and r[current_node] != -1:
            get_node_depths_(l[current_node], current_depth + 1, l, r, depths)
            get_node_depths_(r[current_node], current_depth + 1, l, r, depths)

    depths = []
    get_node_depths_(0, 0, tree.children_left, tree.children_right, depths)
    return np.array(depths)
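
# Because get_node_depths() relies only on the public children_left / children_right
# arrays of a fitted scikit-learn tree, it can be sanity-checked on a toy forest before
# applying it to the project's forest model below. A self-contained sketch under that
# assumption (toy data, made up purely for illustration):
import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(60, 3))
y = X[:, 0] + 0.1 * rng.normal(size=60)

toy_forest = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0).fit(X, y)
for est in toy_forest.estimators_:
    t = est.tree_
    depths = get_node_depths(t)
    assert len(depths) == t.node_count
    # t.feature[i] is the feature split on at node i (-2 for leaves); pairing it with
    # depths shows how early each input is used, mirroring the per-estimator loop below.
    print(t.node_count, depths.max(), np.unique(t.feature[t.feature >= 0]))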


a = mb.seqandassay_to_yield_model([1, 8, 10], 'forest', 1)
a.load_model(0)

#sort1 = input 0
#sort8 = input 1
#sort10 = input 2
fig, ax = plt.subplots(1, 3, figsize=[6, 2], dpi=300, sharey=True)
sorts = ["Prot K 37", "GFP SHuffle", r'$\beta$' + "-lactamase SHuffle"]
for sort_no in [0, 1, 2]:

    blac_nodes_info = []
    for j in range(a._model.model.n_estimators):
        a_tree = a._model.model.estimators_[j].tree_
        node_depth = get_node_depths(a_tree)

        for i in range(a_tree.node_count):
Example #6
def main():
    '''
    Compare test performance when reducing the training sample size. This version is for the first paper, predicting yield from assays and one-hot encoded sequence.
    '''
    ## A command line input is required when running this program. The integer input
    ## should be between 0 and 12.
    a=int(sys.argv[1])
    if a<4:
        b=0
        ## if the input is less than 4, then b is set to 0 and a is left unchanged
    elif a<8:
        a=a-4
        b=1
        ## if a is between 4 and 7, then b is set to 1 and a is reduced by 4
    elif a<12:
        a=a-8
        b=2
        ## if a is between 8 and 11, then b is set to 2 and a is reduced by 8
    elif a==12:
        b=3
        a=a-12
        ## if a is equal to 12, then b is set to 3 and a is set to 0
    else:
        print('incorrect toggle number')
        return
        ## If the input is out of bounds, an error message is printed and the function
        ## returns early, since b would otherwise be undefined in the code below.
    arch_list=['ridge','svm','forest','fnn']
    ## A string list is created containing the names of the different regression models and stored as arch_list
    # size_list=[0.055,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]
    size_list=[0.7,0.8,0.9,1]
    ## A float list is created containing varying amounts of sample fractions and stored as size_list
    for size in size_list:
        ## For each element in size_list, the value of b set in the if-else statements above
        ## dictates which kind of submodels_module.py object is created.
        ## if b = 0, then a seqandassay_to_yield_model object is created with an assay list of [1,8,10],
        ## a regression model dictated by the 'a' index of arch_list, and the size given by the current element of size_list
        if b==0:
            mdl=modelbank.seqandassay_to_yield_model([1,8,10],arch_list[a],size)
        ## if b = 1, then an assay_to_yield_model object is created with an assay list of [1,8,10]
        ## a regression model dictated by the 'a' index of the arch_list and the size determined by the iteration of size_list
        elif b==1: #1,5,9,12
            mdl=modelbank.assay_to_yield_model([1,8,10],arch_list[a],size)
        ## if b = 2, then a seq_to_yield_model object is created with a regression model dictated by
        ## the 'a' index of the arch_list and the size determined by the iteration of size_list
        elif b==2: 
            mdl=modelbank.seq_to_yield_model(arch_list[a],size)
        ## if b = 3, then a control_to_yield_model object is created with a regression model dictated by
        ## the 'a' index of the arch_list and the size determined by the iteration of size_list
        elif b==3:
            mdl=modelbank.control_to_yield_model(arch_list[a],size)
            
        for seed in range(9): # the default run without an explicit seed uses seed=42
            ## For each element in the integer range [0, 9), the sample_seed attribute is set to that element.
            ## The trial data, model data, and plots are then updated to reflect the new sample seed.
            mdl.change_sample_seed(seed)
            ## Then the best hyperparameters for the given model and seed are determined using the
            ## cross_validate_model() function of the model object
            mdl.cross_validate_model()
            ## Following this, the limit_test_set() function defined in the x_to_yield_model parent class
            ## updates the testing_df class dataframe to reflect the [1,8,10] assays.
            mdl.limit_test_set([1,8,10])
            ## Finally, the test_model() function from the model parent class is run to train the model
            ## with the hyperparameters defined above and predict the testing dataset.
            mdl.test_model()
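
Since each toggle in [0, 12] trains one (model family, architecture) combination, the full sample-size sweep would typically be launched by invoking the script once per toggle. A hedged sketch of such a driver follows; the script file name is a placeholder, not taken from the source.

import subprocess
import sys

# Run every toggle 0..12; each invocation executes one main() like the one shown above.
for toggle in range(13):
    subprocess.run([sys.executable, 'sample_size_sweep.py', str(toggle)], check=True)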