Example #1
0
 def get_waveforms(self, spikes=None, clusters=None):
     """Return waveform data for the requested spikes or clusters.

     Called with no arguments, serve the processed values straight from
     the selection cache.  Returns None when no waveform selection (or
     selection table) is available.
     """
     if self.waveforms_selected is None:
         return None
     # Cache hit: nothing was requested explicitly.
     if spikes is None and clusters is None:
         processed = self.process_waveforms(self.waveforms_selected)
         return pandaize(processed, self.spikes_waveforms)

     # Explicit request.
     if self.spikes_selected_table is None:
         return None
     if spikes is None:
         chosen = self.clusters_selected if clusters is None else clusters
         if chosen is None:
             spikes = self.spikes_selected
         else:
             spikes = get_some_spikes_in_clusters(
                 chosen, self.clusters,
                 counter=self.counter,
                 nspikes_max_expected=self.userpref['waveforms_nspikes_max_expected'],
                 nspikes_per_cluster_min=self.userpref['waveforms_nspikes_per_cluster_min'])
     return select(self.waveforms, spikes)
Example #2
0
def atmtemp(x):
    """Render the atmospheric-temperature chart for time span *x*.

    *x* is one of "1h", "24h", "1w", "1m" or "1y".  Refreshes the
    module-level ``time`` and ``temp`` lists in place from the database
    and renders ``atm_temp.html``.

    Raises KeyError for an unknown span (the previous duplicated
    branches crashed later with UnboundLocalError instead).
    """
    # span -> (display name, x-axis label, row count passed to select()).
    # NOTE(review): the 1m/1y row counts (10/15) look inconsistent with
    # the minute-based counts above; the original flagged them with
    # "please take a look at this one" -- confirm against the sampling
    # schedule before trusting those two charts.
    spans = {
        "1h": ("1 HOUR", "Minutes", 60),
        "24h": ("24 HOUR", "Hours", 1440),
        "1w": ("1 WEEK", "Days", 10080),
        "1m": ("1 MONTH", "Weeks", 10),
        "1y": ("1 YEAR", "Months", 15),
    }
    name, label, nrows = spans[x]
    del time[:]
    del temp[:]
    for row in select("atmosphericTemp", nrows):
        temp.append(float(round(row[1], 2)))
        time.append(str(row[0][11:16]))  # HH:MM slice of the timestamp string
    return render_template("atm_temp.html",
                           temp=temp,
                           time=str(time),
                           name=name,
                           label=label)
Example #3
0
def soilmoist(x):
    """Render the soil-moisture chart for time span *x*.

    *x* is one of "1h", "24h", "1w", "1m" or "1y".  Refreshes the
    module-level ``time`` and ``moist`` lists in place and renders
    ``soil_moist.html``.

    Bug fix: the 1w/1m/1y branches previously cleared the unrelated
    ``temp`` list (copy-paste from the temperature view) and left stale
    ``moist`` values behind; all branches now clear ``moist``.

    Raises KeyError for an unknown span (the previous duplicated
    branches crashed later with UnboundLocalError instead).
    """
    # span -> (display name, x-axis label, row count passed to select()).
    # NOTE(review): the 1m/1y row counts (10/15) were flagged in the
    # original with "please take a look at this one" -- confirm against
    # the sampling schedule.
    spans = {
        "1h": ("1 HOUR", "Minutes", 60),
        "24h": ("24 HOUR", "Hours", 1440),
        "1w": ("1 WEEK", "Days", 10080),
        "1m": ("1 MONTH", "Weeks", 10),
        "1y": ("1 YEAR", "Months", 15),
    }
    name, label, nrows = spans[x]
    del time[:]
    del moist[:]
    for row in select("soilMoisture", nrows):
        moist.append(float(round(row[1], 2)))
        time.append(str(row[0][11:16]))  # HH:MM slice of the timestamp string
    return render_template("soil_moist.html",
                           moist=moist,
                           time=str(time),
                           name=name,
                           label=label)
Example #4
0
def humid(x):
    """Render the humidity chart for time span *x*.

    *x* is one of "1h", "24h", "1w", "1m" or "1y".  Refreshes the
    module-level ``time`` and ``humidity`` lists in place and renders
    ``humidity.html``.

    Raises KeyError for an unknown span (the previous duplicated
    branches crashed later with UnboundLocalError instead).
    """
    # span -> (display name, axis label, row count, timestamp slice).
    # NOTE(review): the 1w branch slices chars 0:4 of the timestamp
    # (the year) while every other branch takes 11:16 (HH:MM); this is
    # preserved from the original -- confirm it is intentional.
    # The 1m/1y row counts (4/10) also look inconsistent with the
    # minute-based counts above; verify against the sampling schedule.
    spans = {
        "1h": ("1 HOUR", "Minutes", 60, slice(11, 16)),
        "24h": ("24 HOUR", "Hours", 1440, slice(11, 16)),
        "1w": ("1 WEEK", "Days", 10080, slice(0, 4)),
        "1m": ("1 MONTH", "Weeks", 4, slice(11, 16)),
        "1y": ("1 YEAR", "Months", 10, slice(11, 16)),
    }
    name, label, nrows, ts_slice = spans[x]
    del time[:]
    del humidity[:]
    for row in select("humidity", nrows):
        humidity.append(float(round(row[1], 2)))
        time.append(str(row[0][ts_slice]))
    return render_template("humidity.html",
                           humidity=humidity,
                           time=str(time),
                           name=name,
                           label=label)
 def get_waveforms(self, spikes=None, clusters=None):
     """Return waveforms for *spikes*, or for a sampled subset of the
     spikes belonging to *clusters* (the selected clusters by default)."""
     if spikes is None:
         target = self.clusters_selected if clusters is None else clusters
         if target is None:
             spikes = self.spikes_selected
         else:
             spikes = get_some_spikes_in_clusters(
                 target, self.clusters,
                 counter=self.counter,
                 nspikes_max_expected=self.userpref['waveforms_nspikes_max_expected'],
                 nspikes_per_cluster_min=self.userpref['waveforms_nspikes_per_cluster_min'])
     return select(self.waveforms, spikes)
Example #6
0
 def get_waveforms(self, spikes=None, clusters=None):
     """Waveforms for explicit *spikes*; otherwise a sampled subset of
     the spikes in *clusters* (falling back to the current selection)."""
     if spikes is not None:
         return select(self.waveforms, spikes)
     if clusters is None:
         clusters = self.clusters_selected
     if clusters is None:
         # No clusters either: use the currently selected spikes.
         return select(self.waveforms, self.spikes_selected)
     prefs = self.userpref
     sampled = get_some_spikes_in_clusters(
         clusters, self.clusters,
         counter=self.counter,
         nspikes_max_expected=prefs['waveforms_nspikes_max_expected'],
         nspikes_per_cluster_min=prefs['waveforms_nspikes_per_cluster_min'])
     return select(self.waveforms, sampled)
def genetic_algo(pop_size, layer_length, num_iteration, prob_activate,
                 prob_mutation, ex_idx,
                 file_path='C:/Users/Zhu/PycharmProjects/experiment_log/GA_log_0514'):
    """Run a generational genetic algorithm over layer genotypes.

    Each iteration: evaluate the population, keep the better half,
    refill it with recombined children of selected parents, then mutate
    the unprotected survivors.

    :param pop_size: number of genotypes in the population
    :param layer_length: genotype length passed to initial_genotype
    :param num_iteration: number of generations to run
    :param prob_activate: activation probability for initial genotypes
    :param prob_mutation: per-gene mutation probability passed to mutate()
    :param ex_idx: experiment index; accepted for caller compatibility
        but not used inside this function
    :param file_path: log/model output directory.  Previously hard-coded;
        now a parameter with the old path as default so existing callers
        are unaffected.

    NOTE(review): the function accumulates ``geno_log`` but does not
    return it -- confirm whether a trailing return was lost upstream.
    """
    survive_size = math.ceil(pop_size / 2.0)
    geno = initial_genotype(pop_size, layer_length, [128, 256], prob_activate)
    geno_log = pd.DataFrame(columns=('iteration', 'geno_type', 'score', 'L0',
                                     'L1', 'L2', 'L3', 'L4'))
    for i in range(num_iteration):
        print(
            "===================================== %d th iteration ============================="
            % i)
        # Evaluate the whole population and persist models/scores.
        geno = evaluate(geno,
                        i,
                        num_train=6000,
                        num_test=1500,
                        file_path=file_path,
                        save_model=True)

        geno_log = save_geno(geno_log, geno, i)
        key_print(geno)
        # Selection: keep the better half.
        geno_selected = select(geno, survive_size, model='absolute')
        print("===== Selection part =====")
        print('reserved genos: ')
        key_print(geno_selected)

        # Recombination: refill the population with children.
        print("====== Recombination Part ======-")
        new_birth_size = pop_size - int(survive_size)

        for j in range(new_birth_size):
            parents_list = select(geno_selected, 2, model='parents_abs')
            parent1 = parents_list[0]
            parent2 = parents_list[1]
            child = recombine(parent1, parent2, 0.5, single_child_flag=True)
            new_register(geno_selected, child)

        # Mutation: every unprotected genotype mutates with probability 0.5;
        # the best genotype is shielded by protect_best.
        geno_selected = protect_best(geno_selected)
        for key in geno_selected:
            if not geno_selected[key]['protected']:
                if np.random.random() > 0.5:
                    geno_selected[key] = mutate(geno_selected[key],
                                                prob_mutation)
        print("===== Mutation part =====")
        dict_print(geno_selected)
        geno = geno_selected
 def get_spiketimes(self, spikes=None, clusters=None):
     """Return spike times for *spikes*, or for all spikes in *clusters*;
     defaults to the current spike selection."""
     target = spikes
     if clusters is not None:
         target = get_spikes_in_clusters(clusters, self.clusters)
     if target is None:
         target = self.spikes_selected
     # Some data sources expose the attribute as 'spiketimes_res' instead.
     times = getattr(self, 'spiketimes', getattr(self, 'spiketimes_res', None))
     return select(times, target)
Example #9
0
    def iterate(self):
        """Advance the evolutionary search by one generation.

        Selects a parent population, derives one child per parent via
        crossover, fragment perturbation and local search, truncates
        back to the population size, then re-scores every survivor
        against the known native pose and records it in the protein DB.
        """
        # Possible expansion of MOEA. Hence, why this operation is more complicated than it needs to be.
        prevPop = selection.select(self)
        nextPop = []

        # Focus on the poses
        for i in range(len(prevPop)):
            # Setup variables for creating the new protein configuration to be added to the population.
            childData = ProteinData(Pose(), prevPop[i].score)
            # copy pose from previous generation.
            childData.pose.assign(prevPop[i].pose)

            # Begin protein manipulation
            crossover.typeofcrossover(self, childData)  # Perform a crossover
            variation.perturb(self, childData)  # Apply fragment replacement
            # Local search for possible improvement
            improvement.localSearch(self, childData)
            nextPop.append(childData)

        # Elitest-based Truncation Selection
        self.population = selection.truncate(self, prevPop, nextPop)

        # Evaluate the newest generation
        for i in range(len(self.population)):
            # align the poses for a better score.
            # Don't think this is working. Tested it and didn't show any signficant changes.
            # calpha_superimpose_pose(self.population[i].pose, self.knownNative)
            # Evaluate and store the rmsd score between the 2 poses along with the score.
            self.population[i].rmsd = CA_rmsd(self.population[i].pose,
                                              self.knownNative)
            self.population[i].age += 1
            self.proteinDB.append([
                self.population[i].rmsd, self.population[i].score,
                self.population[i].age
            ])
Example #10
0
 def get_spiketimes(self, spikes=None, clusters=None):
     """Spike times for the requested spikes/clusters (current selection
     when neither is given)."""
     if clusters is not None:
         spikes = get_spikes_in_clusters(clusters, self.clusters)
     chosen = self.spikes_selected if spikes is None else spikes
     # Fall back to the legacy 'spiketimes_res' attribute when present.
     source = getattr(self, 'spiketimes', getattr(self, 'spiketimes_res', None))
     return select(source, chosen)
Example #11
0
 def get_masks(self, spikes=None, full=None, clusters=None):
     """Return (optionally full) masks for the requested spikes/clusters."""
     if clusters is not None:
         spikes = get_spikes_in_clusters(clusters, self.clusters)
     if spikes is None:
         spikes = self.spikes_selected
     source = self.masks_full if full else self.masks
     return select(source, spikes)
Example #12
0
 def get_cluster_colors(self, clusters=None, can_override=True):
     """Return one color per cluster; when overriding is active the
     group colors take precedence over per-cluster colors."""
     if clusters is None:
         clusters = self.clusters_selected
     overriding = can_override and self.override_color
     if overriding:
         grp_colors = get_array(self.get_group_colors("all"))
         grps = get_array(self.get_cluster_groups("all"))
         colors = pd.Series(grp_colors[grps],
                            index=self.get_clusters_unique())
     else:
         colors = pd.Series(list(map(self.get_cluster_color, clusters)),
                            index=clusters)
     return select(colors, clusters)
 def get_masks(self, spikes=None, full=None, clusters=None):
     """Masks (or full masks when *full* is truthy) for the requested
     spikes/clusters; defaults to the current spike selection."""
     target = spikes
     if clusters is not None:
         target = get_spikes_in_clusters(clusters, self.clusters)
     if target is None:
         target = self.spikes_selected
     if full:
         return select(self.masks_full, target)
     return select(self.masks, target)
Example #14
0
 def get_channel_colors(self, channels=None, can_override=True):
     """Return per-channel colors; channel-group colors win when the
     override flag is active."""
     if channels is None:
         channels = self.channels_selected
     if not (can_override and self.override_color):
         return select(self.channel_colors, channels)
     group_colors = get_array(self.get_channel_group_colors("all"))
     groups = get_array(self.get_channel_groups("all"))
     per_channel = pd.Series(group_colors[groups], index=self.channels)
     return select(per_channel, channels)
 def get_channel_colors(self, channels=None, can_override=True):
     """Per-channel colors for *channels* (the selected channels by
     default), honoring the group-color override when enabled."""
     channels = self.channels_selected if channels is None else channels
     overriding = can_override and self.override_color
     if overriding:
         group_colors = get_array(self.get_channel_group_colors('all'))
         groups = get_array(self.get_channel_groups('all'))
         colors = pd.Series(group_colors[groups],
                            index=self.channels)
     else:
         colors = self.channel_colors
     return select(colors, channels)
Example #16
0
def kth_quantiles(array, k):
    """K-th quantiles.

    Return the order statistics that split *array* into k groups of
    (near-)equal size, computed by recursive selection.  *array* is
    rearranged in place by the partition steps.

    NOTE(review): assumes ``selection.select(a, m)`` returns the m-th
    smallest element and ``selection.partition(a, p)`` rearranges *a*
    around the pivot value *p* -- confirm against the selection module.
    """
    # A single group needs no dividing quantiles.
    if k == 1:
        return list()
    length = len(array)
    if not k % 2:
        # Even k: split at the median element, recurse on both halves.
        mid = length // 2 if length % 2 else length // 2 - 1
        pivot = selection.select(array, mid + 1)
        selection.partition(array, pivot)
        return (kth_quantiles(array[:mid], k // 2)
                + [pivot]
                + kth_quantiles(array[mid + 1:], k // 2))
    # Odd k: take the two quantiles flanking the middle group, then
    # recurse on the outer parts only.
    low = (length * k - length - 1) // (2 * k)
    high = length - 1 - low
    left = selection.select(array, low + 1)
    right = selection.select(array, high + 1)
    selection.partition(array, left)
    left_quantiles = kth_quantiles(array[:low], k // 2)
    selection.partition(array, right)
    right_quantiles = kth_quantiles(array[high + 1:], k // 2)
    return left_quantiles + [left, right] + right_quantiles
 def get_cluster_colors(self, clusters=None, can_override=True):
     """Per-cluster colors for *clusters* (the selected clusters by
     default); group colors win when overriding is enabled."""
     clusters = self.clusters_selected if clusters is None else clusters
     if not (can_override and self.override_color):
         return select(self.cluster_colors, clusters)
     group_colors = get_array(self.get_group_colors('all'))
     groups = get_array(self.get_cluster_groups('all'))
     per_cluster = pd.Series(group_colors[groups],
                             index=self.get_clusters_unique())
     return select(per_cluster, clusters)
Example #18
0
 def get_features(self, spikes=None, clusters=None):
     """Return features for the requested spikes/clusters, serving the
     processed cached values when neither argument is supplied."""
     if self.spikes_selected_table is None:
         return None
     # Cache hit: nothing was requested explicitly.
     if spikes is None and clusters is None:
         cached = self.spikes_selected_table['features']
         processed = self.process_features(cached)
         return pandaize(processed, self.spikes_selected)
     # Explicit request.
     if clusters is not None:
         spikes = get_spikes_in_clusters(clusters, self.clusters)
     if spikes is None:
         spikes = self.spikes_selected
     return select(self.features, spikes)
Example #19
0
def weighted_median_r(array, expect):
    """Weighted median recursion.

    Return the element at which the cumulative sum of the (partially
    sorted) prefix first reaches *expect*.  Narrows the search to one
    half of *array* at each step; *array* is rearranged in place by the
    partition call.

    NOTE(review): assumes ``selection.select(a, m)`` returns the m-th
    smallest element and ``selection.partition(a, p)`` places smaller
    elements before the pivot -- confirm against the selection module.
    """
    length = len(array)
    if length == 1:
        return array[0]
    mid = (length - 1) // 2
    # Median of the current subarray.
    pivot = selection.select(array, mid + 1)
    selection.partition(array, pivot)
    # Total weight strictly below the pivot.
    lsum = sum(array[:mid])
    if lsum < expect:
        if lsum + pivot >= expect:
            return pivot
        # Answer is in the upper half; discount the weight consumed.
        return weighted_median_r(array[mid + 1:],
                                 expect - lsum - pivot)
    return weighted_median_r(array[:mid], expect)
 def get_some_features(self, clusters=None):
     """Return the features for a subset of all spikes: a large
     background sample drawn from any cluster, merged with a controlled
     sample from the selected clusters."""
     if clusters is None:
         clusters = self.clusters_selected
     if clusters is None:
         spikes = self.spikes_selected
     else:
         prefs = self.userpref
         background = get_some_spikes(
             self.clusters,
             nspikes_max=prefs['features_nspikes_background_max'])
         in_clusters = get_some_spikes_in_clusters(
             clusters,
             self.clusters,
             counter=self.counter,
             nspikes_max_expected=prefs['features_nspikes_selection_max'],
             nspikes_per_cluster_min=prefs['features_nspikes_per_cluster_min'])
         spikes = np.union1d(background, in_clusters)
     return select(self.features, spikes)
Example #21
0
 def get_some_features(self, clusters=None):
     """Features for a large background sample of all spikes combined
     with a bounded per-cluster sample of the selection."""
     target = self.clusters_selected if clusters is None else clusters
     if target is None:
         # Nothing to sample from: fall back to the current selection.
         return select(self.features, self.spikes_selected)
     background = get_some_spikes(
         self.clusters,
         nspikes_max=self.userpref['features_nspikes_background_max'])
     sampled = get_some_spikes_in_clusters(
         target,
         self.clusters,
         counter=self.counter,
         nspikes_max_expected=self.userpref[
             'features_nspikes_selection_max'],
         nspikes_per_cluster_min=self.userpref[
             'features_nspikes_per_cluster_min'])
     return select(self.features, np.union1d(background, sampled))
Example #22
0
 def get_masks(self, spikes=None, full=None, clusters=None):
     """Return (optionally full) masks, serving processed cached values
     when neither spikes nor clusters are specified."""
     if self.spikes_selected_table is None:
         return None
     # Cache hit: nothing was requested explicitly.
     if spikes is None and clusters is None:
         cached = self.spikes_selected_table['masks']
         if full:
             processed = self.process_masks_full(cached)
         else:
             processed = self.process_masks(cached)
         return pandaize(processed, self.spikes_selected)

     # Explicit request.
     if clusters is not None:
         spikes = get_spikes_in_clusters(clusters, self.clusters)
     if spikes is None:
         spikes = self.spikes_selected
     source = self.masks_full if full else self.masks
     return select(source, spikes)
Example #23
0
 def get_channel_groups_visible(self, channel_groups=None):
     """Return the visible channel groups restricted to *channel_groups*."""
     visible = self.channel_groups_visible
     return select(visible, channel_groups)
Example #24
0
 def get_channel_groups(self, channels=None):
     """Return the channel group of each channel in *channels*
     (the selected channels by default)."""
     chans = self.channels_selected if channels is None else channels
     return select(self.channel_groups, chans)
Example #25
0
 def get_channels_visible(self, channels=None):
     """Return the visible channels restricted to *channels*."""
     visible = self.channels_visible
     return select(visible, channels)
Example #26
0
 def get_channel_names(self, channels=None):
     """Return the names of the channels in *channels*."""
     names = self.channel_names
     return select(names, channels)
Example #27
0
 def get_channel_color(self, channel):
     """Return the color of *channel*, or 0 when the channel index is
     out of range."""
     try:
         return select(self.channel_colors, channel)
     except IndexError:
         # Out-of-range channel: fall back to the default color index.
         return 0
Example #28
0
    def build(self, rgb, train_mode=None):
        """
        load variable from npy to build the VGG-style backbone plus a
        multi-branch upsampling head ending in a softmax mask

        :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1];
            the asserts below pin the spatial size to 160x288
        :param train_mode: a bool tensor, usually a placeholder: if True,
            dropout will be turned on
        """
        with tf.variable_scope("Pre_Processing"):
            # Inputs arrive in [0, 1]; scale to 0-255 and convert to
            # mean-subtracted BGR (VGG's expected input convention).
            rgb_scaled = rgb * 255.0

            # Convert RGB to BGR
            red, green, blue = tf.split(axis=3,
                                        num_or_size_splits=3,
                                        value=rgb_scaled)
            assert red.get_shape().as_list()[1:] == [160, 288, 1]
            assert green.get_shape().as_list()[1:] == [160, 288, 1]
            assert blue.get_shape().as_list()[1:] == [160, 288, 1]
            bgr = tf.concat(axis=3,
                            values=[
                                blue - VGG_MEAN[0],
                                green - VGG_MEAN[1],
                                red - VGG_MEAN[2],
                            ])
            assert bgr.get_shape().as_list()[1:] == [160, 288, 3]

        # Convolution Stages (trainable=0: backbone weights stay frozen)
        self.conv1_1 = self.conv_layer(bgr,
                                       3,
                                       64,
                                       "conv1_1",
                                       train_mode,
                                       trainable=0)
        self.conv1_2 = self.conv_layer(self.conv1_1,
                                       64,
                                       64,
                                       "conv1_2",
                                       train_mode,
                                       tracking=1,
                                       trainable=0)
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')

        self.conv2_1 = self.conv_layer(self.pool1,
                                       64,
                                       128,
                                       "conv2_1",
                                       train_mode,
                                       trainable=0)
        self.conv2_2 = self.conv_layer(self.conv2_1,
                                       128,
                                       128,
                                       "conv2_2",
                                       train_mode,
                                       tracking=1,
                                       trainable=0)
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')

        self.conv3_1 = self.conv_layer(self.pool2,
                                       128,
                                       256,
                                       "conv3_1",
                                       train_mode,
                                       trainable=0)
        self.conv3_2 = self.conv_layer(self.conv3_1,
                                       256,
                                       256,
                                       "conv3_2",
                                       train_mode,
                                       trainable=0)
        self.conv3_3 = self.conv_layer(self.conv3_2,
                                       256,
                                       256,
                                       "conv3_3",
                                       train_mode,
                                       trainable=0)
        self.conv3_4 = self.conv_layer(self.conv3_3,
                                       256,
                                       256,
                                       "conv3_4",
                                       train_mode,
                                       tracking=1,
                                       trainable=0)
        self.pool3 = self.max_pool(self.conv3_4, 'pool3')

        self.conv4_1 = self.conv_layer(self.pool3,
                                       256,
                                       512,
                                       "conv4_1",
                                       train_mode,
                                       trainable=0)
        self.conv4_2 = self.conv_layer(self.conv4_1,
                                       512,
                                       512,
                                       "conv4_2",
                                       train_mode,
                                       trainable=0)
        self.conv4_3 = self.conv_layer(self.conv4_2,
                                       512,
                                       512,
                                       "conv4_3",
                                       train_mode,
                                       trainable=0)
        self.conv4_4 = self.conv_layer(self.conv4_3,
                                       512,
                                       512,
                                       "conv4_4",
                                       train_mode,
                                       tracking=1,
                                       trainable=0)
        self.pool4 = self.max_pool(self.conv4_4, 'pool4')

        self.conv5_1 = self.conv_layer(self.pool4,
                                       512,
                                       512,
                                       "conv5_1",
                                       train_mode,
                                       trainable=0)
        self.conv5_2 = self.conv_layer(self.conv5_1,
                                       512,
                                       512,
                                       "conv5_2",
                                       train_mode,
                                       trainable=0)
        self.conv5_3 = self.conv_layer(self.conv5_2,
                                       512,
                                       512,
                                       "conv5_3",
                                       train_mode,
                                       trainable=0)
        self.conv5_4 = self.conv_layer(self.conv5_3,
                                       512,
                                       512,
                                       "conv5_4",
                                       train_mode,
                                       tracking=1,
                                       trainable=0)
        self.pool5 = self.max_pool(self.conv5_4, 'pool5')

        # FC Layers + Relu + Dropout
        # 23040 = 5 * 9 * 512 flattened pool5 activations
        # (160/2^5 x 288/2^5 spatial, 512 channels).
        self.fc6 = self.affine_layer(self.pool5,
                                     23040,
                                     4096,
                                     "fc6",
                                     train_mode,
                                     tracking=1)
        self.fc7 = self.affine_layer(self.fc6,
                                     4096,
                                     4096,
                                     "fc7",
                                     train_mode,
                                     tracking=1)
        self.fc8 = self.affine_layer(self.fc7,
                                     4096,
                                     33 * 9 * 5,
                                     "fc8",
                                     train_mode,
                                     tracking=1,
                                     dropout=1.0)

        # Upscaling last branch
        with tf.variable_scope("FC_rs"):
            self.fc_RS = tf.reshape(self.fc8, [-1, 5, 9, 33])

        #-------branch 1-----
        # Each branch i normalizes pool_i, projects it to 33 channels and
        # upsamples it back to input resolution; the upsampling factor
        # doubles per branch.
        scale = 1
        self.bn_pool1 = self.batch_norm(self.pool1,
                                        train_mode,
                                        name="branch1_bn")
        self.branch1_1 = self.conv_layer(self.bn_pool1, 64, 33, "branch1_conv",
                                         train_mode)
        self.branch1_2 = self.deconv_layer(self.branch1_1,
                                           33,
                                           33,
                                           scale,
                                           0,
                                           'branch1_upconv',
                                           train_mode,
                                           tracking=1)

        #-------branch 2-----
        scale *= 2
        self.bn_pool2 = self.batch_norm(self.pool2,
                                        train_mode,
                                        name="branch2_bn")
        self.branch2_1 = self.conv_layer(self.bn_pool2, 128, 33,
                                         "branch2_conv", train_mode)
        self.branch2_2 = self.deconv_layer(self.branch2_1,
                                           33,
                                           33,
                                           scale,
                                           0,
                                           'branch2_upconv',
                                           train_mode,
                                           tracking=1)

        # -------branch 3-----
        scale *= 2
        self.bn_pool3 = self.batch_norm(self.pool3,
                                        train_mode,
                                        name="branch3_bn")
        self.branch3_1 = self.conv_layer(self.bn_pool3, 256, 33,
                                         "branch3_conv", train_mode)
        self.branch3_2 = self.deconv_layer(self.branch3_1,
                                           33,
                                           33,
                                           scale,
                                           0,
                                           'branch3_upconv',
                                           train_mode,
                                           tracking=1)

        # -------branch 4-----
        scale *= 2
        self.bn_pool4 = self.batch_norm(self.pool4,
                                        train_mode,
                                        name="branch4_bn")
        self.branch4_1 = self.conv_layer(self.bn_pool4, 512, 33,
                                         "branch4_conv", train_mode)
        self.branch4_2 = self.deconv_layer(self.branch4_1,
                                           33,
                                           33,
                                           scale,
                                           0,
                                           'branch4_upconv',
                                           train_mode,
                                           tracking=1)

        # -------branch 5 (FC) -----
        scale *= 2
        self.branch5_1 = tf.nn.relu(self.fc_RS)
        self.branch5_2 = self.deconv_layer(self.branch5_1,
                                           33,
                                           33,
                                           scale,
                                           0,
                                           'branch5_upconv',
                                           train_mode,
                                           tracking=1,
                                           relu=0)

        # Combine and x2 Upsample
        self.up_sum = self.branch1_2 + self.branch2_2 + self.branch3_2 + self.branch4_2 + self.branch5_2

        scale = 2
        self.up = self.deconv_layer(self.up_sum,
                                    33,
                                    33,
                                    scale,
                                    0,
                                    'up',
                                    train_mode,
                                    tracking=1,
                                    initialization="bilinear")

        self.up_conv = self.conv_layer(self.up,
                                       33,
                                       33,
                                       "up_conv",
                                       train_mode,
                                       tracking=1)

        # Tracking presoftmax activation
        with tf.name_scope('up_conv_act'):
            variable_summaries(self.up_conv)

        # Add + Mask + Selection
        with tf.variable_scope("mask_softmax"):
            self.mask = tf.nn.softmax(self.up_conv)
            self.mask.set_shape([None, 160, 288, 33])

        # Tracking Mask
        with tf.name_scope('mask_act'):
            variable_summaries(self.mask)

        self.prob = selection.select(self.mask, rgb)

        # Clear out init dictionary
        self.data_dict = None
Example #29
0
 def get_spiketimes(self, spikes=None):
     """Return the spike times restricted to *spikes*."""
     times = self.spiketimes
     return select(times, spikes)
Example #30
0
 def get_clusters(self, spikes=None):
     """Return the cluster assignment of each spike in *spikes*."""
     assignments = self.clusters
     return select(assignments, spikes)
 def get_cluster_groups(self, clusters=None):
     """Return the group of each cluster in *clusters*
     (the selected clusters by default)."""
     target = self.clusters_selected if clusters is None else clusters
     return select(self.cluster_groups, target)
Example #32
0
 def get_traces(self):
     """Return the raw traces."""
     raw = self.raw
     return select(raw)
Example #33
0
 def get_waveforms(self, spikes=None):
     """Return the waveforms restricted to *spikes*."""
     data = self.waveforms
     return select(data, spikes)
Example #34
0
 def get_clusters(self, spikes=None, clusters=None):
     """Cluster assignments for *spikes*, or for all spikes belonging to
     *clusters*; defaults to the current spike selection."""
     if clusters is not None:
         spikes = get_spikes_in_clusters(clusters, self.clusters)
     target = self.spikes_selected if spikes is None else spikes
     return select(self.clusters, target)
Example #35
0
def main():
    """Command-line entry point for PREDAIP.

    Parses the arguments, extracts fused features from the input peptide
    sequences, scores them with the pre-trained model, prints every
    prediction, and — only when an output path was supplied — saves the
    positive hits to a CSV file.
    """
    parser = argparse.ArgumentParser(
        description=
        'PREDAIP: Computational Prediction and Analysis for Anti-inflammatory Peptide via a Hybrid Feature Selection Technique.'
    )
    parser.add_argument(
        '-input',
        dest='inputfile',
        type=str,
        help='Query peptide sequences to be predicted in txt format.',
        required=True)
    parser.add_argument('-threshold',
                        dest='threshold_value',
                        type=float,
                        help='Please input a value between 0 and 1',
                        required=True)
    parser.add_argument('-output',
                        dest='outputfile',
                        type=str,
                        help='Saving the prediction results in csv format.',
                        required=False)
    args = parser.parse_args()
    inputfile = args.inputfile
    threshold = args.threshold_value
    outputfile = args.outputfile

    print('Peptide sequences are loading...')
    sequence, namelist = ExtractSeq(inputfile)

    print('FusedFeatures are extracting...')
    data = ExtractFeatures(sequence, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
    X_test = select(data)
    print('Loading model...')
    model = joblib.load('best.model')
    # predict_proba: column 1 is the probability of the positive (AIP) class.
    prediction = model.predict_proba(X_test)

    peptide_name = []
    Sequences = []
    Probability = []
    print('AIPs were predicted as follows.')
    print('-------------------------------')
    for i in range(len(sequence)):
        print(namelist[i])
        print(sequence[i])
        if prediction[i][1] > threshold:
            peptide_name.append(namelist[i])
            Sequences.append(sequence[i])
            var = '%.11f' % prediction[i][1]
            Probability.append(var)
            print('probability value:' + str(var))
        else:
            print('The peptide is not AIP')
        print('-------------------------------')

    # BUG FIX: '-output' is optional (required=False). DataFrame.to_csv(None)
    # merely *returns* the CSV text, so the previous unconditional call
    # silently discarded the results. Write the file only when a path exists.
    if outputfile:
        AA = {'a': peptide_name, 'b': Sequences, 'c': Probability}
        AIP = pd.DataFrame(AA)
        AIP.to_csv(outputfile,
                   index=False,
                   header=['peptide name', 'Sequences', 'Probability value'])
Example #36
0
 def get_channel_group_colors(self, channel_groups=None):
     """Return the color of each requested channel group."""
     palette = self.channel_group_colors
     return select(palette, channel_groups)
Example #37
0
 def get_channel_group_names(self, channel_groups=None):
     """Return the name of each requested channel group."""
     names = self.channel_group_names
     return select(names, channel_groups)
Example #38
0
 def get_masks(self, spikes=None, full=None):
     """Return the masks of the given spikes.

     With a truthy *full*, the full masks (self.masks_full) are used
     instead of the regular ones.
     """
     source = self.masks_full if full else self.masks
     return select(source, spikes)
 def get_group_names(self, groups=None):
     """Return the name of each requested group."""
     names = self.group_names
     return select(names, groups)
Example #40
0
    def Decls(self,
             name=None,
             fullname=None,
             type=None,
             retval=None,
             args=None,
             anyarg=None,
             signature=None,
             header=None,
             headerdir=None,
             accesstype=None,
             const=None,
             virtuality=None,
             filter=None,
             recursive=None,
             allow_empty=None,
             assert_count=None
             ):
        """Obtain a Decl object referencing one or more declarations.

        Filters all contained declarations and returns a new Decl
        object that only contains declarations matching the filtering
        rules as specified by the arguments. If an argument is None,
        that particular filtering operation is disabled. If several
        arguments are provided, all of them must be matched.

        For any filter that is based on strings (such as name) the
        following rules apply:

         - A string must match exactly the corresponding attribute of the
           declaration (C{name="wxFrame"} will only return the class
           "wxFrame").
         - A string that is bracketed by a leading and trailing slash '/' is
           interpreted as a regular expression (C{name="/wx.*/"} will return
           all classes that begin with "wx").

        Any argument can also be passed a list of values which duplicates
        the filter. These filter are concatenated with OR, so a declaration
        has to match only one of the filters. For example, you can select all
        classes starting with either "wx" or "WX" by setting
        C{name=["/wx.*/", "/WX.*/"]}.

        The user defined filter function filter must accept a Decl
        object as argument and has to return True when the declaration
        is matched.

        @param name: Select declarations by name
        @type name: str
        @param fullname: Select declarations by name (which includes namespaces)
        @type fullname: str
        @param type: Select declarations by type. The type is given by a combination of flags (CLASS, MEMBER_FUNCTION/METHOD, FREE_FUNCTION/FUNCTION, ENUM, ...)
        @type type: int
        @param retval: Select functions/methods based on their return value (this implies the type flags MEMBER_FUNCTION | FREE_FUNCTION)
        @type retval: str
        @param args: Select functions/methods bases on their arguments (this implies the type flags MEMBER_FUNCTION | FREE_FUNCTION)
        @type args: list of str
        @param anyarg: Select all functions/methods that have the specified argument somewhere in their argument list (this implies the type flags MEMBER_FUNCTION | FREE_FUNCTION)
        @type anyarg: str
        @param signature: Select declarations by their signature (this implies the type flags MEMBER_FUNCTION | FREE_FUNCTION)
        @type signature: str
        @param header: Select declarations by the header file in which they are defined
        @type header: str
        @param headerdir: Select declarations by the directory in which their header file is located
        @type headerdir: str
        @param accesstype: Access type (PUBLIC or PROTECTED). This implies the type flags CLASS_MEMBER.
        @param const: Select declarations by their constness.
        @type const: bool
        @param virtuality: Select declarations by their virtuality. This implies the type flags CLASS_MEMBER:
        @type virtuality: Virtuality flags
        @param filter: User defined filter function
        @type filter: callable
        @param recursive: Extend the search to grandchildren? If not specified, a global (customizable) default value is used.
        @type recursive: bool
        @param allow_empty: Allow empty results. If not specified, a global (customizable) default value is used.
        @type allow_empty: bool
        @param assert_count: Check the number of matched declarations in the resulting Decl object
        @type assert_count: int
        @return: Returns a Decl object that may reference an arbitrary number of declarations.
        @rtype: IDecl
        @see: Namespace(), Class(), Method(), Function(), Enum()
        """
        # NOTE: this is Python 2 code ('raise Exc, msg' statements below).
        global allow_empty_queries, default_recursive

        # itype accumulates the type flags *implied* by other arguments
        # (e.g. retval implies CALLABLE); filters collects the individual
        # filter objects that get AND-ed together at the end.
        itype = 0
        filters = []

        if recursive==None:
            recursive = default_recursive
        if allow_empty==None:
            allow_empty = allow_empty_queries

        # Helper: append filtercls(arg) to filters; a list argument becomes
        # an OR over one filter instance per list element.
        def addFilter(arg, filtercls):
            if arg!=None:
                if _type(arg)==list:
                    filters.append(OrFilter(map(lambda x: filtercls(x), arg)))
                else:
                    filters.append(filtercls(arg))

        # name filter
        addFilter(name, NameFilter)
        # fullname filter
        addFilter(fullname, FullNameFilter)
        # retval filter
        if retval!=None:
            addFilter(retval, RetValFilter)
            itype |= CALLABLE
        # args filter
        if args!=None:
            filters.append(ArgsFilter(args))
            itype |= CALLABLE
        # anyarg filter
        if anyarg!=None:
            raise NotImplementedError, "anyarg filter is not yet implemented"
        # signature filter
        if signature!=None:
            raise NotImplementedError, "signature filter is not yet implemented"
        # header filter
        addFilter(header, HeaderFilter)
        # headerdir filter
        addFilter(headerdir, HeaderDirFilter)
        # accesstype filter
        if accesstype!=None:
            addFilter(accesstype, AccessTypeFilter)
            itype |= CLASS_MEMBER
        # const filter
        if const!=None:
            addFilter(const, ConstFilter)
        # virtuality filter
        if virtuality!=None:
            addFilter(virtuality, VirtualityFilter)
            itype |= CLASS_MEMBER
        # custom filters
        if filter!=None:
            if _type(filter)==list:
                filters.extend(map(lambda x: CustomFilter(x), filter))
            else:
                filters.append(CustomFilter(filter))

        # XXX: fold the implied flags into the explicit type filter, but only
        # when the caller did not already constrain the CALLABLE flags.
        if itype!=0:
            if type==None:
                type = 0
            if (type & CALLABLE)==0:
                type |= itype
        addFilter(type, TypeFilter)

        # Add parent filters... (restrict the search to children — or, when
        # recursive, grandchildren — of the declarations already referenced)
        pfs = []
        for decl in self.decl_handles:
            pfs.append(ParentFilter(decl, recursive))
        if len(pfs)>0:
            if len(pfs)==1:
                filters.append(pfs[0])
            else:
                filters.append(OrFilter(pfs))

        # Create the final filter by combining the individual filters
        # with AND...
        if len(filters)==0:
            filter = TrueFilter()
        elif len(filters)==1:
            filter = filters[0]
        else:
            filter = AndFilter(filters)

#        print "Filter:",filter
        if len(self.decl_handles)==0:
            decls = []
        else:
            decls = selection.select(self.decl_handles, filter)

        # Wrap the matches and enforce the emptiness/count contracts.
        res = IDecl(decls, str(filter), modulebuilder=self.modulebuilder)
        count = res.count
        if allow_empty and count==0:
            return res
        if count==0:
            raise RuntimeError, "Query produced no results (filter: %s)"%filter
        res_count = res.count
        # If all contained declarations are from one single set of overloads
        # then treat that as one single declaration
        if res._checkOverloads():
            res_count = 1
        if assert_count!=None:
            if res_count!=assert_count:
                raise RuntimeError, "Query produced the wrong number of results (%d instead of %d)"%(res_count, assert_count)
        return res
 def get_clusters(self, spikes=None, clusters=None):
     """Return cluster assignments for the requested spikes.

     Passing *clusters* overrides *spikes* with every spike of those
     clusters; with neither given, the current spike selection is used.
     """
     chosen = spikes
     if clusters is not None:
         chosen = get_spikes_in_clusters(clusters, self.clusters)
     if chosen is None:
         chosen = self.spikes_selected
     return select(self.clusters, chosen)
Example #42
0
 def get_features(self, spikes=None):
     """Return the feature vectors of the given spikes."""
     feats = self.features
     return select(feats, spikes)
 def get_channel_names(self, channels=None):
     """Return the name of each requested channel."""
     names = self.channel_names
     return select(names, channels)
 def get_traces(self):
     """Return the raw data, passed through select()."""
     data = self.raw
     return select(data)
 def get_cluster_color(self, cluster):
     """Return *cluster*'s color, or 0 when the index is out of range."""
     try:
         color = select(self.cluster_colors, cluster)
     except IndexError:
         color = 0
     return color
 def get_channel_groups(self, channels=None):
     """Return the group of each channel (current selection when None)."""
     chosen = self.channels_selected if channels is None else channels
     return select(self.channel_groups, chosen)
 def get_group_colors(self, groups=None):
     """Return the color of each requested group."""
     palette = self.group_colors
     return select(palette, groups)
 def get_channel_group_colors(self, channel_groups=None):
     """Return the colors of the requested channel groups."""
     colors = self.channel_group_colors
     return select(colors, channel_groups)
 def get_cluster_sizes(self, clusters=None):
     """Return the spike count of each requested cluster.

     Counts come from the precomputed self.counter mapping; *clusters*
     defaults to the current cluster selection.
     """
     if clusters is None:
         clusters = self.clusters_selected
     counts = pd.Series(self.counter, dtype=np.int32)
     return select(counts, clusters)
Example #50
0
 def get_freq(self):
     """Return self.freq, passed through select()."""
     freq = self.freq
     return select(freq)
 def get_channel_color(self, channel):
     """Return *channel*'s color, or 0 when the index is out of range."""
     try:
         color = select(self.channel_colors, channel)
     except IndexError:
         color = 0
     return color
Example #52
0
 def get_cluster_color(self, cluster):
     """Return the color of *cluster*; falls back to 0 on IndexError."""
     colors = self.cluster_colors
     try:
         return select(colors, cluster)
     except IndexError:
         return 0
 def get_channels_visible(self, channels=None):
     """Return the visibility flags of the requested channels."""
     visible = self.channels_visible
     return select(visible, channels)
Example #54
0
 def get_cluster_groups(self, clusters=None):
     """Return the group of each cluster; defaults to the selection."""
     chosen = self.clusters_selected if clusters is None else clusters
     return select(self.cluster_groups, chosen)
 def get_channel_groups_visible(self, channel_groups=None):
     """Return the visibility flags of the requested channel groups."""
     visible = self.channel_groups_visible
     return select(visible, channel_groups)
Example #56
0
 def get_group_colors(self, groups=None):
     """Return the colors of the requested groups."""
     colors = self.group_colors
     return select(colors, groups)
 def get_channel_group_names(self, channel_groups=None):
     """Return the names of the requested channel groups."""
     labels = self.channel_group_names
     return select(labels, channel_groups)
Example #58
0
 def get_group_names(self, groups=None):
     """Return the names of the requested groups."""
     labels = self.group_names
     return select(labels, groups)
 def get_freq(self):
     """Return the frequency data via select()."""
     return select(self.freq)
Example #60
0
 def get_cluster_sizes(self, clusters=None):
     """Return the number of spikes per requested cluster.

     Built from the precomputed self.counter; a missing *clusters*
     argument means the current cluster selection.
     """
     requested = self.clusters_selected if clusters is None else clusters
     sizes = pd.Series(self.counter, dtype=np.int32)
     return select(sizes, requested)