Example #1
def run_benchmark(t=5000000):
    """Time t calls of each activation function and its derivative."""

    #(label, function, derivative) triples to benchmark
    benchmarks = [
        ('SIGMOID', sigmoid_normal, sigmoid_derivative),
        ('TANH', tanh_normal, tanh_derivative),
        ('LINEAR-RELU', linearRELU_normal, linearRELU_derivative),
        ('LEAKY-RELU', leakyRELU_normal, leakyRELU_derivative),
    ]

    for label, normal, derivative in benchmarks:
        print(label)
        for fn in (normal, derivative):
            clock = timer()
            clock.start()
            for _ in range(t):
                fn(10)
            print(clock.stop())
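
The snippet above assumes a timer class with start()/stop() methods and the activation functions defined elsewhere in the same module. A minimal sketch of what those helpers could look like (the implementations below are assumptions for illustration, not the original code; the tanh and ReLU variants would follow the same pattern):

import math
import time

class timer:
    #minimal stopwatch matching the start()/stop() usage above
    def start(self):
        self._t0 = time.perf_counter()

    def stop(self):
        #elapsed seconds since start()
        return time.perf_counter() - self._t0

def sigmoid_normal(z):
    return 1.0 / (1.0 + math.exp(-z))

def sigmoid_derivative(z):
    s = sigmoid_normal(z)
    return s * (1.0 - s)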
Example #2
#!/usr/bin/env python3

import time
import functions

# ===== config =====

# tests to run
tests = {
    # testName: (testFunction, arguments)
    "Fibonacci": (functions.fib, 30)
}

# number of times to run
num = 10

if __name__ == "__main__":
    for testName, (testF, *args) in tests.items():
        print("===== running %s test =====" % testName)
        # print("Parameters:", *args)
        functions.timer(num, testF, *args)
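
The runner imports a local functions module that is not shown. A minimal sketch consistent with the calls above (the body of functions.timer here is an assumption inferred from its call signature, not the original implementation):

#functions.py -- hypothetical module backing the runner above
import time

def fib(n):
    #naive recursive Fibonacci, deliberately slow so there is something to time
    return n if n < 2 else fib(n - 1) + fib(n - 2)

def timer(num, f, *args):
    #run f(*args) num times and report total and average wall-clock time
    start = time.perf_counter()
    for _ in range(num):
        f(*args)
    elapsed = time.perf_counter() - start
    print("%d runs: %.4f s total, %.4f s average" % (num, elapsed, elapsed / num))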
Example #3

    def train_bultmann(self,
                       tr_loader,
                       tr_dataset_length,
                       Adam=True,
                       scheduler=True):

        since = time.time()
        print('Training the network {}'.format(
            self.network.__class__.__name__))
        print('Network Architecture \n{}'.format(self.network))
        print('Network Criterion {}'.format(self.network_criterion))
        list_of_network_loss = []
        list_of_clustering_loss = []
        list_of_total_loss = []
        list_of_losses = []
        learning_rates = []
        list_of_centers = []
        list_of_ranks_of_center_distances = []
        list_of_center_distances = []

        if Adam:
            optimizer = torch.optim.Adam(self.network.parameters(),
                                         lr=self.lr,
                                         weight_decay=0.0)

        else:
            optimizer = torch.optim.SGD(self.network.parameters(),
                                        lr=self.lr,
                                        momentum=0.0,
                                        weight_decay=0.0,
                                        nesterov=False)

        if scheduler:
            #use a distinct name so the StepLR object does not shadow the scheduler flag
            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                           step_size=1,
                                                           gamma=0.1)

        for epoch in range(self.n_epochs):

            embedded_representation = []
            batched_center_index = 0
            total_combined_loss = 0.0
            total_network_loss = 0.0
            total_clustering_loss = 0.0
            labels = np.empty((0, 1), float)

            for batch in tr_loader:

                #extract the sequence and label from the batch, make predictions, and return the bottleneck

                sequences = batch[:, :, 0:self.sequence_length].float()
                batch_labels = batch[:, :, self.sequence_length]
                labels = np.append(labels, batch_labels.numpy(), axis=0)
                target_sequences = sequences.clone()
                predictions, bottleneck = self.network(sequences)

                embedded_representation.append(bottleneck.clone().detach())
                batch_embeddings = torch.cat(embedded_representation)

                #compute the network loss

                network_loss = self.network_criterion(predictions,
                                                      target_sequences)

                #set condition for pretrain mode

                if epoch <= self.no_of_pretrain_epochs:

                    #pretrain mode

                    clustering_loss = torch.zeros([1, 1], dtype=torch.float64)
                    combined_loss = network_loss  # + self.alpha*clustering_loss   # defining the combined loss
                    optimizer.zero_grad()

                    #calculating the gradients and taking a step with only the network loss, as the clustering loss is zero

                    combined_loss.backward(
                        retain_graph=True
                    )  # retaining the pytorch computation graph so that backward can be done twice
                    optimizer.step()

                else:

                    #joint training mode

                    #batched_center_designation comes from the most recent cluster-center update (see the clustering step below)
                    clustering_loss = self.clustering_criterion(
                        bottleneck,
                        batched_center_designation[batched_center_index])
                    batched_center_index += 1  # incrementing the batched center index
                    combined_loss = (
                        1 - self.alpha
                    ) * network_loss + self.alpha * clustering_loss
                    optimizer.zero_grad()

                    #calculating the gradients but not taking a step

                    combined_loss.backward(retain_graph=True)

                    #updating the weights of the clustering friendly channels wrt combined loss

                    bottleneck_layer = functions.get_bottleneck_name(
                        self.network)

                    #train_reporter.print_grads(network)

                    with torch.no_grad():
                        for name, parameters in self.network.named_parameters():
                            if name == bottleneck_layer:
                                ranked_channels = torch.from_numpy(
                                    ranks_of_center_distances)
                                parameters.grad[torch.where(
                                    ranked_channels <=
                                    self.no_of_clustering_channels)] = 0.0

                    optimizer.step()

                    #updating the weights of the rest of the channels wrt the network loss

                    optimizer.zero_grad()
                    network_loss.backward()

                    with torch.no_grad():
                        for name, parameters in self.network.named_parameters():
                            if name == bottleneck_layer:
                                ranked_channels = torch.from_numpy(
                                    ranks_of_center_distances)
                                parameters.grad[torch.where(
                                    ranked_channels >
                                    self.no_of_clustering_channels)] = 0.0

                    optimizer.step()

                total_network_loss += network_loss.item()
                total_clustering_loss += clustering_loss.item()
                total_combined_loss += combined_loss.item()
            #extract embeddings
            embeddings = batch_embeddings

            #make list of per-batch average losses (same normalization as the epoch printout below)

            list_of_network_loss.append(
                total_network_loss / (tr_dataset_length / self.batch_size))
            list_of_clustering_loss.append(
                total_clustering_loss / (tr_dataset_length / self.batch_size))
            list_of_total_loss.append(
                total_combined_loss / (tr_dataset_length / self.batch_size))

            #make cluster update interval array

            cluster_update = np.arange(self.no_of_pretrain_epochs,
                                       self.n_epochs,
                                       self.cluster_update_interval)

            #clustering: refresh the cluster centers on scheduled update epochs
            if epoch in cluster_update:
                print('Updating Cluster Centers')
                center_designation_pre = []
                cluster_label_pre = []
                centers_pre = []
                no_of_channels = embeddings.shape[1]

                for i in range(no_of_channels):
                    channel = embeddings[:, i, :].numpy()
                    choice_cluster, initial_centers, cluster_ass = functions.kmeansalter(
                        channel, self.n_clusters)
                    cluster_label_pre.append(
                        torch.from_numpy(choice_cluster).unsqueeze(0).transpose(
                            1, 0))
                    centers_pre.append(
                        torch.from_numpy(initial_centers).unsqueeze(0).transpose(
                            1, 0))
                    center_designation_pre.append(
                        cluster_ass.unsqueeze(0).transpose(1, 0))

                #concatenate the per-channel results once, after the loop
                cluster_label = torch.cat(cluster_label_pre, dim=1)
                centers = torch.cat(centers_pre, dim=1)
                center_designation = torch.cat(center_designation_pre, dim=1)

                batched_center_designation = list(
                    functions.divide_batches(center_designation,
                                             self.batch_size))
                center_distances, ranks_of_center_distances = functions.rank_channels(
                    centers)

            print(
                'Epoch : {}/{} Network Loss : {} Clustering Loss : {} Total Loss : {}'
                .format(epoch + 1, self.n_epochs,
                        (total_network_loss /
                         (tr_dataset_length / self.batch_size)),
                        (total_clustering_loss /
                         (tr_dataset_length / self.batch_size)),
                        (total_combined_loss /
                         (tr_dataset_length / self.batch_size))))

            if scheduler:
                lr_scheduler.step()  #decay the learning rate once per epoch

        list_of_centers.append(centers.numpy())
        list_of_ranks_of_center_distances.append(ranks_of_center_distances)
        list_of_center_distances.append(center_distances)
        list_of_losses.append(list_of_network_loss)
        list_of_losses.append(list_of_clustering_loss)
        list_of_losses.append(list_of_total_loss)
        end = time.time()
        hours, minutes, seconds = functions.timer(since, end)
        print("Time taken {:0>2}:{:0>2}:{:05.2f}".format(
            int(hours), int(minutes), seconds))
        return (self.network, optimizer, list_of_network_loss,
                list_of_clustering_loss, list_of_total_loss, list_of_losses,
                embeddings, labels, list_of_centers,
                list_of_ranks_of_center_distances, list_of_center_distances)
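
The alternating update in train_bultmann hinges on zeroing the gradients of selected bottleneck channels before each optimizer step. A standalone, minimal illustration of that masking idea (the parameter, ranks, and threshold below are illustrative assumptions, not the original pipeline):

import torch

#toy parameter with six output channels
w = torch.nn.Parameter(torch.randn(6, 3))
opt = torch.optim.SGD([w], lr=0.1)

loss = (w ** 2).sum()
loss.backward()

#channel ranks, in the spirit of ranks_of_center_distances; freeze channels ranked <= 3
ranks = torch.tensor([1, 2, 3, 4, 5, 6])
with torch.no_grad():
    w.grad[torch.where(ranks <= 3)] = 0.0

opt.step()  #channels ranked 1-3 keep their weights; 4-6 are updated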