Example #1
    def execute():
        """Fix Hardcoded Tray icons."""
        action = App.get("action")
        apps = App.get_supported_apps()
        done = []
        total_time = 0
        counter = 0
        total_counter = len(apps)
        for app in apps:
            app_name = app.name
            delta = app.do_action(action)
            total_time += delta

            if app.success:
                counter += 1
                if app_name not in done:
                    progress(counter, total_counter, delta, app_name)
                    done.append(app_name)
            else:
                # Apps that failed are excluded from the reported total.
                total_counter -= 1

        if apps:
            print(_("Took {:.2f}s to finish the tasks").format(total_time))
        elif action == Action.APPLY:
            print(_("No apps to fix!"))
        elif action == Action.CLEAR_CACHE:
            print(_("No cache found."))
        else:
            print(_("No apps to revert!"))
Example #2
    def train(self, training_set_inputs, training_set_outputs,
              number_of_training_iterations):
        # Unusual but valid: the loop variable is an instance attribute, so the
        # current iteration number stays accessible as self.iteration.
        for self.iteration in range(number_of_training_iterations):
            # Pass the training set through our neural network
            output_from_layer_1, output_from_layer_2 = self.think(
                training_set_inputs)

            # Calculate the error for layer 2 (The difference between the desired output
            # and the predicted output).
            layer2_error = training_set_outputs - output_from_layer_2
            layer2_delta = layer2_error * self.__sigmoid_derivative(
                output_from_layer_2)

            # Calculate the error for layer 1 (By looking at the weights in layer 1,
            # we can determine by how much layer 1 contributed to the error in layer 2).
            layer1_error = layer2_delta.dot(self.layer2.synaptic_weights.T)
            layer1_delta = layer1_error * self.__sigmoid_derivative(
                output_from_layer_1)

            # Calculate how much to adjust the weights by.
            layer1_adjustment = training_set_inputs.T.dot(layer1_delta)
            layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)

            # Adjust the weights.
            self.layer1.synaptic_weights += layer1_adjustment
            self.layer2.synaptic_weights += layer2_adjustment

            # Every 5000 iterations (skipping iteration 0), report progress.
            outputs = self.iteration % 5000 == 0 and self.iteration != 0
            if outputs:

                # Print the current weights and a guess for the current iteration.
                self.print_weights(self.iteration)
                hidden_state, output = self.think(self.arrGuess)
                print("\u001b[33m" + str(output) + "\u001b[0m\n")

                # When debugging with display enabled, plot the current guess as well.
                if debug and display:

                    #  add current guess to graph
                    plt.plot(self.iteration, output, '-o')
                    plt.show()
                    plt.pause(0.001)

            utils.progress(self.iteration,
                           number_of_training_iterations,
                           status="I'm thinking...")
Example #3
File: tools.py Project: guilhermemg/bmtas
def train_branched(local_rank, world_size, device, start_epoch, max_epochs,
                   tasks, trainloader, model, loss, optimizer, scheduler,
                   exp_dir):

    writer = SummaryWriter(log_dir=exp_dir) if local_rank == 0 else None

    iter_per_epoch = len(
        trainloader.dataset) // (trainloader.batch_size * max(1, world_size))
    total_iter = iter_per_epoch * max_epochs

    model.train()
    for epoch in range(start_epoch, max_epochs + 1):

        if world_size > 1:
            trainloader.sampler.set_epoch(epoch)

        for batch_idx, samples in enumerate(trainloader):

            inputs = samples['image'].to(device, non_blocking=True)
            target = {
                task: samples[task].to(device, non_blocking=True)
                for task in tasks
            }

            optimizer.zero_grad()

            output = model(inputs)
            tot_loss = loss(output, target)
            tot_loss.backward()

            optimizer.step()

            if (batch_idx + 1) % 100 == 0 and local_rank == 0:
                current_loss = tot_loss.item()
                n_iter = (epoch - 1) * iter_per_epoch + batch_idx + 1
                print('Train Iterations: {}, Loss: {}'.format(
                    utils.progress(n_iter, total_iter), current_loss))
                writer.add_scalar('loss_current', current_loss, n_iter)
                writer.add_scalar('learning_rate',
                                  optimizer.param_groups[0]['lr'], n_iter)

        scheduler.step()

        if local_rank == 0:
            # save model
            state = {
                'state_dict': model.state_dict(),
                'tasks': tasks,
                'branch_config': model.branch_config,
                'epoch': epoch,
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict()
            }
            torch.save(state, Path(exp_dir) / 'checkpoint.pth')
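
In this example utils.progress(n_iter, total_iter) is interpolated into the log string, so it presumably returns a short printable summary rather than writing to stdout itself. A minimal sketch under that assumption:

    def progress(n_iter, total_iter):
        # Return a compact "current/total (percent)" string for logging.
        return '{}/{} ({:.1f}%)'.format(n_iter, total_iter, 100.0 * n_iter / total_iter)
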
Example #4
File: tools.py Project: guilhermemg/bmtas
def train_search(device, start_epoch, max_epochs, tasks, trainloader_weight,
                 trainloader_arch, model, loss, optimizer_weight,
                 optimizer_arch, exp_dir):

    writer = SummaryWriter(log_dir=exp_dir)

    iter_per_epoch = len(
        trainloader_weight.dataset) // trainloader_weight.batch_size
    total_iter = iter_per_epoch * max_epochs
    delay_epochs = max_epochs // 20

    model.train()
    for epoch in range(start_epoch, max_epochs + 1):

        model.warmup_flag = (epoch <= delay_epochs)
        # set the gumbel temperature according to a linear schedule
        model.gumbel_temp = min(
            5.0 - (epoch - delay_epochs - 1) /
            (max_epochs - delay_epochs - 1) * (5.0 - 0.1), 5.0)

        arch_loss = 0
        arch_counter = 0

        if epoch > delay_epochs:
            print('modifying architecture...')

            # we reset the arch optimizer state
            optimizer_arch.state = defaultdict(dict)

            # we use current batch statistics in search period
            model.freeze_encoder_bn_running_stats()

            for samples_search in trainloader_arch:

                inputs_search = samples_search['image'].to(device,
                                                           non_blocking=True)
                target_search = {
                    task: samples_search[task].to(device, non_blocking=True)
                    for task in tasks
                }

                optimizer_arch.zero_grad()

                for task in tasks:
                    # many images don't have human parts annotations, skip those
                    uniq = torch.unique(target_search[task])
                    if len(uniq) == 1 and uniq[0] == 255:
                        continue

                    output = model(inputs_search, task=task)
                    tot_loss = loss(output, target_search, task=task)
                    tot_loss.backward()

                    arch_loss += tot_loss.item()
                    arch_counter += 1

                optimizer_arch.step()

            # we reset the main optimizer state because arch has changed
            optimizer_weight.state = defaultdict(dict)

            # we should reset bn running stats
            model.unfreeze_encoder_bn_running_stats()
            model.reset_encoder_bn_running_stats()

        for batch_idx, samples in enumerate(trainloader_weight):

            inputs = samples['image'].to(device, non_blocking=True)
            target = {
                task: samples[task].to(device, non_blocking=True)
                for task in tasks
            }

            current_loss = 0
            counter = 0

            for task in tasks:
                # many images don't have human parts annotations, skip those
                uniq = torch.unique(target[task])
                if len(uniq) == 1 and uniq[0] == 255:
                    continue

                optimizer_weight.zero_grad()

                output = model(inputs, task=task)
                tot_loss = loss(output, target, task=task, omit_resource=True)
                tot_loss.backward()

                optimizer_weight.step()

                current_loss += tot_loss.item()
                counter += 1

            if (batch_idx + 1) % 100 == 0:
                n_iter = (epoch - 1) * iter_per_epoch + batch_idx + 1
                print('Train Iterations: {}, Loss: {:.4f}'.format(
                    utils.progress(n_iter, total_iter),
                    current_loss / counter))
                writer.add_scalar('loss_current', current_loss / counter,
                                  n_iter)
                writer.add_scalar('arch_loss',
                                  arch_loss / max(1, arch_counter), n_iter)
                writer.add_scalar('gumbel_temp', model.gumbel_temp, n_iter)
                for name, param in model.named_arch_parameters():
                    writer.add_image(name,
                                     torch.nn.functional.softmax(param.data,
                                                                 dim=-1),
                                     n_iter,
                                     dataformats='HW')

        # save model
        state = {
            'state_dict': model.state_dict(),
            'tasks': tasks,
            'epoch': epoch,
            'optimizer_weight': optimizer_weight.state_dict(),
            'optimizer_arch': optimizer_arch.state_dict(),
        }
        torch.save(state, Path(exp_dir) / 'checkpoint.pth')

    branch_config = model.get_branch_config()
    utils.write_json({'config': branch_config},
                     Path(exp_dir) / 'branch_config.json')
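
The Gumbel-softmax temperature above follows a linear schedule: it stays at 5.0 through the warm-up epochs and then decays linearly, reaching 0.1 at the final epoch. The same expression, pulled out as a standalone helper for clarity:

    def gumbel_temperature(epoch, delay_epochs, max_epochs):
        # 5.0 while epoch <= delay_epochs + 1, then a linear decay that
        # reaches 0.1 when epoch == max_epochs.
        return min(
            5.0 - (epoch - delay_epochs - 1) /
            (max_epochs - delay_epochs - 1) * (5.0 - 0.1), 5.0)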