Example #1
0
class CarbonUsageTracking(Callback):
    """
    tf.keras Callback for the Carbontracker package.
    See https://github.com/lfwa/carbontracker.

    The CarbonTracker instance is created lazily on the first epoch (so
    that all devices are discoverable by then) and, optionally, the
    accumulated energy/CO2 figures are injected into the keras logs at
    the end of every epoch.
    """
    def __init__(self,
                 epochs,
                 add_to_logs=True,
                 monitor_epochs=-1,
                 epochs_before_pred=-1,
                 devices_by_pid=True,
                 **additional_tracker_kwargs):
        """
        Accepts parameters as per CarbonTracker.__init__
        Sets other default values for key parameters.

        Args:
            add_to_logs: bool, Add total_energy_kwh and total_co2_g to the keras logs after each epoch
            For other arguments, please refer to CarbonTracker.__init__
        """
        super().__init__()
        self.tracker = None  # created lazily in on_epoch_begin
        self.add_to_logs = bool(add_to_logs)
        # Keyword arguments forwarded verbatim to CarbonTracker(...)
        self.parameters = {
            "epochs": epochs,
            "monitor_epochs": monitor_epochs,
            "epochs_before_pred": epochs_before_pred,
            "devices_by_pid": devices_by_pid
        }
        self.parameters.update(additional_tracker_kwargs)

    def on_train_end(self, logs=None):
        """ Ensure actual consumption is reported """
        # Guard: training may end before any epoch started (tracker not
        # yet created) — previously this raised AttributeError on None.
        if self.tracker is not None:
            self.tracker.stop()

    def on_epoch_begin(self, epoch, logs=None):
        """ Start tracking this epoch """
        if self.tracker is None:
            # At this point all CPUs should be discoverable
            self.tracker = CarbonTracker(**self.parameters)
        self.tracker.epoch_start()

    def on_epoch_end(self, epoch, logs=None):
        """ End tracking this epoch """
        self.tracker.epoch_end()
        # Only mutate the caller-supplied logs dict; the old `logs={}`
        # mutable default was shared across calls (classic Python pitfall).
        if self.add_to_logs and logs is not None:
            energy_kwh = self.tracker.tracker.total_energy_per_epoch().sum()
            co2eq_g = self.tracker._co2eq(energy_kwh)
            logs["total_energy_kwh"] = round(energy_kwh, 6)
            logs["total_co2_g"] = round(co2eq_g, 6)
Example #2
0
    print("Model:" + fName + "Number of parameters:%d" % (nParam), file=f)
# Initialise the experiment log file (makeLogFile is defined earlier in this file).
makeLogFile(logFile)

# Binary cross-entropy averaged over the batch; Dice coefficient used as the
# accuracy metric (dice is defined elsewhere in this file — TODO confirm).
criterion = nn.BCELoss(reduction='mean')
accuracy = dice

net.to(device)
# Adam with L2 regularisation via weight_decay; learning rate from the CLI args.
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, weight_decay=1e-5)
nTrain = len(train_loader)  # number of training batches per epoch
nValid = len(valid_loader)  # number of validation batches per epoch

minLoss = 1e5   # best validation loss so far (sentinel: large value)
convIter = 0    # epochs since the last improvement
convCheck = 20  # patience window — presumably used for early stopping below; verify
# Per-epoch energy/CO2 tracking; monitor_epochs=-1 monitors every epoch.
tracker = CarbonTracker(epochs=args.epochs,
                        log_dir='../logs/',
                        monitor_epochs=-1)
beta = args.beta  # weight for the KL term — presumably applied to `kl` below; verify

for epoch in range(args.epochs):
    tracker.epoch_start()  # begin carbon measurement for this epoch
    trLoss = []  # per-batch training losses
    vlLoss = []  # per-batch validation losses
    trAcc = []   # per-batch training accuracies
    vlAcc = []   # per-batch validation accuracies
    t = time.time()  # epoch wall-clock start

    for step, (patch, mask) in enumerate(train_loader):
        patch = patch.to(device)
        mask = mask.to(device)
        # The network returns a KL-divergence term alongside the prediction.
        kl, pred = net(patch)
Example #3
0
    print(f"Using Adam w/ learning rate = {args.lr:.1e}", file=f)
    print("Local feature map dim: %d, nCh: %d, B:%d" %
          (feature_dim, nCh, batch_size),
          file=f)

model = model.to(device)
nValid = len(loader_valid)  # number of validation batches
nTrain = len(loader_train)  # number of training batches
nTest = len(loader_test)    # number of test batches

maxAuc = 0      # best validation AUC so far — presumably; verify against loop below
minLoss = 1e3   # best loss so far (sentinel: large value)
convCheck = 20  # patience window for convergence checking — TODO confirm
convIter = 0    # iterations since the last improvement
# Per-epoch energy/CO2 tracking; monitor_epochs=-1 monitors every epoch.
tracker = CarbonTracker(epochs=args.num_epochs,
                        log_dir='carbontracker/',
                        monitor_epochs=-1)

# Let's start training!
for epoch in range(args.num_epochs):
    tracker.epoch_start()  # begin carbon measurement for this epoch
    running_loss = 0.
    running_acc = 0.
    t = time.time()  # epoch wall-clock start
    model.train()
    predsNp = []   # accumulated predictions for epoch-level metrics
    labelsNp = []  # accumulated ground-truth labels
    bNum = 0       # batch counter
    for i, (inputs, labels) in enumerate(loader_train):
        #           optimizer.zero_grad()
        for p in model.parameters():
Example #4
0
 def on_epoch_begin(self, epoch, logs=None):
     """Start carbon tracking for this epoch.

     The CarbonTracker is created lazily on the first epoch so that all
     devices are discoverable by then.

     Args:
         epoch: int, index of the epoch that is starting (unused here).
         logs: optional dict supplied by keras (unused). Default changed
             from the mutable ``{}`` (shared across calls) to ``None``.
     """
     if self.tracker is None:
         # At this point all CPUs should be discoverable
         self.tracker = CarbonTracker(**self.parameters)
     self.tracker.epoch_start()
Example #5
0
def train(model, criterion, train_loader, test_loader, max_epochs=50,
          learning_rate=0.001):
    """Train ``model`` with SGD while tracking carbon usage per epoch.

    Args:
        model: torch module exposing a ``.device`` attribute.
        criterion: loss function applied to ``(outputs, labels)``.
        train_loader: DataLoader for the training phase.
        test_loader: DataLoader used as the validation phase.
        max_epochs: number of epochs to run.
        learning_rate: SGD learning rate (momentum fixed at 0.9).

    Side effects:
        Saves the best-so-far weights to ``models2/model-<epoch>.weights``
        and prints per-phase loss/accuracy to stdout.
    """
    # components="gpu": only GPU energy is measured; verbose=2 for full logs.
    tracker = CarbonTracker(epochs=max_epochs, monitor_epochs=max_epochs,
                            components="gpu", verbose=2)

    dataloaders = {
        "train": train_loader, "val": test_loader
    }

    optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)

    best_acc = 0
    try:
        for epoch in range(max_epochs):

            tracker.epoch_start()

            print('Epoch {}/{}'.format(epoch, max_epochs - 1))

            # Each epoch has a training and validation phase
            for phase in ['val', 'train']:
                # Toggle train/eval behaviour (dropout, batchnorm, ...).
                model.train(phase == 'train')

                running_loss = 0.0
                running_corrects = 0

                for data in dataloaders[phase]:
                    # get the inputs
                    inputs, labels = data
                    labels = labels.view(labels.size(0)).to(model.device)

                    inputs, labels = Variable(inputs), Variable(labels)
                    optimizer.zero_grad()
                    inputs = inputs.to(model.device)

                    outputs = model(inputs).to(model.device)
                    _, preds = torch.max(outputs.data, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                    # statistics
                    running_loss += loss.data * inputs.size(0)
                    running_corrects += torch.sum(preds == labels.data)

                # NOTE(review): divides by the number of *batches*, not
                # samples — kept as-is to preserve the reported numbers.
                epoch_loss = running_loss / len(dataloaders[phase])
                epoch_acc = running_corrects / len(dataloaders[phase])

                print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                    phase, epoch_loss, epoch_acc))

                direct = os.path.join("models2")
                os.makedirs(direct, exist_ok=True)

                # deep copy the model
                if phase == 'val' and epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())
                    name = "models2/model-%s.weights" % epoch
                    # torch.save creates the file itself; the previous
                    # open(name, "w+")/close() pre-touch was redundant.
                    torch.save(best_model_wts, name)

            tracker.epoch_end()
    finally:
        # Ensure actual consumption is reported even when training stops
        # early (exception / KeyboardInterrupt) — previously stop() was
        # only reached on a clean run despite the comment requiring it.
        tracker.stop()

    print('Training complete')
    print('Best val Acc: {:4f}'.format(best_acc))
Example #6
0
                  n_actions=4,
                  fc1_dims=2048,
                  fc2_dims=1024)
    if len(sys.argv) > 1:
        # Optional CLI argument: path to pre-trained weights to resume from.
        model_weight = sys.argv[1]
        agent.load_model(model_weight)

    n_games = 2000  # total training episodes

    frame_number = 0
    tile_visited_history = []      # tiles visited, per episode
    avg_tile_visited_history = []  # running average across episodes

    # Track GPU energy only; start predicting totals after 10% of the
    # episodes; monitor every episode.
    tracker = CarbonTracker(epochs=n_games,
                            epochs_before_pred=n_games // 10,
                            monitor_epochs=n_games,
                            components="gpu",
                            verbose=2)
    max_tiles = 0  # best tile count so far — presumably; verify against loop body
    for ep in range(n_games):
        tracker.epoch_start()  # one tracker "epoch" == one episode
        done = False
        observation = env.reset()
        env.seed(SEED)  # re-seed each episode for reproducibility
        score = 0

        while not done:
            if ep >= 0:
                # NOTE(review): condition is always true — renders every episode.
                env.render()
            pre = env.tile_visited_count  # tiles visited before this step
            if frame_number == 0: