import tempfile

import torch
from torch.cuda.amp import autocast
from tqdm import tqdm


def val(epoch, model, dataloader, loss_fn, writer, world_size, best_acc):
    param = next(model.parameters())
    device = param.device  # torch.device
    cuda = param.is_cuda  # bool
    model.eval()
    pbar = tqdm(enumerate(dataloader), total=len(dataloader), desc='val')
    running_loss = 0.0
    running_corrects = 0
    num_samples = 0
    for i, (inputs, labels) in pbar:
        inputs = inputs.to(device)
        labels = labels.to(device)
        with autocast(enabled=cuda):
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            loss = loss_fn(outputs, labels)
            loss *= world_size  # for DDP world_size > 1
        mem = torch.cuda.memory_reserved() / 1E9 if cuda else 0
        running_loss += loss.item() * inputs.size(0)
        running_corrects += torch.sum(preds == labels.data)
        num_samples += inputs.size(0)
        # Rebuild the progress-bar postfix from scratch each batch.
        pf = f'mem: {mem:.3f}GB, '
        pf += f'loss: {running_loss / num_samples:.4f}, '
        pf += f'acc: {running_corrects.double() / num_samples:.4f}, '
        pf += f'epoch: {epoch}'
        pbar.set_postfix_str(pf)
    epoch_acc = running_corrects.double() / num_samples
    if epoch_acc > best_acc:
        best_acc = epoch_acc
        CHECKPOINT_PATH = tempfile.gettempdir() + '/model.checkpoint'
        # All processes should see the same parameters, as they all start
        # from the same random parameters and gradients are synchronized
        # in backward passes. Therefore, saving in one process is
        # sufficient.
        # model state_dict
        msd = model.module.state_dict() if is_parallel(model) else model.state_dict()
        torch.save(msd, CHECKPOINT_PATH)
    return best_acc
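# Note: `is_parallel` is not defined in the snippet above. A minimal
# sketch consistent with how it is used there (unwrapping model.module
# before saving) is the isinstance check common in PyTorch projects;
# this is an assumption, not necessarily the original definition:
import torch.nn as nn

def is_parallel(model):
    # True if the model is wrapped in DataParallel or
    # DistributedDataParallel, so its weights live at model.module.
    return isinstance(model, (nn.DataParallel,
                              nn.parallel.DistributedDataParallel))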
def sort_parallel(self, n):
    # There are n + 1 classes, but the last one is "found" automatically
    # once all the others have been found.
    for cls in range(n):
        # Pick a representative of cls...
        idx_repr = cls * n
        idx_to_swap = cls * n + 1
        idx_to_check = cls * n + 1
        # ...and search for its remaining n - 1 parallel friends.
        while idx_to_swap - idx_repr < n:
            if is_parallel(self.incidence_matrix[idx_repr],
                           self.incidence_matrix[idx_to_check]):
                # Swap the parallel row into the next free slot of this class.
                self.incidence_matrix[idx_to_swap], self.incidence_matrix[idx_to_check] = \
                    self.incidence_matrix[idx_to_check], self.incidence_matrix[idx_to_swap]
                idx_to_swap += 1
            idx_to_check += 1
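# Note: the predicate `is_parallel` is undefined in this snippet. A
# hypothetical sketch, assuming each incidence-matrix row is a 0/1
# vector over the points of a block and that two blocks are "parallel"
# when they are disjoint:
def is_parallel(row_a, row_b):
    # Parallel blocks share no point: no column is 1 in both rows.
    return not any(a and b for a, b in zip(row_a, row_b))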
def fitness_function(genome):
    """
    Given a candidate solution, return its fitness score, assuming the
    cantus_firmus in this closure. Caches the fitness score in the
    genome.
    """
    # Save some time!
    if genome.fitness is not None:
        return genome.fitness

    # The fitness score to be returned.
    fitness_score = 0.0
    # Counts the number of repeated notes.
    repeats = 0
    # Counts the amount of parallel motion.
    parallel_motion = 0
    # Counts the number of jumps in the melodic contour.
    jump_contour = 0

    contrapunctus = genome.chromosome

    # Make sure the solution starts correctly (at a 5th or octave).
    first_interval = contrapunctus[0] - cantus_firmus[0]
    if first_interval == 7 or first_interval == 4:
        fitness_score += REWARD_FIRST
    else:
        fitness_score -= PUNISH_FIRST

    # Make sure the solution finishes correctly (at an octave).
    if contrapunctus[-1] - cantus_firmus[-1] == 7:
        fitness_score += REWARD_LAST
    else:
        fitness_score -= PUNISH_LAST

    # Ensure the penultimate note is stepwise onto the final note.
    if abs(contrapunctus[-1] - contrapunctus[-2]) == 1:
        fitness_score += REWARD_LAST_STEP
    else:
        fitness_score -= PUNISH_LAST_STEP

    # Reward contrary motion onto the final note.
    cantus_firmus_motion = cantus_firmus[-1] - cantus_firmus[-2]
    contrapunctus_motion = contrapunctus[-1] - contrapunctus[-2]
    if ((cantus_firmus_motion < 0 and contrapunctus_motion > 0) or
            (cantus_firmus_motion > 0 and contrapunctus_motion < 0)):
        fitness_score += REWARD_LAST_MOTION
    else:
        fitness_score -= PUNISH_LAST_MOTION

    # Make sure the penultimate note isn't a repeated note.
    penultimate_preparation = abs(contrapunctus[-2] - contrapunctus[-3])
    if penultimate_preparation == 0:
        fitness_score -= PUNISH_REPEATED_PENULTIMATE
    else:
        # Make sure the movement to the penultimate note isn't from too
        # far away (not greater than a third).
        if penultimate_preparation < 2:
            fitness_score += REWARD_PENULTIMATE_PREPARATION
        else:
            fitness_score -= PUNISH_PENULTIMATE_PREPARATION

    # Check the fitness of the body of the solution.
    last_notes = (contrapunctus[0], cantus_firmus[0])
    last_interval = last_notes[0] - last_notes[1]
    for i in range(1, len(contrapunctus) - 1):
        contrapunctus_note = contrapunctus[i]
        # Four contrapunctus notes per cantus firmus note, so use
        # integer division (i / 4 would give a float index in Python 3).
        cantus_firmus_note = cantus_firmus[i // 4]
        current_notes = (contrapunctus_note, cantus_firmus_note)
        current_interval = contrapunctus_note - cantus_firmus_note

        # Punish parallel fifths or octaves.
        if ((current_interval == 4 or current_interval == 7) and
                (last_interval == 4 or last_interval == 7)):
            fitness_score -= PUNISH_PARALLEL_FIFTHS_OCTAVES

        # Check for parallel motion.
        if is_parallel(last_notes, current_notes):
            parallel_motion += 1

        # Check if the melody is a repeating note.
        if contrapunctus_note == last_notes[0]:
            repeats += 1

        # Check the melodic contour.
        contour_leap = abs(current_notes[0] - last_notes[0])
        if contour_leap >= 2:
            jump_contour += contour_leap - 2

        # Ensure dissonances are part of a step-wise movement.
        if i % 2 and current_interval in DISSONANCES:
            # The current note is a dissonance on the third beat of a bar.
            # Check that both the adjacent notes are only a step away.
            if is_stepwise_motion(contrapunctus, i):
                fitness_score += REWARD_STEPWISE_MOTION
            else:
                fitness_score -= PUNISH_STEPWISE_MOTION
        else:
            if is_stepwise_motion(contrapunctus, i):
                fitness_score += REWARD_STEPWISE_MOTION

        last_notes = current_notes
        last_interval = current_interval

    # Punish too many (> 1/3) repeated notes.
    if repeats > repeat_threshold:
        fitness_score -= PUNISH_REPEATS

    # Punish too many (> 1/3) parallel movements.
    if parallel_motion > repeat_threshold:
        fitness_score -= PUNISH_PARALLEL

    # Punish too many large leaps in the melody.
    if jump_contour > jump_threshold:
        fitness_score -= PUNISH_LEAPS

    genome.fitness = fitness_score
    return fitness_score
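# Note: `is_parallel` is not shown alongside this fitness function. In
# counterpoint, "parallel motion" means both voices moving in the same
# direction between consecutive notes; a hedged sketch under that
# assumption:
def is_parallel(last_notes, current_notes):
    # Each argument is a (contrapunctus, cantus_firmus) pitch pair.
    contrapunctus_motion = current_notes[0] - last_notes[0]
    cantus_firmus_motion = current_notes[1] - last_notes[1]
    # The two motions share a sign -> both voices moved the same way.
    return contrapunctus_motion * cantus_firmus_motion > 0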
def fitness_function(genome):
    """
    Given a candidate solution, return its fitness score, assuming the
    cantus_firmus in this closure. Caches the fitness score in the
    genome.
    """
    # Save some time!
    if genome.fitness is not None:
        return genome.fitness

    # The fitness score to be returned.
    fitness_score = 0.0
    # Counts the number of repeated notes in the contrapunctus.
    repeats = 0
    # Counts consecutive parallel thirds.
    thirds = 0
    # Counts consecutive parallel sixths.
    sixths = 0
    # Counts the amount of parallel motion.
    parallel_motion = 0
    # Counts the number of jumps in the melodic contour.
    jump_contour = 0

    contrapunctus = genome.chromosome

    # Make sure the solution starts correctly (at a 5th or octave).
    first_interval = contrapunctus[0] - cantus_firmus[0]
    if first_interval == 7 or first_interval == 4:
        fitness_score += REWARD_FIRST
    else:
        fitness_score -= PUNISH_FIRST

    # Make sure the solution finishes correctly (at an octave).
    if contrapunctus[-1] - cantus_firmus[-1] == 7:
        fitness_score += REWARD_LAST
    else:
        fitness_score -= PUNISH_LAST

    # Ensure the penultimate note is stepwise onto the final note.
    if abs(contrapunctus[-1] - contrapunctus[-2]) == 1:
        fitness_score += REWARD_LAST_STEP
    else:
        fitness_score -= PUNISH_LAST_STEP

    # Reward contrary motion onto the final note.
    cantus_firmus_motion = cantus_firmus[-1] - cantus_firmus[-2]
    contrapunctus_motion = contrapunctus[-1] - contrapunctus[-2]
    if ((cantus_firmus_motion < 0 and contrapunctus_motion > 0) or
            (cantus_firmus_motion > 0 and contrapunctus_motion < 0)):
        fitness_score += REWARD_LAST_MOTION
    else:
        fitness_score -= PUNISH_LAST_MOTION

    # Make sure the penultimate note isn't a repeated note.
    penultimate_preparation = abs(contrapunctus[-2] - contrapunctus[-3])
    if penultimate_preparation == 0:
        fitness_score -= PUNISH_REPEATED_PENULTIMATE
    else:
        # Make sure the movement to the penultimate note isn't from too
        # far away (not greater than a third).
        if penultimate_preparation < 2:
            fitness_score += REWARD_PENULTIMATE_PREPARATION
        else:
            fitness_score -= PUNISH_PENULTIMATE_PREPARATION

    # Check the fitness of the body of the solution. In Python 3 zip()
    # returns an iterator, so materialise it; start from the first pair
    # rather than popping the last one.
    solution = list(zip(contrapunctus, cantus_firmus))
    last_notes = solution[0]
    last_interval = last_notes[0] - last_notes[1]
    for contrapunctus_note, cantus_firmus_note in solution[1:]:
        current_notes = (contrapunctus_note, cantus_firmus_note)
        current_interval = contrapunctus_note - cantus_firmus_note

        # Punish parallel fifths or octaves.
        if ((current_interval == 4 or current_interval == 7) and
                (last_interval == 4 or last_interval == 7)):
            fitness_score -= PUNISH_PARALLEL_FIFTHS_OCTAVES

        # Check if the melody is a repeating note.
        if contrapunctus_note == last_notes[0]:
            repeats += 1

        # Check for parallel thirds.
        if current_interval == 2 and last_interval == 2:
            thirds += 1

        # Check for parallel sixths.
        if current_interval == 4 and last_interval == 4:
            sixths += 1

        # Check for parallel motion.
        if is_parallel(last_notes, current_notes):
            parallel_motion += 1

        # Check the melodic contour.
        contour_leap = abs(current_notes[0] - last_notes[0])
        if contour_leap > 2:
            jump_contour += contour_leap - 2

        last_notes = current_notes
        last_interval = current_interval

    # Punish too many (> 1/3) repeated notes.
    if repeats > repeat_threshold:
        fitness_score -= PUNISH_REPEATS

    # Punish too many (> 1/3) parallel thirds.
    if thirds > repeat_threshold:
        fitness_score -= PUNISH_THIRDS

    # Punish too many (> 1/3) parallel sixths.
    if sixths > repeat_threshold:
        fitness_score -= PUNISH_SIXTHS

    # Punish too many (> 1/3) parallel movements.
    if parallel_motion > repeat_threshold:
        fitness_score -= PUNISH_PARALLEL

    # Punish too many large leaps in the melody.
    if jump_contour > jump_threshold:
        fitness_score -= PUNISH_LEAPS

    genome.fitness = fitness_score
    return fitness_score
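# Note: both fitness functions refer to "the cantus_firmus in this
# closure", and the comments describe the thresholds as "> 1/3" of the
# melody. A hypothetical factory showing how such a closure could be set
# up (the name make_fitness_function and the exact threshold formula are
# assumptions):
def make_fitness_function(cantus_firmus):
    repeat_threshold = len(cantus_firmus) / 3.0
    jump_threshold = len(cantus_firmus) / 3.0

    def fitness_function(genome):
        ...  # body as above, closing over cantus_firmus and the thresholds

    return fitness_function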
net = getattr(import_module('lightcnn'), module_name)
model = net(num_classes=num_classes, channels=18)
is_available(model)
# is_adaptive(model, fc_num=fc_num, num_classes=num_classes, num_channels=num_channels)
# channels_conv(model, fc_num=fc_num, num_classes=num_classes)
# load(model, train_mode, pretrained_path=pretrained_path)

# In[28]:

# optimizer
print('create optimizer......')
optimizer = torch.optim.Adam(model.parameters(), lr=lr,
                             weight_decay=weight_decay)
is_parallel(optimizer)

# loss
print('create loss......')
# criterion = nn.CrossEntropyLoss(weight=class_weights)  # nn.MSELoss()
criterion = FocalLoss(class_num=num_classes, alpha=class_weights, gamma=2)

# In[29]:

# train: pass model, loss, optimizer and dataset to the trainer
print('=' * 10)
print('train......')
print('=' * 10)
e = Trainer(model, criterion, optimizer,
                          num_workers=num_workers)
valid_loader = DataLoader(dataset=valid_dataset,
                          shuffle=False,
                          batch_size=valid_batch_size,
                          num_workers=num_workers)
# print('data_loader end......')

# print('model load......')
net = getattr(import_module('torchvision.models'), module_name)
model = net(num_classes=num_classes)
is_adaptive(model, fc_num=fc_num, num_classes=num_classes,
            num_channels=num_channels)
is_available(model)
model = is_parallel(model)
# load(model, train_mode, pretrained_path=pretrained_path)

# In[28]:

# optimizer
print('create optimizer......')
optimizer = torch.optim.Adam(model.parameters(), lr=lr,
                             weight_decay=weight_decay)
optimizer = is_parallel(optimizer)

# loss
print('create loss......')
criterion = nn.CrossEntropyLoss(weight=class_weights)  # nn.MSELoss()
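# Note: unlike the first snippet, `is_parallel` is used here as a
# wrapper whose return value is assigned back. A minimal sketch,
# assuming it replicates modules across visible GPUs with
# nn.DataParallel and passes anything else (such as the optimizer)
# through unchanged:
import torch
import torch.nn as nn

def is_parallel(obj):
    # Wrap modules for multi-GPU training; leave other objects as-is.
    if isinstance(obj, nn.Module) and torch.cuda.device_count() > 1:
        return nn.DataParallel(obj)
    return obj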