Example #1
def from_initial():
	# Returns a chess object with pieces at their initial positions
	history_stack = History()
	board = Board(history_stack)
	for piece_cls, player, square in INITIAL_POSITIONS:
		board.add(piece_cls(player, board), square)
	return Chess(board, Player.WHITE, history_stack)
Example #2
 def __init__(self):
     QtGui.QMainWindow.__init__(self)
     
     self.logfile = open(BASEPATH+"/log.txt", "w")
     self.logfile.write("")
     self.logfile.close()
     self.logfile = open(BASEPATH+"/log.txt", "a")
     
     self.addons = []
     
     #load the config
     self.config = Config(CONFIGPATH)
     if not os.path.exists(CONFIGPATH):
         self.initializeConfig()
     self.config.loadData()
     
     self.history = History(100)
     
     self.translations = translations.Translations(self.config["language"])
     
     self.project = widgets.ProjectExplorer(self, "")
     self.projectPath = ""
     
     self.baseModClass = None
     self.guiClass = None
     
     self.initUI()
     self.initializeAddons()
Example #3
 def fill_comp_ordered_history_list(file_path):
     with open(file_path) as f:
         hist_sentence_list = []
         for idx, line in enumerate(f):
             splited_words = split(' |,\n', line[:-1]) if line[-1] == '\n' else split(' |,\n', line)
             new_sentence_hist_list = []
             for word in splited_words:
                 cur_hist = History(cword=word, pptag=None, ptag=None, ctag=None, nword=None, pword=None,
                                    ppword=None, nnword=None)
                 new_sentence_hist_list.append(cur_hist)
             hist_sentence_list.append(new_sentence_hist_list)
     return hist_sentence_list
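
This snippet assumes two names from its module's scope: split (presumably re.split, given the regex pattern) and the History record used throughout these examples. A minimal sketch of those assumptions, with a hypothetical namedtuple standing in for the project's History class:

    from re import split  # the ' |,\n' pattern suggests re.split
    from collections import namedtuple

    # hypothetical stand-in; the project's real History class may differ
    History = namedtuple('History', ['cword', 'pptag', 'ptag', 'ctag',
                                     'nword', 'pword', 'ppword', 'nnword'])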
Example #4
 def __init__(self):
     super(MyCalcWindow, self).__init__()
     self.setupUi(self)
     self.setFixedSize(self.width(), self.height())  # lock the window size
     ##################################
     self.PRECISION = self.verticalSlider.value()
     self.history = ""
     self._history = History(parent=self)
     self.compute_times = 0
     self.lcdNumber.display(self.PRECISION)
     self.connecter()
     self.show()
     self.press_AC()
Example #5
    def _calc_hist_dicts(hist_sentence_list: [[History]], q: mp.Queue, self):
        """ method that calculates  two dictionaries - hist_to_all_tag_feature_matrix_dict and hist_to_feature_vec_dict

        :param hist_sentence_list: sentenc
        :param q:
        :param self:
        :return:
        """
        hist_to_feature_vec_dict = dict()
        hist_to_all_tag_feature_matrix_dict = dict()
        for idx_sentence, sentence in enumerate(hist_sentence_list):
            if idx_sentence % 500 == 0:
                gc.collect()
                print(f'filling sentence number {idx_sentence}')
            for hist in sentence:
                tag_set = self.tags_set
                cur_feature_vecs = []
                hist_to_feature_vec_dict[hist] = csr_matrix(self.get_non_zero_feature_vec_indices_from_history(hist))
                for ctag in tag_set:
                    new_hist = History(cword=hist.cword, pptag=hist.pptag, ptag=hist.ptag,
                                       ctag=ctag, nword=hist.nword, pword=hist.pword,
                                       nnword=hist.nnword, ppword=hist.ppword)

                    cur_feature_vecs.append(self.get_non_zero_feature_vec_indices_from_history(new_hist))

                key_all_tag_hist = History(cword=hist.cword, pptag=hist.pptag, ptag=hist.ptag,
                                           ctag=None, nword=hist.nword, pword=hist.pword,
                                           nnword=hist.nnword, ppword=hist.ppword)

                # fill the dict of (num_tags x num_features) matrices; it is used later to speed up matrix operations
                if hist_to_all_tag_feature_matrix_dict.get(key_all_tag_hist, None) is None:
                    sparse_res = csr_matrix(cur_feature_vecs)

                    # sparse_mem = sparse_res.data.nbytes + sparse_res.indptr.nbytes + sparse_res.indices.nbytes
                    hist_to_all_tag_feature_matrix_dict[key_all_tag_hist] = sparse_res

        q.put((hist_to_all_tag_feature_matrix_dict, hist_to_feature_vec_dict))
Example #6
 def calc_normalization_term(v, sentence_history_list,
                             hist_to_all_tag_feature_matrix_dict):
     norm = 0.
     for sentence in sentence_history_list:
         for hist in sentence:
             new_hist_key = History(cword=hist.cword,
                                    pptag=hist.pptag,
                                    ptag=hist.ptag,
                                    ctag=None,
                                    nword=hist.nword,
                                    pword=hist.pword,
                                    nnword=hist.nnword,
                                    ppword=hist.ppword)
             mat = hist_to_all_tag_feature_matrix_dict[new_hist_key]
             norm += np.log(np.sum(np.exp(mat @ v)))
     return norm
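
The inner accumulation np.log(np.sum(np.exp(mat @ v))) can overflow for large scores. A numerically stable variant of that line, assuming SciPy is available (it already is if csr_matrix comes from scipy.sparse), uses scipy.special.logsumexp:

    from scipy.special import logsumexp

    # same value as np.log(np.sum(np.exp(mat @ v))), but computed stably
    norm += logsumexp(mat @ v)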
Example #7
def main():
    # Load augmented images and keypoints
    training_images = np.load('data/training_images_augs.npy')
    training_labels = np.load('data/training_keypoints_augs.npy')
    valid_images = np.load('data/valid_images_augs.npy')
    valid_labels = np.load('data/valid_keypoints_augs.npy')

    # divide by 255
    training_images = training_images / 255
    valid_images = valid_images / 255

    # Construct a model
    model = my_model()

    # Compile
    #    opt = keras.optimizers.SGD(lr=1e-2, momentum=0.9, nesterov=True)
    opt = keras.optimizers.Adam(lr=1e-4, decay=1e-6)
    model.compile(optimizer=opt, loss=root_mean_squared_error)

    # Callbacks
    save_dir = 'tmp/saved_model/weights.{epoch:02d}-{val_loss:.2f}.hdf5'
    ckpt = keras.callbacks.ModelCheckpoint(save_dir,
                                           monitor='val_loss',
                                           verbose=1,
                                           save_best_only=True,
                                           period=25)
    hists = History()

    callbacks = [hists, ckpt]

    # Training
    model.fit(training_images,
              training_labels,
              batch_size=64,
              epochs=5,
              validation_data=(valid_images, valid_labels),
              callbacks=callbacks)
    # Plot loss curves
    hists.plot()

    # Visualize predictions
    test_images = np.load('data/test_images.npy') / 255
    preds = model.predict(test_images[..., np.newaxis], batch_size=256)
    idx = np.random.randint(preds.shape[0])
    plt.imshow(test_images[idx], cmap='gray')
    plt.scatter(preds[idx][:, 0], preds[idx][:, 1], c='r', s=20, marker='.')
    plt.show()
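
A compatibility note on the Keras arguments above: lr= and decay= for optimizers and period= for ModelCheckpoint are legacy spellings; recent tf.keras releases expect learning_rate= and save_freq= instead. A hedged sketch of the optimizer line under that assumption:

    # same 1e-4 step size with the current argument name; per-step decay can be
    # expressed with keras.optimizers.schedules if needed
    opt = keras.optimizers.Adam(learning_rate=1e-4)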
Example #8
    def calc_expected_counts(history_sentence_list, v,
                             hist_to_all_tag_feature_matrix_dict):
        expected_counts = np.zeros(v.shape, np.float64)
        for sentence in history_sentence_list:
            for hist in sentence:
                new_hist_key = History(cword=hist.cword,
                                       pptag=hist.pptag,
                                       ptag=hist.ptag,
                                       ctag=None,
                                       nword=hist.nword,
                                       pword=hist.pword,
                                       nnword=hist.nnword,
                                       ppword=hist.ppword)
                mat = hist_to_all_tag_feature_matrix_dict[new_hist_key]
                exp_mat = np.exp(mat @ v)
                prob_mat = exp_mat / np.sum(exp_mat)
                expected_counts += prob_mat * mat

        return expected_counts
Example #9
    def fill_tagged_ordered_history_list(file_path, is_test=False):
        with open(file_path) as f:
            hist_sentence_list = []
            for idx, line in enumerate(f):
                # splited_words = split(' |,\n', line[:-1]) if line[-1] == '\n' else split(' |,\n', line)  # remove \n from last part of sentence
                if not is_test:
                    splited_words = split(' |,\n', line[:-1])
                    del splited_words[-1]
                else:
                    splited_words = split(' |,\n', line[:-1]) if line[-1] == '\n' else split(' |,\n', line)
                new_sentence_hist_list = []
                for word_idx in range(len(splited_words)):
                    cword, ctag = split('_', splited_words[word_idx])

                    # check if first in sentence
                    if word_idx == 0:
                        pword = WordAndTagConstants.PWORD_SENTENCE_BEGINNING
                        ptag = WordAndTagConstants.PTAG_SENTENCE_BEGINNING
                        pptag = WordAndTagConstants.PPTAG_SENTENCE_BEGINNING
                        ppword = WordAndTagConstants.PPWORD_SENTENCE_BEGINNING
                    else:
                        prev_hist_idx = word_idx - 1
                        pword = new_sentence_hist_list[prev_hist_idx].cword
                        ptag = new_sentence_hist_list[prev_hist_idx].ctag
                        pptag = new_sentence_hist_list[prev_hist_idx].ptag
                        ppword = new_sentence_hist_list[prev_hist_idx].pword

                    # check how close the word is to the end of the sentence
                    if word_idx + 2 < len(splited_words):
                        nword, _ = split('_', splited_words[word_idx+1])
                        nnword, _ = split('_', splited_words[word_idx+2])
                    elif word_idx + 1 < len(splited_words):
                        nword, _ = split('_', splited_words[word_idx+1])
                        nnword = WordAndTagConstants.NWORD_SENTENCE_END
                    else:
                        nword = WordAndTagConstants.NWORD_SENTENCE_END
                        nnword = WordAndTagConstants.NNWORD_SENTENCE_END
                    cur_hist = History(cword=cword, pptag=pptag, ptag=ptag, ctag=ctag, nword=nword, pword=pword,
                                       ppword=ppword, nnword=nnword)
                    new_sentence_hist_list.append(cur_hist)
                hist_sentence_list.append(new_sentence_hist_list)
        return hist_sentence_list
Example #10
    def calc_prob_for_hist(self, cur_hist, prev_pi, u):
        pptag_len = len(prev_pi)
        cur_pi = []
        for pptag in range(pptag_len):
            if prev_pi[pptag] == 0:
                cur_pi.append(np.zeros(len(self.tags_list), dtype=np.float64))
                continue
            pptag = self.index_to_tag[pptag]
            dots = []
            tags_set = self.get_possible_tag_set_from_word(cur_hist.cword)

            if len(tags_set) > 1:
                for c_tag in self.tags_list[:-2]:
                    if c_tag in tags_set:
                        n_hist = History(cword=cur_hist.cword,
                                         pptag=pptag,
                                         ptag=u,
                                         nword=cur_hist.nword,
                                         pword=cur_hist.pword,
                                         ctag=c_tag,
                                         nnword=cur_hist.nnword,
                                         ppword=cur_hist.ppword)

                        dot_prod = self.v.dot(
                            self.get_feature_from_hist(n_hist))
                        dots.append(dot_prod)
                    else:
                        dots.append(-np.inf)
                # exp_arr = np.exp(np.array(dots) - max(dots))
                exp_arr = np.exp(np.array(dots))
                prob_arr = exp_arr / np.sum(exp_arr)

            else:
                prob_arr = np.zeros(shape=(self.pi_tables.shape[1] - 2))
                prob_arr[self.tag_to_index[tags_set.pop()]] = 1
            prob_arr = np.append(np.append(prob_arr, 0), 0)
            cur_pi.append(prev_pi[self.tag_to_index[pptag]] * prob_arr)
        return np.array(cur_pi)
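
The commented-out line above hints at the standard max-subtraction trick: softmax is invariant to adding a constant to every score, so subtracting max(dots) before exponentiating avoids overflow while leaving prob_arr unchanged. A hedged sketch of that stabilised step, assuming at least one finite dot product:

    # stable softmax over the dot products; -inf entries still end up with probability 0
    dots = np.array(dots)
    exp_arr = np.exp(dots - np.max(dots))
    prob_arr = exp_arr / np.sum(exp_arr)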
Example #11
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=64,
                                          shuffle=True,
                                          num_workers=5,
                                          pin_memory=True)

testset = torchvision.datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=True,
                                       transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=64,
                                         shuffle=False,
                                         num_workers=5,
                                         pin_memory=True)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

model = Cifar10_ResNet44().cuda()

loss_fn = nn.CrossEntropyLoss(reduce=True)
optimizer_ft = optim.Adam(model.parameters(), lr=0.01)

history = History(model, "cifar10_transform", resume=True)
train_model(history,
            trainloader,
            testloader,
            loss_fn,
            optimizer_ft,
            num_epochs=30)
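
nn.CrossEntropyLoss(reduce=True) relies on a deprecated argument; on current PyTorch the same behaviour (mean over the batch) is spelled with the reduction keyword. A hedged sketch of that line with the modern spelling:

    # equivalent to reduce=True with the default size_average: average the loss over the batch
    loss_fn = nn.CrossEntropyLoss(reduction='mean')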
Example #12
def preprocessing(project_directory, var_matrix_path, full_matrix_path,
                  flag_file_path, **kwargs):
    """
    Groups variant sites into amplicon windows and filters
    out any amplicons that do not have well conserved
    upstream and downstream primer regions
    """
    logger = logging.getLogger(__name__)
    logger.info("BEGIN Preprocessing")
    args = _set_parameters(**kwargs)
    start_time = time.time()

    history = History(project_directory.make_new_file("history",
                                                      "preprocessing_history"),
                      "Preprocessing",
                      project_directory.timestamp,
                      param_dict=args)

    # Get strains and sites from matrix
    strains = _get_strains_from_file(var_matrix_path, args["sep"])
    sites = _get_sites_from_file(var_matrix_path, args["sep"])

    # Remove excluded strains
    if args["exclude_strains"] is not None:
        strains = _remove_strains(args["exclude_strains"], strains)

    var_matrix = _parse_var_matrix(var_matrix_path, strains, args["sep"])

    if args["strict"]:
        n_sites_before = len(sites)
        var_matrix, sites = _remove_ambiguous_sites(var_matrix, sites)
        logger.info("Strict Mode: {} sites with ambiguous "
                    "or missing data were removed".format(n_sites_before -
                                                          len(sites)))

    history.add_path("VARIANT SITE MATRIX FILE", var_matrix_path)
    history.add_parameter("Number of Sites", len(sites))
    history.add_parameter("Number of Strains", len(strains))

    _check_inputs(args["pz_size"], args["pz_filter_length"],
                  args["strain_cutoff"], len(strains))

    if full_matrix_path is not None:
        flag_df = get_flags_from_matrix(full_matrix_path, strains, history,
                                        project_directory, **args)

    if flag_file_path is not None:
        flag_df = get_flags_from_file(flag_file_path, history)

    flag_dic = _get_flags_from_counts(flag_df, args["strain_cutoff"])

    amplicon_filter = AmpliconFilter(sites, var_matrix, flag_dic,
                                     args['window'], args['pz_size'],
                                     args['pz_filter_length'],
                                     args['pz_filter_percent'], args['strict'])

    patterns = amplicon_filter.filter_amplicons_get_patterns()

    # Write patterns to a json file
    pattern_json_file = project_directory.make_new_file(
        "patterns", "patterns", "json")
    patterns.to_json(pattern_json_file, list(strains))
    history.add_path("PATTERN JSON", pattern_json_file)

    # Write history
    logger.info("FINISHED Preprocessing")
    run_time = time.time() - start_time
    history.add_other("Run Time", run_time)
    history.write()
Example #13
    def __tabla_completa(self, datos):
        # internal helper functions
        def clean_lists(datos):
            salida = []
            for i in datos:
                if type(i) == float: 
                    salida.append("Sin nombre")
                elif len(i) < 1:
                    salida.append("Sin nombre")
                else: 
                    salida.append(i[0])
            return salida

        def clean_tags(datos): 
            salida = []
            for i in datos:
                if type(i) == float: 
                    salida.append("Sin tags")
                else: 
                    salida.append("_".join(i))
            return salida
        def fix_columns(datos):
            columns = datos.columns
            for i in columns:
                datos = datos.rename(columns={i: "{}".format(i.replace(".", "_"))})
                # At some point a column named 'to' shows up and causes a syntax error
                try:
                    datos = datos.rename(columns={'to': 'to_'})
                except:
                    pass
            return datos
        
        tabla = pd.DataFrame()
        for i in datos:
            tabla = tabla.append(json_normalize(i), ignore_index=True, sort=True)
        tabla = tabla.drop(columns=['session.user_agent'])
        try:
            tabla = tabla.drop(columns=['message'])
        except:
            pass
        
        print("Tickets Extraidos : " + str(len(tabla)))
        
        tabla["agent_names"] = clean_lists(tabla['agent_names'])
        tabla['agent_ids'] = clean_lists(tabla['agent_ids'])
        tabla['tags'] = clean_tags(tabla['tags'])
        
        
        
        # Build one dataframe for history and another for webpath
        tabla_h = History(tabla).history
        tabla_wp = Webpath(tabla).webpath
        
        try:
            tabla_h['tags'] = clean_tags(tabla_h['tags'])
        except:
            pass

        try:
            tabla_h['new_tags'] = clean_tags(tabla_h['new_tags'])
        except:
            pass
        
        tabla = tabla.drop(columns=['history','webpath'])
        
        tabla['triggered_response'] = tabla['triggered_response'].fillna(False)
        tabla = tabla.astype({'triggered_response': 'bool'})

        tabla['missed'] = tabla['missed'].fillna(False)
        tabla = tabla.astype({'missed': 'bool'})

        tabla['triggered'] = tabla['triggered'].fillna(False)
        tabla = tabla.astype({'triggered': 'bool'})

        tabla['unread'] = tabla['unread'].fillna(False)
        tabla = tabla.astype({'unread': 'bool'})
            
#        print("Tickets extraidos (history)" ,len(tabla_h['id'].value_counts()))
#        print("Tickets extraidos (webpath)" ,len(tabla_wp['id'].value_counts()))
        
        tabla = fix_columns(tabla)
        tabla_h = fix_columns(tabla_h)
        tabla_wp = fix_columns(tabla_wp)
        
        tabla = tabla.astype({'response_time_avg' : 'float64'})
        tabla = tabla.astype({'response_time_first' : 'float64'})
        tabla = tabla.astype({'response_time_max' : 'float64'})
        
        self.history = tabla_h    
        self.webpath = tabla_wp
        self.tabla = tabla
        return tabla
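
DataFrame.append, used in the accumulation loop above, was deprecated and later removed from pandas. A hedged equivalent of that loop with pd.concat, assuming datos is a non-empty iterable of JSON-like records:

    # build the table in one pass instead of appending inside the loop
    tabla = pd.concat([json_normalize(i) for i in datos], ignore_index=True, sort=True)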

Example #14
experiments = args.name.split(',')
labels = args.label.split(',')

# construct matplotlib fig
fig, (ax_acc, ax_loss) = plt.subplots(1, 2, figsize=(18,7))

# plot axis
for i, (exp, label) in enumerate(zip(experiments, labels)):
    # open the history file and load its JSON contents
    with open(os.path.join(args.base_path, exp, 'history.json'), 'r') as f:
        hist = json.load(f)

    history = History().from_dict(hist).epoch_history

    # plot accuracy axis
    ax_acc.plot(history['acc'], color=('C'+str(i)), linestyle=':')
    ax_acc.plot(history['val_acc'], label=label, color=('C'+str(i)))

    # plot loss axis
    ax_loss.plot(history['loss'], color=('C'+str(i)), linestyle=':')
    ax_loss.plot(history['val_loss'], label=label, color=('C'+str(i)))


# add axis label
ax_acc.plot([], color='black', linestyle=':', label='dotted for train accuracy')
ax_acc.plot([], color='black', linestyle='-', label='solid for validation accuracy')
ax_acc.set_xlabel('epoch')
ax_acc.set_ylabel('accuracy')
Example #15
from torchsummary import summary

model = inception_v3(aux_logits=False)
inchannel = model.fc.in_features
print(inchannel)
model.fc = nn.Linear(inchannel, 5)
model = model.cuda()
summary(model, (3, 300, 300))

loss_fn = nn.CrossEntropyLoss(reduce=True)
optimizer_ft = optim.Adam(model.parameters(), lr=0.01)

transform = transforms.Compose(
    [transforms.Resize((300, 300)),
        transforms.ToTensor(),
        transforms.Normalize((0.64639061, 0.56044774, 0.61909978), (0.1491973, 0.17535066, 0.12751725))])

train_data = datasets.ImageFolder(
    "/hdd/yejiandong/datasets/overlap-0.25-dataset/wsi_train", transform=transform)
train_loader = DataLoader(train_data, batch_size=64,
                          shuffle=True, pin_memory=True, num_workers=10)

valid_data = datasets.ImageFolder(
    "/hdd/yejiandong/datasets/overlap-0.25-dataset/wsi_valid", transform=transform)
valid_loader = DataLoader(valid_data, batch_size=64,
                          shuffle=False, pin_memory=True, num_workers=10)

history = History(model, "inception", resume=False)
train_model(history, train_loader, valid_loader,
            loss_fn, optimizer_ft, num_epochs=30)
Example #16
def train(trn_loader, model, criterion, optimizer, epoch, logger, sublogger,
                   work_dir=os.path.join(args.work_dir, args.exp)):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    ious = AverageMeter()
    dices = AverageMeter()

    model.train()

    end = time.time()
    slice_level_acc = 0
    total_data_counts = 0
    lr_schedule = args.lr_schedule
    train_history = History(len(trn_loader.dataset))
    length_data = 0

    for i, (input, target, idx) in enumerate(trn_loader):
        data_time.update(time.time() - end)

        input = input.cuda()
        target = target.cuda()

        output = model(input)
        loss = criterion(output, target)

        # Segmentation Measure
        pos_probs = torch.sigmoid(output)
        pos_preds = (pos_probs > 0.5).float()

        p = 0
        k = 0
        correct_per_pixel = []
        for dap, predict in zip(target, pos_preds):
            if dap.max() == 1 and predict.max() == 1:
                p += 1
                k += 1
            elif dap.max() == 0 and predict.max() == 0:
                p += 1
                k += 1
            else:
                k += 1
        slice_level_acc += p
        total_data_counts += k

        iou, dice = performance(output, target)
        losses.update(loss.item(), input.size(0))
        ious.update(iou, input.size(0))
        dices.update(dice, input.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        pos_probs = torch.sigmoid(output)
        pos_preds = (pos_probs > 0.5).float()
        #import ipdb; ipdb.set_trace()

        print('Epoch: [{0}][{1}/{2}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'IoU {iou.val:.4f} ({iou.avg:.4f})\t'
              'Dice {dice.val:.4f} ({dice.avg:.4f})\t'.format(
            epoch, i, len(trn_loader), batch_time=batch_time,
            data_time=data_time, loss=losses,
            iou=ious, dice=dices))

        if i % 10 == 0:
            sublogger.write([epoch, i, loss.item(), iou, dice])

    slice_acc = slice_level_acc / total_data_counts

    writer_train.add_scalar('Loss', losses.avg, epoch)
    writer_train.add_scalar('IoU', ious.avg, epoch)
    writer_train.add_scalar('Dice Score', dices.avg, epoch)
    writer_train.add_scalar('Slice-Level-Accuracy', slice_level_acc / total_data_counts, epoch)

    logger.write([epoch, losses.avg, ious.avg, dices.avg, slice_acc])
Example #17
def pattern_selection(project_directory, **kwargs):
    logger = logging.getLogger(__name__)
    logger.info("BEGIN Pattern Selection")
    args = _set_parameters(**kwargs)
    start_time = time.time()
    _check_inputs(args['max_loci'], args['required_loci'],
                  args['exclude_loci'])
    history = History(project_directory.make_new_file(
        "history", "pattern_selection_history"),
                      "Pattern_Selection",
                      project_directory.timestamp,
                      param_dict=args)

    preprocessing_history = History(
        project_directory.get_parent_subdirectory_file(
            "history", "preprocessing_history_{}.txt".format(
                project_directory.get_parent_directory_timestamp())),
        "Preprocessing",
        exists=True)

    # Get JSON file path from preprocessing step
    json_file = preprocessing_history.get_path("PATTERN JSON")
    variant_matrix = preprocessing_history.get_path("VARIANT SITE MATRIX FILE")
    sep = {
        'comma': ",",
        "space": " ",
        "tab": "\t"
    }[preprocessing_history.get_parameter("SEP")]

    # Get flag file path from preprocessing step
    flag_file = preprocessing_history.get_path("PRIMER ZONE FLAGS")
    primer_zone_size = preprocessing_history.get_parameter("PZ_SIZE")

    history.add_path("PATTERN JSON", json_file)
    logger.info("Reading from pattern JSON: %s", json_file)
    # Read in pattern JSON
    patterns = Patterns()
    patterns.load_patterns(json_file)
    if len(args['exclude_loci']):
        patterns.remove_sites(args['exclude_loci'])
    if len(args['required_loci']):
        patterns.add_required_sites(args['required_loci'])
    if len(args['exclude_strains']):
        patterns.remove_strains(args['exclude_strains'])
    patterns.set_resolution(args['res'], args['stop_at_res'])
    best_set = _get_minimum_spanning_set(
        patterns, args['reps'], args['max_loci'], args['max_res'],
        args['n_threads'], int(preprocessing_history.get_parameter("PZ_SIZE")))

    haplotype_file = project_directory.make_new_file("minimum_spanning_set",
                                                     ".haplotype", "csv")
    amplicon_json = project_directory.make_new_file("minimum_spanning_set",
                                                    ".amplicons", "json")
    haplotype_matrix = project_directory.make_new_file("minimum_spanning_set",
                                                       "haplotypes", "csv")
    amplicon_matrix = project_directory.make_new_file("minimum_spanning_set",
                                                      "amplicons", "csv")
    pattern_matrix = project_directory.make_new_file("minimum_spanning_set",
                                                     "patterns", "csv")
    summary_file = project_directory.make_new_file("summary", "summary")

    haplotype = Haplotype(patterns, best_set, flag_file, primer_zone_size,
                          variant_matrix, sep)

    haplotype.write_haplotype(haplotype_file)
    history.add_path("Haplotype File", haplotype_file)

    haplotype.write_json(amplicon_json)
    history.add_path("Amplicon JSON", amplicon_json)

    haplotype.write_summary(summary_file)
    history.add_path("Summary", summary_file)

    haplotype.write_output(haplotype_matrix, pattern_matrix, amplicon_matrix)
    history.add_path("Haplotype Matrix", haplotype_matrix)
    history.add_path("Amplicon Matrix", amplicon_matrix)
    history.add_path("Pattern Matrix", pattern_matrix)

    logger.info("FINISHED Pattern Selection")
    run_time = time.time() - start_time
    history.add_other("Run Time", run_time)
    history.write()