Example no. 1
 def test(self):
     self.model.eval()
     avg_psnr = 0
     name = 0
     pae = 6
     with torch.no_grad():
         for batch_num, (data, target) in enumerate(self.testing_loader):
             data, target = data.to(self.device), target.to(self.device)
             prediction = self.model(data)
             loss = self.criterion(prediction, target)
             prediction = np.around(prediction.cpu() * 128 * pae * 0.7 +
                                    128)
             target = np.around(target.cpu() * 128 * pae * 0.7 + 128)
             mse = self.criterion(prediction, target)
             psnr = 10 * log10(65025 / mse.item())
             avg_psnr += psnr
             progress_bar(batch_num, len(self.testing_loader),
                          'PSNR: %.4f' % (avg_psnr / (batch_num + 1)))
         prediction = prediction.cpu().numpy()
         prediction = np.reshape(prediction,
                                 (prediction.shape[2], prediction.shape[3]))
         cv2.imwrite('./result/test.png', prediction)
     print("  Average PSNR: {:.4f} dB".format(avg_psnr /
                                              len(self.testing_loader)))
     return avg_psnr / len(self.testing_loader)
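The constant 65025 above is 255 squared, the peak signal power for 8-bit images, and the `* 128 * pae * 0.7 + 128` expression undoes this dataset's normalization before PSNR is measured. Other examples below use 10 * log10(1 / mse) instead because their tensors stay in [0, 1]. A minimal sketch of the shared formula (the helper name is illustrative, not from these repositories):

    import numpy as np

    def psnr_from_mse(mse, peak=255.0):
        # PSNR = 10 * log10(peak^2 / MSE); peak is 255 for 8-bit data, 1.0 for [0, 1] tensors
        return 10 * np.log10(peak ** 2 / mse)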
Example no. 2
    def valid(self):
        """
        data: [torch.cuda.FloatTensor], 10 batches: [10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
        """
        self.model.eval()
        avg_psnr = 0
        avg_niqe = 0

        with torch.no_grad():
            for batch_num, (data, target) in enumerate(self.valid_loader):
                data = self.img_preprocess(data)  # resize input image size
                data, target = data.to(self.device), target.to(self.device)
                _, prediction = self.model(data)
                #calculate psnr
                mse = torch.mean(((prediction - target)**2), dim=[1, 2, 3])
                psnr = -10 * mse.log10().mean().item()
                avg_psnr += psnr
                #calculate niqe
                niqe = np.mean(
                    niqe_metric.niqe(
                        prediction.permute(0, 2, 3, 1).cpu().numpy() * 255,
                        RGB=True,
                        video_params=False))
                avg_niqe += niqe
                progress_bar(
                    batch_num, len(self.valid_loader),
                    'PSNR: %.3f || NIQE: %.3f' %
                    (avg_psnr / (batch_num + 1), avg_niqe / (batch_num + 1)))

        print("    Average PSNR: {:.4f} dB".format(avg_psnr /
                                                   len(self.valid_loader)))

        return avg_psnr
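Because the tensors here are assumed to lie in [0, 1], 10 * log10(1 / MSE) reduces to the -10 * log10(MSE) used above, with the MSE taken per image (dim=[1, 2, 3]) before averaging; NIQE is a no-reference quality metric, which is why it is evaluated on the de-normalized prediction alone. A hedged sketch of just the PSNR step (assuming [N, C, H, W] tensors in [0, 1]):

    import torch

    def batch_psnr(prediction, target):
        # per-image MSE over channels and pixels, then the batch-mean PSNR
        mse = torch.mean((prediction - target) ** 2, dim=[1, 2, 3])
        return (-10 * torch.log10(mse)).mean().item()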
Example no. 3
    def progress_info(self):
        # Show the progress
        self.amount = len(self.link_list_set)
        scraped = len(self.scraped_list_set)
        print(f"=====\nOverall Progress for {self.domain}")
        progress_bar(scraped, self.amount)
        old = 0

        for link in self.link_list_set:
            if str(link) in self.scraped_list_set:
                old += 1
        diff = self.amount - old
        print("=====")
        print(f"{diff} new links found and ready for scraping")

        if go():
            print("Let's go!")
            pass
        else:
            y.graph()  # not that interesting at the moment
            df = self.create_df()
            self.create_csv(df, self.seo_file)
            sys.exit()
Example no. 4
    def recurs(self):
        self.progress_info()
        count = 0
        for link in self.link_list_set.copy():
            print(f"Link : {link}")
            count += 1
            progress_bar(count, self.amount)
            try:
                if link in self.scraped_list_set.copy():
                    pass
                elif self.load(link):
                    # load(link) checks robots for a no-index directive; if present, skip this link
                    pass
                else:
                    self.load(link)
                    self.find(seo_tags)

                    # df = self.create_df()
                    # self.create_csv(df, self.seo_file, "a", False)

                    self.next_link()

            except Exception:
                print(f"❌ Status code for link {link}")

        # Starting again
        self.recurs()
Example no. 5
    def train(self):
        self.model.train()
        train_loss = 0
        for batch_num, (data) in enumerate(self.training_loader):
            data, target = data[:, :2, ::3, ::3].to(
                self.device), data[:, :2].to(self.device)
            self.optimizer.zero_grad()
            loss = self.criterion(self.model(data), target)
            train_loss += loss.item()
            loss.backward()
            self.optimizer.step()
            progress_bar(
                batch_num, len(self.training_loader), 'Loss: %.4f, LR: %.4f' %
                (train_loss /
                 (batch_num + 1), self.optimizer.param_groups[0]['lr']))
            self.writer.add_scalar(
                'Train/TrainIterLoss_%s' % self.config.comment, loss.item(),
                self.epoch * len(self.training_loader) + batch_num)
            self.writer.add_scalar(
                'Train/TrainAvgLoss_%s' % self.config.comment,
                train_loss / (batch_num + 1),
                self.epoch * len(self.training_loader) + batch_num)

        print("    Average Loss: {:.4f}".format(train_loss /
                                                len(self.training_loader)))
Example no. 6
    def test(self):
        self.model.eval()
        avg_psnr = 0

        with torch.no_grad():
            for batch_num, (data) in enumerate(self.testing_loader):
                data, target = data[:, :2, ::3, ::3].to(
                    self.device), data[:, :2].to(self.device)
                data = F.interpolate(data,
                                     size=(target.shape[-2], target.shape[-1]))
                _, prediction = self.model(data)
                mse = self.criterion(prediction, target)
                avg_psnr += mse.item()
                progress_bar(batch_num, len(self.testing_loader),
                             'PSNR: %.4f' % (avg_psnr / (batch_num + 1)))
                self.writer.add_scalar(
                    'Valid/ValidIterLoss_%s' % self.config.comment, mse.item(),
                    self.epoch * len(self.testing_loader) + batch_num)
                self.writer.add_scalar(
                    'Valid/ValidAvgLoss_%s' % self.config.comment,
                    avg_psnr / (batch_num + 1),
                    self.epoch * len(self.testing_loader) + batch_num)

        print("    Average Loss: {:.4f}".format(avg_psnr /
                                                len(self.testing_loader)))
        return avg_psnr / len(self.testing_loader)
Example no. 7
    def train(self):
        self.model.train()
        train_loss = 0

        for batch_num, (data, target) in enumerate(self.training_loader):
            data = data.to(self.device)
            target = [
                target[i].to(self.device) for i in range(self.num_targets)
            ]

            self.optimizer.zero_grad()
            out = self.model(data)

            for i in range(self.num_targets):
                if i == 0:
                    loss = self.criterion(out[i], target[i])
                else:
                    loss += self.criterion(out[i], target[i])

            train_loss += loss.item()
            loss.backward()
            self.optimizer.step()
            progress_bar(batch_num, len(self.training_loader),
                         'Loss: {:.4f}'.format((train_loss / (batch_num + 1))))

        avg_train_loss = train_loss / len(self.training_loader)
        print("    Average Loss: {:.4f}".format(avg_train_loss))
        return avg_train_loss
Example no. 8
def main():
    # ===========================================================
    # Set train dataset & valid dataset
    # ===========================================================
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('===> Loading datasets')
    if args.dataset == 'test':
        test_set = get_test_set(args.upscale_factor)
    elif args.dataset == 'valid':
        test_set = get_valid_set(args.upscale_factor)
    else:
        raise NotImplementedError
    test_data_loader = DataLoader(dataset=test_set,
                                  batch_size=args.batchSize,
                                  shuffle=False)

    file_name = args.model + "_generator.pth" if "gan" in args.model else "model.pth"
    model_name = args.model + ("_diff" if args.diff else "")
    model_path = "/home/teven/canvas/python/super-resolution/results/models/{}/{}".format(
        model_name, file_name)
    model = torch.load(model_path, map_location=lambda storage, loc: storage)
    model = model.to(device)
    model.eval()

    avg_psnr = 0
    avg_baseline_psnr = 0
    criterion = nn.MSELoss()

    with torch.no_grad():
        for batch_num, (data, target) in enumerate(test_data_loader):
            data, target = data.to(device), target.to(device)
            prediction = model(data)
            mse = criterion(prediction, target)
            psnr = 10 * log10(1 / mse.item())
            avg_psnr += psnr
            progress_bar(batch_num, len(test_data_loader),
                         'PSNR: %.3f' % (avg_psnr / (batch_num + 1)))

            baseline = F.interpolate(data,
                                     scale_factor=args.upscale_factor,
                                     mode='bilinear',
                                     align_corners=False)
            baseline_mse = criterion(baseline, target)
            baseline_psnr = 10 * log10(1 / baseline_mse.item())
            avg_baseline_psnr += baseline_psnr
            progress_bar(batch_num, len(test_data_loader),
                         'PSNR: %.3f' % (avg_baseline_psnr / (batch_num + 1)))

    print("    Average PSNR: {:.3f} dB".format(avg_psnr /
                                               len(test_data_loader)))
    print("    Average Baseline PSNR: {:.3f} dB".format(avg_baseline_psnr /
                                                        len(test_data_loader)))
Example no. 9
    def train(self):
        self.model.train()
        train_loss = 0
        for batch_num, (data, target) in enumerate(self.training_loader):
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()
            loss = self.criterion(self.model(data), target)
            train_loss += loss.item()
            loss.backward()
            self.optimizer.step()
            progress_bar(batch_num, len(self.training_loader), 'Loss: %.4f' % (train_loss / (batch_num + 1)))

        print("    Average Loss: {:.4f}".format(train_loss / len(self.training_loader)))
Example no. 10
def load_county(county_state_country,progress_bar=False):
    """
    Given a country name (str)/list, return a dictionary with time history data in a dataframe and rest in other fields
    """
    if isinstance(county_state_country,list):
        pass
    else:
        county_state_country = [county_state_country]

    for n, csc in enumerate(county_state_country):
        if not csc.endswith(', US'):
            county_state_country[n] = csc+', US'

    dfd = load_Counties()

    Combined_Keys = dfd['positive']['Combined_Key'].tolist()

    # Find location and population data
    aux = dfd['death'].T.loc[['Lat','Long_','Population','Combined_Key']]
    aux = aux.rename({'Lat':'lat','Long_':'lon','Population':'pop'},axis='index')
    aux.columns = aux.loc['Combined_Key'] # assign the combined key row as the columns
    aux = aux.drop('Combined_Key')        # now the combined key row is redundant since its embedded in defining the column names

    # Drop unused columns
    for key in dfd:
        if key == 'positive':
            dfd[key] = dfd[key].drop(columns=['UID','iso2','iso3','code3','FIPS','Province_State','Country_Region','Lat','Long_']).groupby('Combined_Key').sum()
        else:
            dfd[key] = dfd[key].drop(columns=['UID','iso2','iso3','code3','FIPS','Province_State','Country_Region','Lat','Long_','Population']).groupby('Combined_Key').sum()

    out = {}

    global mp_dic
    mp_dic = {
        'dfd': dfd,
        'aux': aux,
        'Combined_Keys':Combined_Keys
        }
    Nitems = len(county_state_country)
    Ncpu = min([mp.cpu_count(),Nitems]) # use maximal number of local CPUs
    chunksize = 1
    pool = mp.Pool(processes=Ncpu)
    
    for n,d in enumerate(pool.imap_unordered(mp_load_county,county_state_country,chunksize=chunksize)):
    # for n,c in enumerate(county_state_country):
    #     d = mp_load_county(c)
        if progress_bar:
            pb.progress_bar(n, -(-Nitems // chunksize) - 1)
        out[d['key']] = d['data']
    pool.terminate()
    return out
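The -(-Nitems // chunksize) expression is the usual integer ceiling-division idiom; here it gives the number of chunks handed to imap_unordered, and the trailing - 1 matches the zero-based index n passed as the current position. For illustration, with the same names:

    # ceil(Nitems / chunksize) without importing math
    n_chunks = -(-Nitems // chunksize)   # e.g. Nitems=10, chunksize=3 -> 4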
Example no. 11
    def test(self):
        self.model.eval()
        avg_psnr = 0

        with torch.no_grad():
            for batch_num, (data, target) in enumerate(self.testing_loader):
                data, target = data.to(self.device), target.to(self.device)
                prediction = self.model(data)
                mse = self.criterion(prediction, target)
                psnr = 10 * log10(1 / mse.item())
                avg_psnr += psnr
                progress_bar(batch_num, len(self.testing_loader), 'PSNR: %.4f' % (avg_psnr / (batch_num + 1)))

        print("    Average PSNR: {:.4f} dB".format(avg_psnr / len(self.testing_loader)))
Example no. 12
    def load_pre_built(self):
        querier = SQL_querier()
        numLines = querier.count_entries(self.graph_table_name)
        records = querier.select_table(self.graph_table_name)

        print("\nReading from as_graph table")
        progress = progress_bar(numLines)
        for record in records:
            current_as = AS(record.asn)
            current_as.customers = record.customers
            current_as.peers = record.peers
            current_as.providers = record.providers
            current_as.SCC_id = record.asn
            current_as.rank = record.rank

            if (current_as.rank not in self.ases_by_rank):
                self.ases_by_rank[current_as.rank] = list()
            self.ases_by_rank[current_as.rank].append(current_as.asn)
            self.strongly_connected_components[
                current_as.SCC_id] = record.members

            for asn in record.members:
                current_as.asn = asn
                self.ases[asn] = current_as
            progress.update()
        progress.finish()
        return
Example no. 13
    def combine_components(self):
        """Takes the SCCs of this graph and exchanges them for "super nodes".
            These super nodes have the providers, peers and customers than all
            nodes in the SCC would have. These providers, peers, and customers
            point to the new super node.


        """

        print("\nCombining Components")
        large_components = list()
        for component in self.strongly_connected_components:
            comp = self.strongly_connected_components[component]
            if(len(comp)>1):
                large_components.append(comp)
        
        progress = progress_bar(0,len(large_components))
        for component in large_components: 
            progress.next_job(len(component))

            #Create an AS using an "inner" AS to avoid collision
            #TODO maybe change it to some known unique value, ideally integer
            #grab ASN of first AS in component
            new_asn = self.ases[component[0]].SCC_id
            combined_AS = AS(new_asn)
            combined_AS.SCC_id = new_asn
            combined_cust_anns = list()
            combined_peer_prov_anns = list()

            #get providers, peers, customers from "inner" ASes
            #only if they aren't also in "inner" ASes
            for asn in component:
                for provider in self.ases[asn].providers:
                    if(provider not in component):
                        combined_AS.append_no_dup(combined_AS.providers,provider)
              #          combined_AS.add_neighbor(provider,0)
                        #replace old customer reference from provider
                        prov_AS = self.ases[provider]
                        prov_AS.customers.remove(asn)
                        prov_AS.append_no_dup(prov_AS.customers,new_asn)
                for peer in self.ases[asn].peers:
                    if(peer not in component): 
                        combined_AS.append_no_dup(combined_AS.peers,peer)
               #         combined_AS.add_neighbor(peer,1)
                        peer_AS = self.ases[peer]
                        peer_AS.peers.remove(asn)
                        peer_AS.append_no_dup(peer_AS.peers,new_asn)
                for customer in self.ases[asn].customers:
                    if(customer not in component):
                        combined_AS.append_no_dup(combined_AS.customers,customer)
                #        combined_AS.add_neighbor(customer,2)
                        cust_AS = self.ases[customer]
                        cust_AS.providers.remove(asn)
                        cust_AS.append_no_dup(cust_AS.providers,new_asn)
                self.ases[asn] = combined_AS
                progress.update()

            self.ases[combined_AS.asn] = combined_AS
        progress.finish()
        return
Example no. 14
def mosaic(tiles, pad=False, scatter=False, margin=0, scaled_margin=False,
           background=(255, 255, 255)):
    """Return the mosaic image."""
    # Infer dimensions so they don't have to be passed in the function call.
    dimensions = [max(vals) for vals in zip(*[(1 + tile.x, 1 + tile.y) for tile in tiles])]
    mosaic_size = tuple(a * d for a, d in zip(tiles[0].ancestor_size, dimensions))
    mos = Image.new('RGB', mosaic_size, background)
    pbar = progress_bar(len(tiles), "Scaling and placing tiles")
    random.shuffle(tiles)
    for tile in tiles:
        if tile.blank:
            pbar.next()
            continue
        if pad:
            size = shrink_by_lightness(pad, tile.size, tile.match['dL'])
            if margin == 0:
                margin = min(tile.size[0] - size[0], tile.size[1] - size[1])
        else:
            size = tile.size
        if scaled_margin:
            pos = tile_position(tile, size, scatter, margin//(1 + tile.depth))
        else:
            pos = tile_position(tile, size, scatter, margin)
        mos.paste(crop_to_fit(tile.match_img, size), pos)
        pbar.next()
    return mos
Example no. 15
def analyze(tiles):
    """Determine dominant colors of target tiles, and save that information
    in the Tile object."""
    pbar = progress_bar(len(tiles), "Analyzing images")
    for tile in tiles:
        analyze_one(tile)
        pbar.next()
Example no. 16
 def assemble(self, pad=False, scatter=False, margin=0, scaled_margin=False,
        background=(255, 255, 255), new_width=None):
     """Create the mosaic image.""" 
     # Infer dimensions so they don't have to be passed in the function call.
     mosaic_size = map(max, zip(*[(tile.x+tile.w, tile.y+tile.h) for tile in self.tiles]))
     if new_width is None:
         scale = 1.0
     else:
         scale = new_width / mosaic_size[0]
         mosaic_size = (new_width, int(scale * mosaic_size[1]))
     
     mos = Image.new('RGB', mosaic_size, background)
     pbar = progress_bar(len(self.tiles), "Scaling and placing tiles")
     random.shuffle(self.tiles)
     for tile in self.tiles:
         if False: #pad: TODO!
             size = shrink_by_lightness(pad, tile.size, tile.match['dL'])
             if margin == 0:
                 margin = min(tile.size[0] - size[0], tile.size[1] - size[1])
         else:
             size = int(tile.w*scale), int(tile.h*scale)
         if scaled_margin:
             pos = tile.get_position(size, scatter, margin//(1 + tile.depth)) #TODO
         else:
             pos = int(tile.x*scale), int(tile.y*scale) #tile.get_position(size, scatter, margin)
             
         fn = self.matches[tile][4]
         mi = Image.open(fn)
         mos.paste(crop_to_fit(mi, size), pos)
         pbar.next()
     self.mos = mos
Example no. 17
    def test(self):
        for i in range(self.numModels):
            self.models[i].eval()
        avg_psnr = 0

        with torch.no_grad():
            for batch_num, (datas, targets) in enumerate(self.testing_loader):
                for i in range(len(datas)):
                    data_unbaselined = datas[i if not self.
                                             predictColors else 0]
                    data = self.prepArr(data_unbaselined).to(self.device)

                    target_unbaselined = targets[i]
                    target = self.prepArr(target_unbaselined).to(self.device)

                    prediction = self.models[i if self.allLayers else 0](data)
                    prediction_squeezed = self.prepArr(prediction)
                    prediction_unbaselined = self.unprepArr(
                        prediction_squeezed, data_unbaselined)

                    mse = self.criterion(prediction_squeezed, target)
                    psnr = 10 * log10(1 / mse.item())
                    avg_psnr += psnr

                    global print1
                    if print1 and batch_num == 0 and theEpoch == self.nEpochs:

                        print("data shape ", data.shape)
                        plt.imshow(
                            self.unprepArr(data, data_unbaselined)[
                                0, 0, :, :].numpy()), plt.title('data')
                        plt.xticks([]), plt.yticks([])
                        plt.show()

                        print("prediction shape ", prediction.shape)
                        plt.imshow(prediction[
                            0, 0].detach().numpy()), plt.title('prediction')
                        plt.xticks([]), plt.yticks([])
                        plt.show()

                        if i == 2:
                            print1 = False
                progress_bar(batch_num, len(self.testing_loader),
                             'PSNR: %.4f' % (avg_psnr / (batch_num + 1)))

        print("    Average PSNR: {:.4f} dB".format(avg_psnr /
                                                   len(self.testing_loader)))
Example no. 18
    def writeDataset(self,
                     data_dir: dir,
                     dataset_file: dir,
                     train_on_tiles=False):
        """
            Creates a dataset out of the images and labels found in data_dir and writes it to the dataset_file.
            Warning! It will overwrite the dataset_file!

            Inputs:
                data_dir : dir - folder which contains the pictures and labels
                dataset_file : dir - location where the dataset is to be written

            Outputs:
                None
        """
        pics = self.get_image_list(data_dir)

        images, labels = (0, 0)

        #random.seed(RANDOM_SEED)
        training_set_size = int(TRAINING_SET_RATIO * len(pics))
        training_pics = pics[:training_set_size]
        #training_pics = pics

        with tf.python_io.TFRecordWriter(dataset_file) as output_file:
            for i in range(len(training_pics)):
                picloc = training_pics[i]
                progress_bar(i + 1, len(training_pics), "Writing dataset")
                fullpicloc = join(data_dir, picloc)
                pic = scipy.ndimage.imread(fullpicloc, mode="L")
                label = get_bounding_box(fullpicloc)

                pic, label = self.preprocess(pic, label, train_on_tiles)

                pic_feature = _createBytesFeature(pic)
                label_feature = _createBytesFeature(np.asarray(label))

                feature = {
                    'train/image': pic_feature,
                    'train/label': label_feature
                }

                example = tf.train.Example(features=tf.train.Features(
                    feature=feature))
                output_file.write(example.SerializeToString())
Example no. 19
    def train(self):
        self.model.train()
        train_loss = 0
        for batch_num, (data) in enumerate(self.training_loader):
            data, target = data[:, :2, ::3, ::3].to(
                self.device), data[:, :2].to(self.device)
            data = F.interpolate(data,
                                 size=(target.shape[-2], target.shape[-1]))
            self.optimizer.zero_grad()  # clear gradients left over from the previous batch
            target_d, output = self.model(data)

            # loss1
            loss_1 = 0
            for d in range(self.num_recursions):
                loss_1 += (self.criterion(target_d[d], target) /
                           self.num_recursions)

            # loss2
            loss_2 = self.criterion(output, target)

            # regularization
            reg_term = 0
            for theta in self.model.parameters():
                reg_term += torch.mean(torch.sum(theta**2))

            # total loss
            loss = self.loss_alpha * loss_1 + (
                1 - self.loss_alpha) * loss_2 + self.loss_beta * reg_term
            loss.backward()

            train_loss += loss.item()
            self.optimizer.step()
            progress_bar(
                batch_num, len(self.training_loader), 'Loss: %.4f, LR: %.4f' %
                (train_loss /
                 (batch_num + 1), self.optimizer.param_groups[0]['lr']))
            self.writer.add_scalar(
                'Train/TrainIterLoss_%s' % self.config.comment, loss.item(),
                self.epoch * len(self.training_loader) + batch_num)
            self.writer.add_scalar(
                'Train/TrainAvgLoss_%s' % self.config.comment,
                train_loss / (batch_num + 1),
                self.epoch * len(self.training_loader) + batch_num)

        print("    Average Loss: {:.4f}".format(train_loss /
                                                len(self.training_loader)))
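In this trainer the objective is loss = loss_alpha * (1 / num_recursions) * sum_d criterion(target_d[d], target) + (1 - loss_alpha) * criterion(output, target) + loss_beta * sum_theta mean(sum(theta ** 2)), i.e. a weighted blend of the per-recursion losses and the final-output loss plus an L2 penalty on the weights, in the style of DRCN-type recursive super-resolution models.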
Example no. 20
    def add_directory(self, image_dir, skip_errors=True):
        walker = DirectoryWalker(image_dir)
        file_count = len(list(walker))  # stupid but needed for the progress bar
        pbar = progress_bar(file_count, "Analyzing images and building db")

        for filename in walker:
            self.add_image(filename, skip_errors)
            pbar.next()
        logger.info('Collection %s built with %d images'%(self.db_name, len(self)))
Example no. 21
 def backpropagation(self):
     grad_dim1 = np.zeros((785, 50))
     grad_dim2 = np.zeros((51, 50))
     grad_dim3 = np.zeros((51, 10))
     for i in range(self.m):
         ex = self.images[i]
         # First layer forward prop
         z2 = np.matmul(self.THETA1.T, ex)
         a2 = self.sigmoid(z2)
         a2 = np.insert(a2, 0, 1, axis=0)
         # Second layer forward prop
         z3 = np.matmul(self.THETA2.T, a2)
         a3 = self.sigmoid(z3)
         a3 = np.insert(a3, 0, 1, axis=0)
         # Output layer forward prop
         z4 = np.matmul(self.THETA3.T, a3)
         a4 = self.sigmoid(z4)
         # Backprop first layer
         delta4 = a4 - self.labels[i]
         # Backprop second layer
         delta3 = np.multiply(np.matmul(self.THETA3, delta4),
                              np.multiply(a3, 1 - a3))
         # Backprop third layer
         delta2 = np.multiply(np.matmul(self.THETA2, delta3[1:]),
                              np.multiply(a2, 1 - a2))
         grad1 = np.matmul(ex.reshape(785, 1), delta2[1:].reshape(1, 50))
         grad2 = np.matmul(a2.reshape(51, 1), delta3[1:].reshape(1, 50))
         grad3 = np.matmul(a3.reshape(51, 1), delta4.reshape(1, 10))
         grad_dim1 += grad1
         grad_dim2 += grad2
         grad_dim3 += grad3
         progress_bar(100, int(i / (self.m / 100)), "Backpropagation")
     print("")
     grad_dim1 = (1 / self.m) * grad_dim1
     grad_dim2 = (1 / self.m) * grad_dim2
     grad_dim3 = (1 / self.m) * grad_dim3
     grad_dim1[1:] = grad_dim1[1:] + (self.regul_lambda /
                                      self.m) * self.THETA1[1:]
     grad_dim2[1:] = grad_dim2[1:] + (self.regul_lambda /
                                      self.m) * self.THETA2[1:]
     grad_dim3[1:] = grad_dim3[1:] + (self.regul_lambda /
                                      self.m) * self.THETA3[1:]
     return np.array([grad_dim1, grad_dim2, grad_dim3], dtype=object)
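The forward pass above assumes self.sigmoid is the standard logistic activation (its derivative a * (1 - a) is exactly the np.multiply(a, 1 - a) factor in the delta terms) and that self.labels[i] is a one-hot vector of length 10. A one-line sketch of the assumed helper:

    import numpy as np

    def sigmoid(z):
        # logistic function; derivative is sigmoid(z) * (1 - sigmoid(z))
        return 1.0 / (1.0 + np.exp(-z))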
Example no. 22
def load_country(country, progress_bar=False, chunksize=1):
    """
    Given a country name (str) or a list of names, return a dictionary with the data for each country
    """

    if isinstance(country, list):
        pass
    else:
        country = [country]

    out = {}

    dfd = load_Global()

    pop = get_country_population(country)
    lonlat = get_country_location(country)
    Combined_Keys = dfd['positive']['Country/Region'].tolist()

    for key in dfd:
        dfd[key] = dfd[key].drop(columns=['Province/State', 'Lat', 'Long'
                                          ]).groupby('Country/Region').sum()

    global mp_dic
    mp_dic = {
        'dfd': dfd,
        'pop': pop,
        'lonlat': lonlat,
        'Combined_Keys': Combined_Keys,
    }
    Nitems = len(country)
    Ncpu = min([mp.cpu_count(), Nitems])  # use maximal number of local CPUs
    chunksize = 1
    pool = mp.Pool(processes=Ncpu)

    #for n,d in enumerate(pool.imap_unordered(mp_load_country,country,chunksize=chunksize)):
    for n, do in enumerate(country):
        d = mp_load_country(do)

        if progress_bar:
            pb.progress_bar(n, -(-Nitems // chunksize) - 1)
        out[d['key']] = d['data']
    pool.terminate()
    return out
Example no. 23
    def test(self):
        """
        data: [torch.cuda.FloatTensor], 10 batches: [10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
        """
        self.model.eval()
        avg_psnr = 0

        with torch.no_grad():
            for batch_num, (data, target) in enumerate(self.testing_loader):
                data = self.img_preprocess(data)  # resize input image size
                data, target = data.to(self.device), target.to(self.device)
                _, prediction = self.model(data)
                mse = self.criterion(prediction, target)
                psnr = 10 * log10(1 / mse.item())
                avg_psnr += psnr
                progress_bar(batch_num, len(self.testing_loader),
                             'PSNR: %.4f' % (avg_psnr / (batch_num + 1)))

        print("    Average PSNR: {:.4f} dB".format(avg_psnr /
                                                   len(self.testing_loader)))
Example no. 24
 def match(self, tolerance=1, usage_penalty=1, usage_impunity=2):
     """Assign each tile a new image, and open that image in the Tile object."""
     if len(self.pool)==0:
         logger.error('No images in pool to match!')
         exit(-1)
     self.pool.reset_usage()
     
     pbar = progress_bar(len(self.tiles), "Choosing and loading matching images")
     for tile in self.tiles:
         self.match_one(tile, tolerance, usage_penalty, usage_impunity)
         pbar.next()
Example no. 25
def createLabelFiles(path: dir):
    samples_with_label = collectSamplesWithLabel(path, 'license plate')
    number_of_samples = len(samples_with_label)
    i = 0
    for file_loc in samples_with_label:
        progress_bar(i, number_of_samples, 'Converting labels')
        example = Example(file_loc)
        bb_label_loc = example.basename + ".txt"
        if not os.path.isfile(bb_label_loc):
            license_plate_mask = [50, 159]
            license_plates = findBoundingBox(example, 'license plate',
                                             license_plate_mask)
            license_plates_as_list = map(lambda x: x[1].getAsList(),
                                         license_plates.items())
            with open(bb_label_loc, 'w') as f:
                for license_plate_coords in license_plates_as_list:
                    line = str(license_plate_coords)[
                        1:-1]  # get rid of leading '[' and trailing ']'
                    f.write(line + '\n')
        i += 1
    progress_bar(number_of_samples, number_of_samples, 'Converting labels')
Example no. 26
def extract_embeddings_from_directory(unknown_photo_directory,
                                      alpha_named_files=True):
    '''
    Takes the name of a directory
    Creates a list of embeddings of all photos in the directory
    Creates a list of the file names in the directory
    '''
    count = 1
    embeddings = []
    img_files = []
    file_list = os.listdir(unknown_photo_directory)
    print('Extracting embeddings from ' + unknown_photo_directory)
    for img_file in file_list:
        full_filename = unknown_photo_directory + '/' + img_file
        progress_bar(count, len(file_list))
        #print("Loading file ",count," of ",len(file_list),'\t',img_file)
        count += 1
        # Load image file
        try:
            img = face_recognition.load_image_file(full_filename)
        except (ValueError, OSError) as e:
            print("Failed to open file ", img_file, file=sys.stderr)
            continue
        # Extract embedding
        try:
            encoding = face_recognition.face_encodings(img)[0]
        except IndexError:
            print("Unable to locate face in photo. Check image file",
                  img_file,
                  file=sys.stderr)
            continue
        # Add to list
        embeddings.append(encoding)
        if alpha_named_files:
            alpha = img_file[:6]
            img_files.append(alpha)
        else:
            img_files.append(img_file)
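    # clear the in-place progress line and move the cursor up two rows (ANSI escape codes)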
    print('\r' + ' ' * 80 + '\r' + '\033[2A')
    return embeddings, img_files
Example no. 27
def afterburner_train(language,
                      phase,
                      release,
                      model_fn,
                      new_model_fn,
                      epochs,
                      batch_size=32):
    C, model, SRC, TRG, device, train_iterator, _ = afterburner_pretrained_model(
        language, phase, release, model_fn, batch_size)
    LEARNING_RATE = 0.0005
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
    TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
    criterion = torch.nn.CrossEntropyLoss(ignore_index=TRG_PAD_IDX)
    model.train()
    print(f'{len(train_iterator)} batches / epoch')
    epoch_loss = 9999999999999999
    fig, ax = plt.subplots(1, 1)
    ax.set_xlabel('epochs')
    ax.set_ylabel('Loss')
    losses = []
    for j in tqdm(range(epochs)):
        epoch_loss = 0
        for i, batch in enumerate(tqdm(train_iterator)):
            src = batch.src.to(device)
            trg = batch.trg.to(device)
            optimizer.zero_grad()
            output, _ = model(src, trg[:, :-1])
            output_dim = output.shape[-1]
            output = output.contiguous().view(-1, output_dim)
            trg = trg[:, 1:].contiguous().view(-1)
            loss = criterion(output, trg)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            optimizer.step()
            epoch_loss += loss.item()
        losses.append(epoch_loss)
        progress_bar(fig, ax, losses)
        torch.save(model.state_dict(), f'{new_model_fn}.{j}')
    torch.save(model.state_dict(), new_model_fn)
Example no. 28
    def train(self):
        # models setup
        self.netG.train()
        self.netD.train()
        g_train_loss = 0
        d_train_loss = 0
        for batch_num, (data, target) in enumerate(self.training_loader):
            # setup noise
            real_label = torch.ones(data.size(0), data.size(1)).to(self.device)
            fake_label = torch.zeros(data.size(0), data.size(1)).to(self.device)
            data, target = data.to(self.device), target.to(self.device)

            # Train Discriminator
            self.optimizerD.zero_grad()
            d_real = self.netD(target)
            d_real_loss = self.criterionD(d_real, real_label)

            d_fake = self.netD(self.netG(data))
            d_fake_loss = self.criterionD(d_fake, fake_label)
            d_total = d_real_loss + d_fake_loss
            d_train_loss += d_total.item()
            d_total.backward()
            self.optimizerD.step()

            # Train generator
            self.optimizerG.zero_grad()
            g_real = self.netG(data)
            g_fake = self.netD(g_real)
            gan_loss = self.criterionD(g_fake, real_label)
            mse_loss = self.criterionG(g_real, target)

            g_total = mse_loss + 1e-3 * gan_loss
            g_train_loss += g_total.item()
            g_total.backward()
            self.optimizerG.step()

            progress_bar(batch_num, len(self.training_loader), 'G_Loss: %.4f | D_Loss: %.4f' % (g_train_loss / (batch_num + 1), d_train_loss / (batch_num + 1)))

        print("    Average G_Loss: {:.4f}".format(g_train_loss / len(self.training_loader)))
Example no. 29
def test(path, testing_loader, pae):
    model = torch.load(path)

    model.eval()
    mse = torch.nn.MSELoss()
    avg_ssim = 0
    avg_psnr = 0
    max_bound = 0
    with torch.no_grad():
        for batch_num, (data, target) in enumerate(testing_loader):
            data, target = data.cuda(), target.cuda()
            prediction = model(data)
            prediction = (prediction.cpu() * 128 * pae * 0.7 + 128)
            target = (target.cpu() * 128 * pae * 0.7 + 128)

            mse_value = mse(prediction, target)
            psnr = 10 * log10(65025 / mse_value.item())
            avg_psnr += psnr
            progress_bar(batch_num, len(testing_loader),
                         'PSNR: %.4f' % (avg_psnr / (batch_num + 1)))
            abs_value = np.abs(prediction - target)
            abs_value = abs_value.numpy()
            if abs_value.max() > max_bound:

                max_bound = abs_value.max()
            prediction = prediction.numpy()

            prediction = np.reshape(prediction,
                                    (prediction.shape[2], prediction.shape[3]))
            target = np.reshape(target, (target.shape[2], target.shape[3]))
            ssim = compute_ssim(prediction, target)
            avg_ssim += ssim
            # break

    print("  Average PSNR: {:.4f} dB".format(avg_psnr / len(testing_loader)))
    print("  Average SSIM: {:.4f} dB".format(avg_ssim / len(testing_loader)))
    print("bound: " + str(max_bound))
    return avg_psnr / len(testing_loader), avg_ssim / len(
        testing_loader), max_bound
Example no. 30
    def partition_tiles(self, dimensions=10, depth=0, hdr=80, analyze=True):
        "Partition the target image into a list of Tile objects."
        self.p = Partition(self.img, self.mask)
        self.p.simple_partition(dimensions)
        self.p.recursive_split(depth, hdr)    
            
        self.tiles = self.p.get_tiles()

        if not analyze:
            return
        pbar = progress_bar(len(self.tiles), "Analyzing images")
        for tile in self.tiles:
            self.analyze_one(tile)
            pbar.next()
Example no. 31
    def train(self):
        """
        data: [torch.cuda.FloatTensor], 4 batches: [64, 64, 64, 8]
        """
        self.model.train()
        train_loss = 0
        for batch_num, (data, target) in enumerate(self.training_loader):
            data = self.img_preprocess(data)  # resize input image size
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()  # clear gradients left over from the previous batch
            target_d, output = self.model(data)

            # loss1
            loss_1 = 0
            for d in range(self.num_recursions):
                loss_1 += (self.criterion(target_d[d], target) /
                           self.num_recursions)

            # loss2
            loss_2 = self.criterion(output, target)

            # regularization
            reg_term = 0
            for theta in self.model.parameters():
                reg_term += torch.mean(torch.sum(theta**2))

            # total loss
            loss = self.loss_alpha * loss_1 + (
                1 - self.loss_alpha) * loss_2 + self.loss_beta * reg_term
            loss.backward()

            train_loss += loss.item()
            self.optimizer.step()
            progress_bar(batch_num, len(self.training_loader),
                         'Loss: %.4f' % (train_loss / (batch_num + 1)))

        print("    Average Loss: {:.4f}".format(train_loss /
                                                len(self.training_loader)))
Example no. 32
def matchmaker(tiles, db_name, tolerance=1, usage_penalty=1, usage_impunity=2):
    """Assign each tile a new image, and open that image in the Tile object."""
    db = connect(db_name)
    try:
        reset_usage(db)
        pbar = progress_bar(len(tiles), "Choosing and loading matching images")
        for tile in tiles:
            if tile.blank:
                pbar.next()
                continue
            tile.match = choose_match(tile.lab, db, tolerance,
                usage_penalty if tile.depth < usage_impunity else 0)
            pbar.next()
    finally:
        db.close()
Example no. 33
    def train(self):
        self.model.train()
        train_loss = 0
        for batch_num, (data, target) in enumerate(self.training_loader):
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()
            loss = self.criterion(self.model(data), target)
            train_loss += loss.item()
            loss.backward()
            self.optimizer.step()
            total_time = progress_bar(
                batch_num, len(self.training_loader),
                'Loss: %.4f' % (train_loss / (batch_num + 1)))

        avg_loss = train_loss / len(self.training_loader)
        return [avg_loss, total_time]
Example no. 34
    def save_anns_to_db(self):
        print("Saving Propagation results to DB")
        progress = progress_bar(len(self.graph.ases))
        start_time = time.time()

        for asn in self.graph.ases:
            AS = self.graph.ases[asn]
            #    if asn == AS.SCC_id:
            sql_anns_arg = AS.anns_to_sql()
            self.querier.insert_results(asn, sql_anns_arg)
            progress.update()
        progress.finish()
        end_time = time.time()
        print("Time To Save Announcements: " + str(end_time - start_time) +
              "s")
        return
Example no. 35
def download_video(url, folder):
    try:
        yt = YouTube(url)

    except Exception as e:
        print ("Error:", e, "- skipping video with url:",url)
        return

    #video should be downloaded in 720p
    try:
        vid = yt.get("mp4","720p")

    #else tries to get the highest resolution available
    except Exception:
        vid = yt.filter("mp4")[-1]

    #download video
    try:
        bar = progress_bar()#init progress_bar
        vid.download(folder,on_progress = bar.print_progress, on_finish = bar.print_end)
        print("Successfully downloaded", yt.filename, " !")

    except OSError:
        print(yt.filename, "already exists in the directory. Skipping video...")
        return
    
    #converts video to audio
    try:
        aud = "ffmpeg -i " + folder + "/" + "\"" + str(yt.filename) + "\"" + ".mp4 " + folder + "/" + "\"" + str(yt.filename) + "\"" + ".mp3"
        print(aud)
        os.system(aud)

        if os.path.exists(folder + "\\" + yt.filename + ".mp4"):
            os.remove(folder + "\\" + yt.filename + ".mp4")

        print("Successfully converted", yt.filename, "to mp3!")

    except OSError:
        print("There are some problems with the file name(s), skipping video..")
        return
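Building the ffmpeg command by string concatenation (with hand-inserted quotes) is fragile; a hedged alternative using the standard library, with the same paths assumed from the snippet above:

    import os
    import subprocess

    src = os.path.join(folder, yt.filename + ".mp4")
    dst = os.path.join(folder, yt.filename + ".mp3")
    subprocess.run(["ffmpeg", "-i", src, dst], check=True)  # argument list avoids shell quoting issues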
Example no. 36
def pool(image_dir, db_name):
    """Analyze all the images in image_dir, and store the results in
    a sqlite database at db_name."""
    db = connect(db_name)
    try:
        create_tables(db)
        walker = DirectoryWalker(image_dir)
        file_count = len(list(walker))  # stupid but needed for the progress bar
        pbar = progress_bar(file_count, "Analyzing images and building db")
        walker = DirectoryWalker(image_dir)
        for filename in walker:
            if in_db(filename, db):
                logger.warning("Image %s is already in the table. Skipping it."%filename)
                pbar.next()
                continue
            try:
                img = Image.open(filename)
            except IOError:
                logger.warning("Cannot open %s as an image. Skipping it.",
                               filename)
                pbar.next()
                continue
            if img.mode != 'RGB':
                logger.warning("RGB images only. Skipping %s.", filename)
                pbar.next()
                continue
            w, h = img.size
            try:
                regions = split_quadrants(img)
                rgb = map(dominant_color, regions)
                lab = map(cs.rgb2lab, rgb)
            except Exception:
                logger.warning("Unknown problem analyzing %s. Skipping it.",
                               filename)
                continue
            insert(filename, w, h, rgb, lab, db)
            pbar.next()
        db.commit()
        logger.info('Collection %s built with %d images'%(db_name, get_size(db)))
    finally:
        db.close()
Example no. 37
def fit(
    fn,
    cor,
    tmin,
    tmax,
    filestub=None,
    bootstraps=NBOOTSTRAPS,
    return_quality=False,
    return_chi=False,
    writecor=True,
    tstride=1,
    options=None,
):
    if tmax - tmin < len(fn.parameter_names):
        raise InvalidFit("Can not fit to less points than parameters")

    if options.random:
        logging.info("Setting random seed to %s", options.random)
        np.random.seed(options.random)

    eval_file = None
    results = logging.getLogger("results")
    if filestub and not results.handlers:
        filename = filestub + ".stats"
        filehandler = logging.FileHandler(filename)
        filehandler.level = OUTPUT
        results.addHandler(filehandler)
        logging.info("Writing output to file {}".format(filename))

    if filestub:
        eval_filename = filestub + ".evals"
        eval_file = open(eval_filename, "w")
        eval_file.write("# evals seqeuntial\n")

        tstride_filename = filestub + ".tstride"
        tstride_file = open(tstride_filename, "w")
        tstride_file.write("{}\n".format(tstride))

    results.info(
        "Fitting data to {} from t={} to t={} using {} bootstrap samples".format(fn.description, tmin, tmax, bootstraps)
    )

    if fn.subtract:
        logging.debug("before suctracted correlator is:")
        logging.debug(cor.average_sub_vev())
        ccor = deepcopy(cor)
        cor = ccor
        fn.subtract = tmin - 1
        cor.subtract(tmin - 1)
        logging.debug("subtracted correlator is:")
        logging.debug(cor.average_sub_vev())

    # tmax = tmax+1  # I use ranges, so this needs to be offset by one
    fitrange = range(tmin, tmax + 1, tstride)
    fun = lambda v, mx, my: (fn.formula(v, mx) - my)
    initial_guess = fn.starting_guess(cor, options.period, tmax, tmin)
    logging.info("Starting with initial_guess: {}".format(repr(initial_guess)))

    try:
        ranges = []
        for i in fn.indexes:
            ranges.extend(range(i[0], i[1] + 1, tstride))
        fitrange = ranges
    except AttributeError:
        logging.info("no indexes on fit function, using normal fitrange")

    x = np.array(fitrange)
    dof = len(x) - len(fn.parameter_names)
    orig_ave_cor = cor.average_sub_vev()
    y = [orig_ave_cor[t] for t in fitrange]
    logging.info("x {}".format(x))
    logging.info("y {}".format(y))
    original_ensamble_params, success = leastsq(fun, initial_guess, args=(x, y), maxfev=10000)

    original_cov = covariance_matrix(cor, fitrange)
    logging.info("inverting original cov")
    ignore_error_original_cov = options.debug_ignoreinverterror or options.debug_uncorrelated
    inv_original_cov = bestInverse(original_cov, print_error=True, ignore_error=ignore_error_original_cov)

    logging.info("original ensemble full cov")
    matrix_stats(original_cov, None, cond=True)

    if options.debug_singlecov:
        logging.info("original ensemble single cov")
        original_cov = covariance_matrix(cor, fitrange)
        inv_original_cov = bestInverse(original_cov, print_error=True, ignore_error=options.debug_ignoreinverterror)
        matrix_stats(original_cov, eval_file, cond=True)

    if options.debug_singleuncorrelated:
        logging.debug("Using uncorrlated")
        jke = cor.jackknifed_errors()
        original_cov = np.diag([jke[t] ** 2 for t in fitrange])
        inv_original_cov = bestInverse(original_cov, print_error=True, ignore_error=options.debug_ignoreinverterror)
        matrix_stats(original_cov, eval_file, cond=True)

    if options.debug_outputcov:

        def invert_error(M, i):
            return np.max(np.abs((np.dot(M, i) - np.identity(len(i)))))

        def invert_error_one(M, i):
            return np.sum(np.abs((np.dot(M, i) - np.identity(len(i)))))

        def invert_error_two(M, i):
            return np.sum(((np.dot(M, i) - np.identity(len(i)))) ** 2)

        logging.info("inv=\n{}".format(inv_original_cov))
        logging.info("invert errors:")
        logging.info("inv error max norm {}".format(invert_error(original_cov, inv_original_cov)))
        logging.info("inv error one norm {}".format(invert_error_one(original_cov, inv_original_cov)))
        logging.info("inv error two norm {}".format(invert_error_two(original_cov, inv_original_cov)))
        exit(0)

    if options.debugguess:
        # return original_ensamble_params, [0.01, 0.01, 0.01, 0.01] # For testing initial guess in plot
        if options.plot:
            plot_fit(fn, cor, tmin, tmax, options, initial_guess)
        return initial_guess, [0.01, 0.01, 0.01, 0.01]  # For testing initial guess in plot
    if not success:
        raise InvalidFit("original exnamble leastsq failed")
    if options.first_pass:
        initial_guess = original_ensamble_params
        logging.info("initial_guess after first pass: {}".format(repr(initial_guess)))

    def cov_fit(correlator, guess):
        ave_cor = correlator.average_sub_vev()
        y = [ave_cor[t] for t in fitrange]

        if options.debug_uncorrelated:
            logging.debug("Using uncorrlated")
            fcov = covariance_matrix(correlator, fitrange)
            cov = np.diag(np.diag(fcov))
            # jke = correlator.jackknifed_errors()
            # cov = np.diag([jke[t]**2 for t in fitrange])

        elif options.debug_singlecov:
            cov = original_cov
        else:
            cov = covariance_matrix(correlator, fitrange)

        matrix_stats(cov, eval_file)

        if options.debug_singlecov or options.debug_singleuncorrelated:
            inv_cov = inv_original_cov
        else:
            inv_cov = bestInverse(cov, ignore_error=options.debug_ignoreinverterror)

        matrix_stats(cov, eval_file)

        if options.debug_identcov:
            results.log(30, "using identcov debug option")
            inv_cov = np.identity(len(cov))

        aoc = np.array([ave_cor[t] for t in fitrange])
        # logging.debug("guess {}".format(str(guess)))

        def cov_fun(g):
            """ Function to be minizied. computed using matrix mult"""
            vect = aoc - fn.formula(g, x)
            return vect.dot(inv_cov).dot(vect)

        if options.first_pass:
            uncorrelated_fit_values, success = leastsq(fun, guess, args=(x, y), maxfev=100000)
            if not success:
                raise InvalidFit("leastsq failed")
            logging.debug("firstpass guess {}".format(str(uncorrelated_fit_values)))
            if guess[0] < 0.0:
                logging.warn("first pass found mass to be negative {}, lets not use it".format(guess[0]))
            else:
                guess = uncorrelated_fit_values

            if len(guess) > 2 and guess[2] < 0.0:
                logging.warn("first pass found mass2 to be negative {}, lets flip it".format(guess[2]))
                logging.info("first pass results are {}".format(repr(guess)))
                guess[2] = -guess[2]

        def clamp(n, minn, maxn):
            return max(min(maxn, n), minn)

        bounded_guess = [clamp(g, b[0], b[1]) for g, b in zip(guess, fn.bounds)]
        # logging.debug("guess {}, bounded guess {}".format(repr(guess), repr(bounded_guess)))

        m = fn.custom_minuit(aoc, inv_cov, x, guess=bounded_guess)
        # m.set_strategy(2)
        migradinfo = m.migrad()
        minuit_results = [m.values[name] for name in fn.parameter_names]
        chisqr = migradinfo[0]["fval"]
        if m.get_fmin().is_valid:
            return minuit_results, chisqr
        else:
            logging.error("minuit failed!!")
            logging.error("was at {}".format(minuit_results))
            raise InvalidFit("minuit failed")

    # end cov_fit

    original_ensamble_correlatedfit, original_ensamble_chisqr = cov_fit(cor, initial_guess)
    isvalidfit = fn.valid(original_ensamble_correlatedfit)
    if not isvalidfit:
        raise InvalidFit("Full ensamble failed")

    boot_params = []
    boot_chisqr = []
    failcount = 0
    attempted = 0

    for strap in bootstrap_ensamble(cor, N=bootstraps, filelog=filestub, jackknife=options.jackknife):

        if options.jackknife:
            bootstraps = len(cor.configs)

        pb = progress_bar.progress_bar(bootstraps)

        attempted += 1
        pb.update(attempted)
        if options.reguess:
            newguess = fn.starting_guess(strap, options.period, tmax, tmin)
        else:
            newguess = initial_guess
        try:
            fitted_params, fitted_chisqr = cov_fit(strap, newguess)
        except (InversionError, InvalidFit) as e:
            if options.debug_ignoreinverterror:
                fitted_params = None
            else:
                raise e
        if fitted_params is not None:
            boot_params.append(fitted_params)
            boot_chisqr.append(fitted_chisqr)
            logging.debug("bootstrap converged")
            if options.write_each_boot:
                write_fitted_cor(
                    fn, strap, tmin, tmax, options, fitted_params, postfix=".bootstrap{}".format(attempted)
                )
            if options.debug:
                plot_fit(fn, strap, tmin, tmax, options, fitted_params, postfix=".bootstrap{}".format(attempted))
        else:
            logging.error("bootstrap failed to converge!")
            # raise InvalidFit("one bootstrap failed")
            # raw_input("test")
            failcount += 1
            logging.debug("fails:{} attempts:{}, ratio:{}".format(failcount, attempted, failcount / float(attempted)))
            # if failcount/float(attempted) > 0.15 and attempted > 40:
            #     raise InvalidFit("more than 20% of boostraps failed to converge")
        del strap
    pb.done()

    if failcount > 0:
        logging.warn("{} bootstraps did not converge!".format(bootstraps - len(boot_params)))
    if len(boot_params) < bootstraps * 0.9:
        logging.error("More that 10% of the straps failed")
        raise InvalidFit("more than 10% of boostraps failed to converge")

    if options.histo:
        plot_histograms(fn.parameter_names, boot_params, options)

    results.info("")
    results.info("Uncorelated total fit: %s", {n: p for n, p in zip(fn.parameter_names, original_ensamble_params)})
    results.info(
        "Correlated total fit:  %s", {n: p for n, p in zip(fn.parameter_names, original_ensamble_correlatedfit)}
    )

    factor = 1
    if options.jackknife:
        factor = np.sqrt(len(cor.configs) - 1)

    boot_averages = np.mean(boot_params, 0)
    boot_std = factor * np.std(boot_params, 0)
    boota = np.array(boot_params)
    upper_quartiles = [stats.scoreatpercentile(boota[:, i], 75) for i in range(len(boot_averages))]
    medians = [stats.scoreatpercentile(boota[:, i], 50) for i in range(len(boot_averages))]
    lower_quartiles = [stats.scoreatpercentile(boota[:, i], 25) for i in range(len(boot_averages))]
    inter_range = [
        stats.scoreatpercentile(boota[:, i], 75) - stats.scoreatpercentile(boota[:, i], 25)
        for i in range(len(boot_averages))
    ]

    for name, boot, original, err in zip(fn.parameter_names, boot_averages, original_ensamble_correlatedfit, boot_std):
        bias = abs(boot - original)
        percent_bias = abs(boot - original) / original
        results.info("Bootstrap Bias in {:<10}: {:.3%}".format(name, percent_bias))
        if bias > err * 2 and (err > 0.0):
            results.error("Bootstrap Bias in {:<10}: {:.3%}".format(name, percent_bias))
            results.error(
                "Bootstrap average does not agree with ensamble average!"
                "\nNot enough statistics for this for to be valid!!!\n"
            )
            if not options.unsafe:
                results.critical("Exiting! Run with --unsafe to fit anyway")
                raise InvalidFit("Bootstrap average does not agree with ensamble average")

    for name, ave, med, std, iqr in zip(fn.parameter_names, boot_averages, medians, boot_std, inter_range):
        skew = abs(ave - med) / ave
        dist_skew = abs(std - iqr) / iqr
        if skew > 1.0:
            results.error("{}: diff of bstrap average and bstrap med is {:.3%}".format(name, skew))
            results.error("Bootstrap distrubtion is skewed!!")
            if not options.unsafe:
                results.critical("Exiting! Run with --unsafe to fit anyway")
                raise InvalidFit("Bootstrap average does not agree with ensamble average")
        else:
            results.info("{}: diff of bstrap average and bstrap med is {:.3%}".format(name, skew))

    results.info("")
    try:
        results.log(OUTPUT, "Fit ranges ({}), ({})".format(*fn.ranges))
    except Exception as e:
        pass

    results.log(OUTPUT, "Full ensemble fitted parameters t={}---------------------".format(fitrange))
    results.log(OUTPUT, "Name      : Average,        STD,           (1st Quart, Median, 3rd Quart, IQR)")
    for name, ave, std, low, med, up, iqr in zip(
        fn.parameter_names, boot_averages, boot_std, upper_quartiles, medians, lower_quartiles, inter_range
    ):
        results.log(
            OUTPUT,
            u"{:<10}: {:<15.10f} \u00b1 {:<10g}   ({:<9.6f}, {:<9.6f}, {:<9.6f}, {:<9.6f})".format(
                name, ave, std, low, med, up, iqr
            ),
        )
    results.log(OUTPUT, "--------------------------------------------------------")

    v = original_ensamble_correlatedfit

    chi_average = np.mean(boot_chisqr, 0)
    chi_median = np.median(boot_chisqr, 0)
    chi_min = min(boot_chisqr)
    chi_std = np.std(boot_chisqr, 0)
    chi_range = stats.scoreatpercentile(boot_chisqr, 84) - stats.scoreatpercentile(boot_chisqr, 16)
    dof = len(x) - len(fn.parameter_names)
    chi_sqr = original_ensamble_chisqr
    results.log(
        OUTPUT,
        u"\u03c7\u00b2 ={},   \u03c7\u00b2 / dof = {}, Qual {}\n".format(
            chi_sqr, chi_sqr / dof, quality_of_fit(dof, chi_sqr)
        ),
    )

    logging.debug(
        "chiave:{}, chi_med:{}, chi_min:{}, chi_std:{}, chi_range{}".format(
            chi_average, chi_median, chi_min, chi_std, chi_range
        )
    )

    if bootstraps > 1 and filestub:
        bootfilename = filestub + ".boot"
        if options.jackknife:
            bootfilename = filestub + ".jack"
        results.info("writing each bootstrap parameter to {}".format(bootfilename))
        with open(bootfilename, "w") as bootfile:
            str_ensamble_params = ", ".join([str(p) for p in original_ensamble_correlatedfit])
            bootfile.write(
                "#bootstrap, {}, \t ensamble mean: {}\n".format(", ".join(fn.parameter_names), str_ensamble_params)
            )
            for i, params in enumerate(boot_params):
                strparams = ", ".join([str(p) for p in params])
                bootfile.write("{}, {}\n".format(i, strparams))

    if options.output_stub and writecor:
        write_fitted_cor(fn, cor, tmin, tmax, options, boot_averages, errors=boot_std)
    if options.plot and bootstraps > 1:
        plot_fit(fn, cor, tmin, tmax, options, boot_averages, errors=boot_std)

    if return_chi:
        return boot_averages, boot_std, chi_sqr / dof
    if return_quality:
        return boot_averages, boot_std, quality_of_fit(dof, chi_sqr)
    else:
        return boot_averages, boot_std
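quality_of_fit(dof, chi_sqr) in the listing above is presumably the standard chi-squared fit probability, i.e. the probability of obtaining a chi-squared at least this large for the given degrees of freedom. A sketch under that assumption, using scipy:

    from scipy import stats

    def quality_of_fit(dof, chi_sqr):
        # survival function of the chi-squared distribution: Q = P(X >= chi_sqr)
        return stats.chi2.sf(chi_sqr, dof)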