Example #1
0
def load(input_file="SCF.pkl"):
    """Load a previously stored selfconsistent calculation.

    Parameters
    ----------
    input_file : str
        Path of the pickle file to read (defaults to "SCF.pkl").

    Returns
    -------
    Whatever object inout.load deserializes from the file.
    """
    path = input_file
    return inout.load(path)
Example #2
0
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

import inout
import network


def remaining_noise(clean_data, noisy_data):
    """Return the Euclidean (L2) norm of the residual between two arrays.

    Used below as a scalar measure of how much noise is left in
    ``noisy_data`` relative to ``clean_data``. Note this is the total
    norm of the difference, not an RMS/per-element value.

    Parameters
    ----------
    clean_data, noisy_data : numpy.ndarray
        Arrays of identical shape.

    Returns
    -------
    float
        ``||clean_data - noisy_data||_2``.
    """
    return np.linalg.norm(clean_data - noisy_data)


# Data
# NOTE(review): assumes inout.load() returns exactly this 4-tuple in this
# order (clean train, noisy validation, clean validation, noisy test) —
# confirm against the inout module.
training_images_clean, validation_images_noisy, validation_images_clean, test_images_noisy = inout.load()

# Calculate Standard Deviation of Error
# NOTE(review): remaining_noise returns the total L2 norm of the residual,
# not a per-pixel standard deviation — confirm `sigma` is used accordingly.
sigma = remaining_noise(validation_images_clean, validation_images_noisy)

# Model
# TF1-style graph: placeholders are fed at session run time. Shape is
# [batch, 1, 28, 28]; presumably MNIST-like single-channel images — verify
# against the network module.
clean_image = tf.placeholder(tf.float32, shape=[None, 1, 28, 28])
noisy_image = tf.placeholder(tf.float32, shape=[None, 1, 28, 28])
encoded, decoded = network.denoising_autoencoder(noisy_image)
# Binary cross-entropy between the flattened clean target and the decoder
# output (the MSE alternative below was tried and left commented out).
loss = tf.keras.backend.binary_crossentropy(tf.reshape(clean_image, [-1, 28 * 28]), decoded)
# loss = tf.reduce_mean((tf.reshape(clean_image, [-1, 28 * 28]) - decoded) ** 2)
train_step = tf.train.AdadeltaOptimizer().minimize(loss)

# Train
# Batch bookkeeping; any remainder after whole batches is dropped by the
# integer division.
count_epochs = 50
images_per_batch = 40
count_batches = int(training_images_clean.shape[0] / images_per_batch)
Example #3
0
def load(input_file="hamiltonian.pkl"):
    """Return the object deserialized from *input_file* via inout.load."""
    return inout.load(input_file)
Example #4
0
    def convert(self):
        """Collect all image frames under the input path, optionally crop
        them to a user-selected region, and write the stack to an HDF5 file.

        Reads GUI state from ``self.varSubfolder``, ``self.varSelectRegion``,
        ``self.input_path`` and ``self.output_path``; calls ``self.cancel()``
        when done or when no input files are found.
        """
        # Supported extensions, tried in order; the first one that matches
        # any files wins.
        ftypes = ['tif', 'jpg', 'tiff', 'bmp', 'png', 'dm4', 'dm3']

        # Get the image paths (recursively when subfolder search is enabled).
        if self.varSubfolder.get():
            for ftype in ftypes:
                file_paths = glob.glob(''.join(
                    (self.input_path, '/**/*.', ftype)),
                                       recursive=True)
                if len(file_paths) > 0:
                    break
        else:
            for ftype in ftypes:
                file_paths = glob.glob(''.join(
                    (self.input_path, '/*.', ftype)))
                if len(file_paths) > 0:
                    break

        # Cancel if no files.
        # BUG FIX: the original fell through after cancelling and then
        # crashed on file_paths[0]; bail out explicitly instead.
        if len(file_paths) == 0:
            print('No valid files found.')
            self.cancel()
            time.sleep(0.1)
            return

        # get image info from the first frame; all frames are assumed to
        # share its geometry (TODO confirm inputs are uniformly sized).
        im0 = inout.load(file_paths[0])
        rows, cols = im0.shape
        num_frames = len(file_paths)
        row_min = 0
        row_max = rows
        col_min = 0
        col_max = cols

        if self.varSelectRegion.get():
            # Get a sampling of the data for selection purposes: up to ten
            # frames starting at the middle of the sequence.
            sampling = np.zeros((rows, cols, 10), dtype='uint8')
            mid = int(num_frames / 2)
            # BUG FIX: initialize last_good so the fallback below cannot hit
            # an unbound local if the very first sampled load fails.
            last_good = 0
            for i in range(10):
                try:
                    sampling[:, :, i] = inout.load(file_paths[mid + i])
                    last_good = i
                except IndexError:
                    # Ran past the end of the sequence: repeat the last
                    # successfully loaded frame.
                    sampling[:, :, i] = sampling[:, :, last_good]

            # Display a blurred sampling and select the target area
            sampling = operations.gaussian_stacking(sampling, 3)[:, :, 5]
            sampling = operations.gaussian(sampling, 2)
            plt.axis('off')
            figManager = plt.get_current_fig_manager()
            try:
                # Qt-style backends expose showMaximized().
                figManager.window.showMaximized()
            except Exception:
                # Tk-style backends use state('zoomed') instead.
                # (Narrowed from a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                figManager.window.state('zoomed')
            plt.gca().set_position([0, 0, 1, 1])
            plt.imshow(sampling, cmap=plt.cm.gray)
            a = selectRect()
            plt.show()

            # Clean selection data: normalize the corner order and make the
            # upper bounds inclusive of the selected pixel.
            row_min = int(min(a.y0, a.y1))
            row_max = int(max(a.y0, a.y1) + 1)
            col_min = int(min(a.x0, a.x1))
            col_max = int(max(a.x0, a.x1) + 1)
            rows = row_max - row_min
            cols = col_max - col_min

        # Create numpy array holding the (optionally cropped) frame stack.
        frames = np.zeros((rows, cols, num_frames), dtype='uint8')
        print('Loading frames...')
        start = time.time()

        for i, path in enumerate(file_paths):
            if i % 10 == 0:
                visualize.update_progress(i / num_frames)
            image = inout.load(path)
            frames[:, :, i] = image[row_min:row_max, col_min:col_max]
        print('Done, took', round(time.time() - start, 2), 'seconds.')

        # Convert to hdf5 file
        print('Converting to hdf5 file...', end=' ')
        start = time.time()
        with h5py.File(self.output_path, 'w') as f:
            # Unused `dset` binding and the commented-out lzf variant removed.
            f.create_dataset("data", (rows, cols, num_frames),
                             data=frames)

        print('Done, took', round(time.time() - start, 2), 'seconds.')

        self.cancel()