Example #1
import os
from glob import glob
from typing import Tuple, Union

import numpy as np

import utils as u  # assumed: the project's utility module (u.sec2time, u.time2sec, u.get_gpu_name, ...)

# _get_patch_extractor and _transpose_patches_25d are private helpers from the same module


def reconstruct_patches(args,
                        return_history=False,
                        verbose=False) -> Union[np.ndarray, Tuple[np.ndarray, list]]:

    inputs = np.load(os.path.join(args.imgdir, args.imgname),
                     allow_pickle=True)

    pe = _get_patch_extractor(inputs.shape, args.patch_shape,
                              args.patch_stride, args.datadim, args.imgchannel)
    # a dummy extraction is needed to initialize the patch extractor's attributes
    _ = pe.extract(inputs)
    patch_array_shape = u.patch_array_shape(inputs.shape, pe.dim, pe.stride)

    patches_out = []
    elapsed = []
    history = []
    for path in glob(os.path.join('./results', args.outdir, '*.npy')):
        # each .npy result file stores a pickled dict
        out = np.load(path, allow_pickle=True).item()
        patches_out.append(out['output'])
        try:
            elapsed.append(out['elapsed'])
        except KeyError:
            # some result files store the timing under a different key
            elapsed.append(out['elapsed time'])
        history.append(out['history'])

    patches_out = np.asarray(patches_out)
    if args.datadim == '2.5d':
        patches_out = _transpose_patches_25d(patches_out, args.slice, adj=True)
    outputs = pe.reconstruct(
        patches_out.reshape(patch_array_shape)) / args.gain

    # out['device'] may be missing or already a device name rather than an index
    try:
        gpu_ = u.get_gpu_name(int(out['device']))
    except (KeyError, ValueError):
        gpu_ = out['device']

    if verbose:
        print('\n%d patches; total elapsed time on %s: %s' %
              (len(history), gpu_,
               u.sec2time(sum([u.time2sec(e) for e in elapsed]))))

    if return_history:
        return outputs, history
    else:
        return outputs
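
A minimal usage sketch for reconstruct_patches, assuming an argparse-style namespace; the field names below are exactly the ones the function reads, but every value is hypothetical:

from types import SimpleNamespace

# hypothetical values; the field names match what reconstruct_patches reads
args = SimpleNamespace(imgdir='./data', imgname='image.npy',
                       patch_shape=(64, 64), patch_stride=(32, 32),
                       datadim='2d', imgchannel=1,
                       outdir='myrun', slice=None, gain=1.0)

outputs, history = reconstruct_patches(args, return_history=True, verbose=True)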
Example #2
def save_result(self):
    """
    Save the results and some run info to disk as a .npy file,
    plus the model weights if requested.
    """
    np.save(os.path.join(self.outpath, self.image_name + '_run.npy'), {
        'device' : u.get_gpu_name(int(os.environ["CUDA_VISIBLE_DEVICES"])),
        'elapsed': u.sec2time(self.elapsed),
        'outpath': self.outpath,
        'history': self.history,
        'mask'   : self.mask,
        'image'  : self.img,
        'output' : self.out_best,
        'noise'  : self.input_list,
    })

    # save the model weights
    if self.args.savemodel:
        torch.save(self.net.state_dict(),
                   os.path.join(self.outpath, self.image_name + '_model.pth'))
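
A run file written this way can be read back as a dict, which is exactly what Example #1 does; a minimal sketch (the path is hypothetical):

import numpy as np

run = np.load('results/myimage_run.npy', allow_pickle=True).item()
print(run['device'], run['elapsed'])
output = run['output']  # the best output found during the run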
Example #3
import os
import sys

import numpy as np
import keras as K
import tensorflow as tf
from tensorflow.python.client import device_lib

from utils.get_gpu_name import *

# Get versions
print("OS: ", sys.platform)
print("Python: ", sys.version)
print("Keras: ", K.__version__)
print("Numpy: ", np.__version__)
print("Tensorflow: ", tf.__version__)
print("Keras Backend: ", K.backend.backend())
print("GPU: ", get_gpu_name())

# Print current directory
print(os.getcwd())

print(device_lib.list_local_devices())

# Test GPU; an error is raised if no GPU is available
with tf.device('/gpu:0'):
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c = tf.matmul(a, b)

# Create a session with log_device_placement enabled (TensorFlow 1.x API)
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
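
The snippet above builds the matmul graph but never executes it; under the TensorFlow 1.x API used here, running the op through the session is what actually exercises the GPU. A minimal follow-up, assuming the session created above:

# running the op forces placement on /gpu:0 and fails if no GPU is present
print(sess.run(c))
sess.close()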
Example #4
import infer
import utils

print("GPU found: " + utils.get_gpu_name())
ldac = infer.LightDetectionAndClassification(detection_model=infer.SSD_MOBILE_NET)

desired_labels = ["Red", "Green", "Yellow"]

files = ["traffic.jpg", "traffic2.jpg", "traffic3.jpg", "traffic4.jpg", "traffic5.jpg",
         "traffic6.jpg", "traffic7.jpg", "traffic8.jpg", "traffic9.jpg", "traffic10.jpg",
         "left0000.jpg", "left0003.jpg", "left0011.jpg", "left0027.jpg", "left0140.jpg", "left0701.jpg"]

for file in files:
    print("\n\n\n", file)
    ldac.infer_and_save(file, desired_labels=desired_labels, confidence_cutoff=0.6)
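
The hard-coded file list could also be built from the directory contents; a minimal sketch using only the standard library (the glob pattern is hypothetical):

from glob import glob

for file in sorted(glob('*.jpg')):
    ldac.infer_and_save(file, desired_labels=desired_labels, confidence_cutoff=0.6)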

Example #5
import multiprocessing
import sys

import numpy as np
import torch

# assumed import path for the project's diagnostic helpers used below
from utils import get_gpu_name, get_cuda_version, get_cudnn_version

############# CONSTANTS
LR = 0.0001
BATCHSIZE = 2
EPOCHS = 100

WIDTH = 1024
HEIGHT = 1024


MULTI_GPU = True

print("OS: ", sys.platform)
print("Python: ", sys.version)
print("PyTorch: ", torch.__version__)
print("Numpy: ", np.__version__)
print("GPU: ", get_gpu_name())
print(get_cuda_version())
print("CuDNN Version ", get_cudnn_version())

CPU_COUNT = multiprocessing.cpu_count()
GPU_COUNT = len(get_gpu_name())  # get_gpu_name() returns a list with one entry per visible GPU
print("CPUs: ", CPU_COUNT)
print("GPUs: ", GPU_COUNT)

# Manually scale to multi-gpu
assert torch.cuda.is_available()
_DEVICE = torch.device('cuda:0')
# enables cudnn's auto-tuner
torch.backends.cudnn.benchmark = True
if MULTI_GPU:
    LR *= GPU_COUNT
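
The LR adjustment above is the linear scaling rule for data-parallel training; the snippet stops before the matching model setup. A minimal sketch of how it is typically completed with torch.nn.DataParallel (the network is a hypothetical placeholder):

import torch.nn as nn
import torchvision.models as models

model = models.resnet50(num_classes=2)  # hypothetical network
if MULTI_GPU:
    model = nn.DataParallel(model)  # replicate the model across all visible GPUs
    BATCHSIZE *= GPU_COUNT          # keep the per-GPU batch size constant
model = model.to(_DEVICE)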