def __init__(self, net, layer_idx):
    self.data = None
    self.label = None
    self.shmem = []
    self.name = net._layer_names[layer_idx]
    handles = self.name.split('-', 1)
    # Allocate shared memory for the data blob (first handle).
    handle = prepend_pid(handles[0])
    shmem, arr = create_shmem_ndarray('/' + handle,
                                      net.blobs[handles[0]].data.shape,
                                      np.float32,
                                      flags=posix_ipc.O_CREAT)
    self.data = arr
    self.shmem.append(shmem)
    if len(handles) == 2:
        # A second handle names the label blob; allocate shared memory for it too.
        handle = prepend_pid(handles[1])
        shmem, arr = create_shmem_ndarray('/' + handle,
                                          net.blobs[handles[1]].data.shape,
                                          np.float32,
                                          flags=posix_ipc.O_CREAT)
        self.label = arr
        self.shmem.append(shmem)
        net.set_input_arrays(self.data, self.label, layer_idx)
    else:
        # No label handle was given: hand Caffe a shared dummy label array.
        if SharedData._null_array is None:
            SharedData._null_array = np.empty(
                (self.data.shape[0], 1, 1, 1), dtype=np.float32)
        print("[SharedData] Warning: no handle specified for the label in "
              "layer", layer_idx, "- it should not be used in the net.",
              file=sys.stderr)
        net.set_input_arrays(self.data, SharedData._null_array, layer_idx)
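
# The two helpers used above are defined elsewhere in the project. A minimal
# sketch of what they are assumed to do, with names and signatures inferred
# from the call sites above (not the actual implementation):

import mmap
import os


def prepend_pid(handle):
    # Assumed behavior: namespace the handle per process so that shared
    # memory segments from concurrent workers do not collide. The exact
    # naming format is a guess.
    return '%d_%s' % (os.getpid(), handle)


def create_shmem_ndarray(name, shape, dtype, flags=0):
    # Assumed behavior: create or open a POSIX shared-memory segment and
    # expose it as a numpy ndarray backed by that segment.
    size = int(np.prod(shape)) * np.dtype(dtype).itemsize
    shmem = posix_ipc.SharedMemory(name, flags=flags, size=size)
    buf = mmap.mmap(shmem.fd, size)
    shmem.close_fd()  # the mmap keeps the segment mapped
    return shmem, np.frombuffer(buf, dtype=dtype).reshape(shape)
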
def __init__(self, net, param_name):
    self.caffe_grads = []
    self.shared_grads = []
    self.shmem = []
    # Typically there are two params per layer: a weight and a bias.
    for i, param in enumerate(net.params[param_name]):
        handle = prepend_pid(param_name + '_' + SharedGradient.POSTFIX[i])
        shmem, arr = create_shmem_ndarray('/' + handle,
                                          param.diff.shape,
                                          np.float32,
                                          flags=posix_ipc.O_CREAT)
        self.caffe_grads.append(param.diff)
        self.shared_grads.append(arr)
        self.shmem.append(shmem)
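
# Hypothetical usage sketch (not from the original source): after a backward
# pass, a worker could publish its freshly computed gradients by copying
# Caffe's diff blobs into the shared segments, where a sibling process or
# parameter server can read them. Relies on the module's `import numpy as np`.
def publish_gradients(shared_gradient):
    for caffe_grad, shared_grad in zip(shared_gradient.caffe_grads,
                                       shared_gradient.shared_grads):
        np.copyto(shared_grad, caffe_grad)  # write diff into shared memory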