def __init__(self, prototxt, weights, inputs, outputs, volume_specs=None, use_gpu=None):

    for f in [prototxt, weights]:
        if not os.path.isfile(f):
            raise RuntimeError("%s does not exist" % f)

    self.prototxt = prototxt
    self.weights = weights
    self.inputs = inputs
    self.outputs = outputs

    if use_gpu is not None:
        logger.debug("Predict process: using GPU %d" % use_gpu)
        caffe.enumerate_devices(False)
        caffe.set_devices((use_gpu, ))
        caffe.set_mode_gpu()
        caffe.select_device(use_gpu, False)

    self.net = caffe.Net(self.prototxt, self.weights, caffe.TEST)
    self.net_io = NetIoWrapper(self.net, self.outputs.values())
def start(self): logger.info("Initializing solver...") if self.use_gpu is not None: logger.debug("Predict process: using GPU %d" % self.use_gpu) caffe.enumerate_devices(False) caffe.set_devices((self.use_gpu, )) caffe.set_mode_gpu() caffe.select_device(self.use_gpu, False) self.net = caffe.Net(self.prototxt, self.weights, caffe.TEST) self.net_io = NetIoWrapper(self.net, self.outputs.keys())
def __predict(self, use_gpu):

    if not self.net_initialized:

        logger.info("Initializing net...")

        if use_gpu is not None:
            logger.debug("Predict process: using GPU %d" % use_gpu)
            caffe.enumerate_devices(False)
            caffe.set_devices((use_gpu, ))
            caffe.set_mode_gpu()
            caffe.select_device(use_gpu, False)

        self.net = caffe.Net(self.prototxt, self.weights, caffe.TEST)
        self.net_io = NetIoWrapper(self.net)

        self.net_initialized = True

    start = time.time()
    batch = self.batch_in.get()
    fetch_time = time.time() - start

    # add the batch and channel dimensions expected by the network
    self.net_io.set_inputs({
        'data': batch.volumes[VolumeTypes.RAW].data[np.newaxis, np.newaxis, :],
    })

    self.net.forward()
    output = self.net_io.get_outputs()
    predict_time = time.time() - start

    logger.info(
        "Predict process: time=%f (including %f waiting for batch)" %
        (predict_time, fetch_time))

    assert len(output['aff_pred'].shape) == 5, (
        "Got affinity prediction with unexpected number of dimensions, "
        "should be 1 (batch, not used) + 1 (direction) + 3 (spatial), "
        "but is %d" % len(output['aff_pred'].shape))

    # strip the batch dimension before storing the prediction
    batch.volumes[VolumeTypes.PRED_AFFINITIES] = Volume(
        output['aff_pred'][0],
        Roi(),
        (1, 1, 1))

    return batch
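# --- Hedged sketch (not part of the original code) ---------------------------
# Illustrates the 'data' input prepared above: the two np.newaxis insertions
# add the batch and channel axes Caffe expects, turning a 3-D raw volume into
# a 5-D blob. The shape (84, 268, 268) is an assumption chosen only for
# illustration.
import numpy as np

raw = np.zeros((84, 268, 268), dtype=np.float32)  # hypothetical (z, y, x) volume
blob = raw[np.newaxis, np.newaxis, :]             # -> (batch, channel, z, y, x)
assert blob.shape == (1, 1, 84, 268, 268)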
def start(self): logger.info("Initializing solver...") if self.use_gpu is not None: logger.debug("Train process: using GPU %d", self.use_gpu) caffe.enumerate_devices(False) caffe.set_devices((self.use_gpu, )) caffe.set_mode_gpu() caffe.select_device(self.use_gpu, False) self.solver = caffe.get_solver(self.solver_parameters) if self.solver_parameters.resume_from is not None: logger.debug("Train process: restoring solver state from %s", self.solver_parameters.resume_from) self.solver.restore(self.solver_parameters.resume_from) names_net_outputs = self.outputs.keys() + self.gradients.keys() self.net_io = NetIoWrapper(self.solver.net, names_net_outputs)
def __init__(self, prototxt, weights, input_key, output_key, gpu):

    # TODO validate that gpu is available

    for f in [prototxt, weights]:
        if not os.path.isfile(f):
            raise RuntimeError("%s does not exist" % f)

    self.prototxt = prototxt
    self.weights = weights
    self.input_key = input_key
    self.output_key = output_key

    caffe.enumerate_devices(False)
    caffe.set_devices((gpu, ))
    caffe.set_mode_gpu()
    caffe.select_device(gpu, False)

    self.net = caffe.Net(self.prototxt, self.weights, caffe.TEST)
    self.net_io = NetIoWrapper(self.net, [self.output_key])
def __train(self, use_gpu):

    start = time.time()

    if not self.solver_initialized:

        logger.info("Initializing solver...")

        if use_gpu is not None:
            logger.debug("Train process: using GPU %d" % use_gpu)
            caffe.enumerate_devices(False)
            caffe.set_devices((use_gpu, ))
            caffe.set_mode_gpu()
            caffe.select_device(use_gpu, False)

        self.solver = caffe.get_solver(self.solver_parameters)
        if self.solver_parameters.resume_from is not None:
            logger.debug(
                "Train process: restoring solver state from %s",
                self.solver_parameters.resume_from)
            self.solver.restore(self.solver_parameters.resume_from)

        self.net_io = NetIoWrapper(self.solver.net)

        self.solver_initialized = True

    batch, request = self.batch_in.get()

    # raw gets batch and channel axes, the GT affinities already carry a
    # direction axis and only need a batch axis
    data = {
        'data': batch.volumes[VolumeType.RAW].data[np.newaxis, np.newaxis, :],
        'aff_label': batch.volumes[VolumeType.GT_AFFINITIES].data[np.newaxis, :],
    }

    if self.solver_parameters.train_state.get_stage(0) == 'euclid':
        logger.debug(
            "Train process: preparing input data for Euclidean training")
        self.__prepare_euclidean(batch, data)
    else:
        logger.debug(
            "Train process: preparing input data for Malis training")
        self.__prepare_malis(batch, data)

    self.net_io.set_inputs(data)

    loss = self.solver.step(1)
    # self.__consistency_check()
    output = self.net_io.get_outputs()

    batch.volumes[VolumeType.PRED_AFFINITIES] = Volume(
        output['aff_pred'][0],
        batch.volumes[VolumeType.GT_AFFINITIES].roi,
        batch.volumes[VolumeType.GT_AFFINITIES].resolution,
        interpolate=True)
    batch.loss = loss
    batch.iteration = self.solver.iter

    if VolumeType.LOSS_GRADIENT in request.volumes:
        diffs = self.net_io.get_output_diffs()
        batch.volumes[VolumeType.LOSS_GRADIENT] = Volume(
            diffs['aff_pred'][0],
            batch.volumes[VolumeType.GT_AFFINITIES].roi,
            batch.volumes[VolumeType.GT_AFFINITIES].resolution,
            interpolate=True)

    time_of_iteration = time.time() - start
    logger.info(
        "Train process: iteration=%d loss=%f time=%f" %
        (self.solver.iter, batch.loss, time_of_iteration))

    return batch
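# --- Hedged sketch (not part of the original code) ---------------------------
# Shows the blob shapes assumed by the 'data' dictionary above: the raw volume
# receives both batch and channel axes, while the ground-truth affinities
# already have a direction axis (of size 3) and only receive a batch axis.
# All concrete shapes here are assumptions chosen only for illustration.
import numpy as np

raw = np.zeros((84, 268, 268), dtype=np.float32)      # hypothetical (z, y, x)
gt_aff = np.zeros((3, 84, 268, 268), dtype=np.float32)  # hypothetical (direction, z, y, x)

data = {
    'data': raw[np.newaxis, np.newaxis, :],   # -> (1, 1, 84, 268, 268)
    'aff_label': gt_aff[np.newaxis, :],       # -> (1, 3, 84, 268, 268)
}
assert data['data'].shape == (1, 1, 84, 268, 268)
assert data['aff_label'].shape == (1, 3, 84, 268, 268)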