def kernel(self, inputs):
    x = inputs['image']
    x = Tensor(tf.cast(x.data, tf.float32))
    label = inputs['label']
    label = Tensor(tf.cast(label.data, tf.float32))
    label = tf.reshape(label.data, (32, 1))
    h = self.get_or_create_graph('layer0', Dense(
        'dense0', n_units=32, activation='relu'))(tf.layers.flatten(x.data))
    y_ = self.get_or_create_graph('layer1', Dense(
        'dense1', n_units=10, activation='relu'))(h)
    y = self.get_or_create_graph('layer3', Dense(
        'dense2', n_units=10, activation='relu'))(label)
    return {'inference': y_, 'label': y}
def post_processing(dataset, padding_size):
    hits = dataset.tensors['hits']
    shape = shape_list(hits.data)
    shape[1] = padding_size
    hits = Tensor(tf.reshape(hits.data, shape))
    label = Tensor(
        OneHot(padding_size)(dataset.tensors['first_hit_index'].data))
    return DatasetIncidentSingle(hits, label, dataset.tensors['padded_size'])
def _convert(self, v):
    result = Tensor(v)
    if self.config(self.KEYS.CONFIG.BATCH_SIZE) is not None:
        shape = result.data.shape.as_list()
        shape[0] = self.config(self.KEYS.CONFIG.BATCH_SIZE)
        if shape.count(None) == 1:
            shape[shape.index(None)] = -1
        result = Tensor(tf.reshape(result.data, shape))
    return result
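# A minimal standalone sketch (not part of the original module) of the
# batch-shape fix in _convert above, in plain TensorFlow 1.x; the batch size
# of 32 and the placeholder shape are assumptions for illustration. A leading
# None dimension is pinned to the batch size, and a single remaining None
# dimension becomes -1 so tf.reshape can infer it.
def _demo_fix_batch_shape():
    import tensorflow as tf
    v = tf.placeholder(tf.float32, [None, None, 5])
    shape = v.shape.as_list()  # [None, None, 5]
    shape[0] = 32              # pin the batch dimension
    if shape.count(None) == 1:
        shape[shape.index(None)] = -1  # let tf.reshape infer the rest
    return tf.reshape(v, shape)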
def main_basic(job, task):
    cfg = {"worker": ["localhost:2222", "localhost:2223"]}
    make_distribute_host(cfg, job, task, None, 'worker', 0)
    master_host = Master.master_host()
    this_host = ThisHost.host()
    host1 = Host(job, 1)
    hmi = DistributeGraphInfo(None, None, None, master_host)
    with tf.variable_scope('scope_test'):
        t0 = TensorVariable(VariableInfo(None, [1], tf.float32),
                            hmi.update(name='v0'))
        aop = tf.assign(t0.data, tf.constant([3.]))
        t1 = TensorNumpyNDArray([1.0], None, hmi.update(name='v1'))
        t1c = t1.copy_to(host1)
        t1p = Tensor(t1c.data + 1, t1c.data_info,
                     t1c.graph_info.update(name='t1_plus'))
    make_distribute_session()
    if task == 0:
        ptensor(t1)
        Server.join()
    if task == 1:
        ptensor(t1)
        ptensor(t1c)
        ptensor(t1p)
        ptensor(t0)
        ThisSession.run(aop)
        ptensor(t0)
def dataset_fast(path_table, batch_size, is_shuffle, nb_hits, is_train):
    table = ShuffledHitsTable(path_table)
    padding_size = table.padding_size
    table = filter_by_nb_hits(table, nb_hits)
    # table = drop_padded_hits(table, nb_hits)
    if is_train is not None:
        table = ShuffledHitsColumns(
            table.dataclass,
            list(Train80Partitioner(is_train).partition(table)))
    dtypes = {
        'hits': np.float32,
        'first_hit_index': np.int32,
        'padded_size': np.int32,
    }
    data = {
        k: np.array([getattr(table.data[i], k)
                     for i in range(table.capacity)],
                    dtype=dtypes[k])
        for k in table.columns
    }
    dataset = tf.data.Dataset.from_tensor_slices(data)
    dataset = dataset.repeat()
    if is_shuffle:
        dataset = dataset.shuffle(4 * batch_size)
    dataset = dataset.batch(batch_size)
    tensors = dataset.make_one_shot_iterator().get_next()
    for k in tensors:
        tensors[k] = Tensor(tensors[k])
    tensors['first_hit_index'] = OneHot(tensors['hits'].shape[1])(
        tensors['first_hit_index'])
    return DatasetIncidentSingle(tensors['hits'], tensors['first_hit_index'],
                                 tensors['padded_size'])
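# A hedged usage sketch for dataset_fast; the file path, hyper-parameters,
# and the DatasetIncidentSingle field names are assumptions:
#
#     d = dataset_fast('hits.h5', batch_size=32, is_shuffle=True,
#                      nb_hits=2, is_train=True)
#     with tf.Session() as sess:
#         hits, label = sess.run([d.hits.data, d.first_hit_index.data])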
def kernel(self, inputs):
    # The default order of the image is z-dominant (z, y, x);
    # for projection, another two images are created.
    img = inputs[self.KEYS.TENSOR.IMAGE].data
    effmap = inputs[self.KEYS.TENSOR.EFFICIENCY_MAP].data
    sinos = inputs[self.KEYS.TENSOR.SINOGRAM].data
    matrixs = inputs[self.KEYS.TENSOR.SYSTEM_MATRIX].data
    tran_matrixs = tf.sparse_transpose(matrixs)
    # NOTE: the following code needs a rewrite.
    proj = tf.sparse_tensor_dense_matmul(matrixs, img)
    # Guard against division by zero with a small constant (1e-8).
    con = tf.ones(proj.shape) / 100000000
    proj = proj + con
    temp_proj = sinos / proj
    temp_bp = tf.sparse_tensor_dense_matmul(tran_matrixs, temp_proj)
    result = img / effmap * temp_bp
    # result = img * temp_bp
    # result = result / tf.reduce_sum(result) * tf.reduce_sum(sinos)
    # efficiency map:
    # result = tf.sparse_tensor_dense_matmul(tran_matrixs, sinos)
    return Tensor(result, None, self.graph_info.update(name=None))
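# The update in the kernel above matches the standard MLEM iteration,
#     x_{k+1} = (x_k / s) * A^T (y / (A x_k)),
# with system matrix A, measured sinogram y, and efficiency (sensitivity)
# map s. A minimal dense NumPy sketch of the same update; the small shapes
# and random data are assumptions for illustration.
def _demo_mlem_step():
    import numpy as np
    A = np.random.rand(6, 4)           # system matrix: 6 bins, 4 voxels
    x = np.ones((4, 1))                # current image estimate
    y = A @ np.random.rand(4, 1)       # simulated sinogram
    s = A.T @ np.ones((6, 1))          # efficiency map, s = A^T 1
    proj = A @ x + 1e-8                # forward projection, guarded
    return x / s * (A.T @ (y / proj))  # multiplicative MLEM update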
def projection(self, image, lors):
    lors = lors.transpose()
    return Tensor(Op.get_module().projection(
        lors=lors.data,
        image=image.data,
        grid=image.grid,
        center=image.center,
        size=image.size,
        tof_bin=self.config(self.KEYS.TOF_BIN),
        tof_sigma2=self.config(self.KEYS.TOF_SIGMA2)))
def maplors(self, lors, image: Image):
    lors_value = lors['lors_value']
    lors = lors['lors']
    lors = lors.transpose()
    result = Tensor(Op.get_module().maplors(
        image=image.data,
        grid=image.grid,
        center=image.center,
        size=image.size,
        lors=lors.data,
        lors_value=lors_value.data))
    return result
def projection(self, image, lors):
    lors = lors.transpose()
    # grid/center/size are reversed to match the axis order expected by
    # the GPU op (the image is stored z-dominant).
    return Tensor(Op.get_module().projection_gpu(
        lors=lors.data,
        image=image.data,
        grid=image.grid[::-1],
        center=image.center[::-1],
        size=image.size[::-1],
        kernel_width=self.config(self.KEYS.KERNEL_WIDTH),
        tof_bin=self.config(self.KEYS.TOF_BIN),
        tof_sigma2=self.config(self.KEYS.TOF_SIGMA2)))
def backprojection(self, lors, image: Image):
    lors_value = lors['lors_value']
    lors = lors['lors']
    lors = lors.transpose()
    # grid/center/size reversed for the GPU op, as in projection above.
    result = Tensor(Op.get_module().backprojection_gpu(
        image=image.data,
        grid=image.grid[::-1],
        center=image.center[::-1],
        size=image.size[::-1],
        lors=lors.data,
        lors_value=lors_value.data,
        kernel_width=self.config(self.KEYS.KERNEL_WIDTH)))
    return result
def backprojection(self, lors, image):
    lors_value = lors['lors_value']
    lors = lors['lors']
    lors = lors.transpose()
    result = Tensor(Op.get_module().backprojection(
        image=image.data,
        grid=image.grid,
        center=image.center,
        size=image.size,
        lors=lors.data,
        lors_value=lors_value.data,
        tof_bin=self.config(self.KEYS.TOF_BIN),
        tof_sigma2=self.config(self.KEYS.TOF_SIGMA2)))
    return result
def kernel(self, inputs):
    if len(inputs) == 0:
        return None
    ip: tf.Tensor = inputs[self.KEYS.TENSOR.INPUT].data
    ip_shape = ip.shape.as_list()
    size = ip_shape[0] // self._nb_split
    result = {}
    for i in range(self._nb_split):
        result['slice_{}'.format(i)] = tf.slice(ip, [size * i, 0],
                                                [size, ip_shape[1]])
    ginfo = inputs[self.KEYS.TENSOR.INPUT].graph_info
    result = {
        k: Tensor(result[k], None,
                  ginfo.update(name=ginfo.name + '_{}'.format(k)))
        for k in result
    }
    return result
def kernel(self, inputs):
    if len(inputs) == 0:
        return None
    data: tf.Tensor = inputs[self.KEYS.TENSOR.INPUT].data
    data_shape = data.shape.as_list()
    size = data_shape[0] // self._nb_split
    # The last slice may contain more rows (no data truncation).
    last_size = data_shape[0] - size * (self._nb_split - 1)
    result = {}
    for i in range(self._nb_split - 1):
        result['slice_{}'.format(i)] = tf.slice(data, [size * i, 0],
                                                [size, data_shape[1]])
    # Arrange the last slice individually; it starts at row
    # size * (self._nb_split - 1).
    result['slice_{}'.format(self._nb_split - 1)] = tf.slice(
        data, [size * (self._nb_split - 1), 0], [last_size, data_shape[1]])
    ginfo = inputs[self.KEYS.TENSOR.INPUT].graph_info
    result = {
        k: Tensor(result[k], None,
                  ginfo.update(name=ginfo.name + '_{}'.format(k)))
        for k in result
    }
    return result
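# A small worked example of the remainder-keeping split above (the shapes
# are assumptions): with 10 rows and 3 splits, size = 10 // 3 = 3, so the
# slices start at rows 0, 3 and 6, and the last slice keeps the remaining
# 10 - 3*2 = 4 rows.
def _demo_split():
    import tensorflow as tf
    data = tf.reshape(tf.range(20, dtype=tf.float32), [10, 2])
    nb_split = 3
    size = 10 // nb_split
    slices = [tf.slice(data, [size * i, 0], [size, 2])
              for i in range(nb_split - 1)]
    slices.append(tf.slice(data, [size * (nb_split - 1), 0],
                           [10 - size * (nb_split - 1), 2]))
    return slices  # shapes: [3, 2], [3, 2], [4, 2]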
def main(job, task):
    tf.logging.set_verbosity(0)
    cfg = {"worker": ["localhost:2222", "localhost:2223"]}
    make_distribute_host(cfg, job, task, None, 'worker', 0)
    # if task == 1:
    #     time.sleep(10)
    # with tf.device(Master.master_host().device_prefix()):
    #     with tf.variable_scope('test'):
    #         t1 = tf.get_variable('var', [], tf.float32)
    master_host = Master.master_host()
    this_host = ThisHost.host()
    host2 = Host(job, 1)
    hmi = DistributeGraphInfo(None, None, None, master_host)
    with tf.variable_scope('scope_test'):
        t0 = TensorVariable(VariableInfo(None, [1], tf.float32),
                            DistributeGraphInfo.from_(hmi, name='t1'))
        aop = tf.assign(t0.data, tf.constant([3.]))
        t1 = TensorNumpyNDArray([1.0], None,
                                DistributeGraphInfo.from_(hmi,
                                                          name='t1_copy'))
        t1c = t1.copy_to(host2)
        t1p = Tensor(t1c.data + 1, t1c.data_info,
                     DistributeGraphInfo.from_(t1c.graph_info,
                                               name='t1_plus'))
        # t2 = t0.copy_to(host2)
    make_distribute_session()
    if task == 0:
        # ThisSession.run(tf.global_variables_initializer())
        ptensor(t1)
        Server.join()
    if task == 1:
        ptensor(t1)
        ptensor(t1c)
        ptensor(t1p)
        # print(t2.run())
        # print(t2.data)
        # print(t0.run())
        # print(t0)
        ptensor(t0)
        print(ThisSession.run(aop))
        ptensor(t0)
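# A hedged usage note for the two-worker demo above (assuming the script
# maps its command-line arguments to job and task), run in two terminals:
#
#     python demo.py worker 0   # task 0 prints t1, then joins the server
#     python demo.py worker 1   # task 1 runs the assign op and prints tensors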
def apply_model(model, dataset, spec):
    infer = model({'hits': Tensor(dataset['hits'])})
    label = dataset['crystal_index']
    loss = tf.losses.sigmoid_cross_entropy(label, infer.data)
    acc = same_crystal_accuracy(label, infer.data, spec)
    return {"infer": infer, "loss": loss, "accuracy": acc, "label": label}
def _(x, nb_units):
    return Tensor(_dense(x.data, nb_units))


def __call__(self, x):
    gtx = get_global_context()
    keep_prob = gtx.tensors[gtx.KEYS.TENSOR.KEEP_PROB]
    return Tensor(tf.nn.dropout(x.data, keep_prob.data))


def _(x, nb_classes):
    return Tensor(_one_hot(x.data, nb_classes))


def _(x):
    return Tensor(_ReLU(x.data))