def validatePatient(input):
    """
    Validates a patient dict, checking that the required fields exist and hold valid values.

    :returns: -1 if not successful, 1 if successful
    """
    if not isinstance(input, dict):
        logging.error("didn't pass in dict")
        return -1
    if "patient_id" not in input.keys():
        logging.error("patient id missing")
        return -1
    if "attending_email" not in input.keys():
        logging.error("attending email missing")
        return -1
    if "user_age" not in input.keys():
        logging.error("user age missing")
        return -1
    try:
        if float(input["user_age"]) < 0:
            logging.error("user age out of bounds")
            return -1
        if int(input["patient_id"]) < 0:
            logging.error("patient id out of bounds")
            return -1
    except (TypeError, ValueError):
        logging.error("non numeric user age or patient id")
        return -1
    return 1
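
A minimal usage sketch (assumes the function above is in scope; the field values are made up):

import logging
logging.basicConfig(level=logging.ERROR)

good_patient = {"patient_id": 42, "attending_email": "dr@example.com", "user_age": 30}
bad_patient = {"patient_id": 42, "user_age": "thirty"}

print(validatePatient(good_patient))  # 1
print(validatePatient(bad_patient))   # -1, logs "attending email missing"
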
Example #2
File: base.py  Project: 447327642/osqa
def update_trigger():
    # Trigger the update process
    now = datetime.datetime.now()
    if (now - settings.LATEST_UPDATE_DATETIME) > datetime.timedelta(days=1):
        try:
            update_status = check_for_updates()
            logging.log(logging.INFO, smart_unicode("Update process has been triggered: %s" % update_status))
        except Exception as e:
            logging.error(smart_unicode(e))
        finally:
            pass  # body truncated in the source listing
Example #3
    def get_request(self):
        """Adds logging of socket errors on connect

        The default implementation simply swallows socket errors, making
        it hard to see why calls like handle_request returned without
        actually doing anything."""
        try:
            return socketserver.TCPServer.get_request(self)
        except socket.error as e:
            logging.errror("Server socket error %s", repr(e))
            raise
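
A minimal sketch (hypothetical class name and address) of how this override might be attached to a socketserver.TCPServer:

import logging
import socket
import socketserver

class LoggingTCPServer(socketserver.TCPServer):
    # Log socket errors raised while accepting a connection instead of
    # silently swallowing them, as in the get_request override above.
    def get_request(self):
        try:
            return socketserver.TCPServer.get_request(self)
        except socket.error as e:
            logging.error("Server socket error %s", repr(e))
            raise

# server = LoggingTCPServer(("localhost", 8000), socketserver.BaseRequestHandler)
# server.handle_request()
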
Example #5
def update_trigger():
    # Trigger the update process
    now = datetime.datetime.now()
    if (now - settings.LATEST_UPDATE_DATETIME) > datetime.timedelta(days=1):
        try:
            update_status = check_for_updates()
            logging.log(
                logging.INFO,
                smart_unicode("Update process has been triggered: %s" %
                              update_status))
        except Exception as e:
            logging.error(smart_unicode(e))
        finally:
            pass  # body truncated in the source listing
Example #6
    def _set_camera_feature(self, name, val):
        """Change camera settings.

        Note: features with a fixed increment (e.g. height, width) raise an
        OutOfRangeException which cannot be handled here.

        Parameters
        ----------
        name : string
            Name of the feature e.g. 'ExposureTime'
        val :
            Value to be set. Depending on the feature this might
            be int, string, bool etc.
        """

        if name in self._cam:
            cam_was_acquiring = self._cam.is_acquiring()
            try:
                # Try to set the value even if live feed is running.
                self._cam[name].value = val
                print(f"Feature '{name}' was succesfully set to {val}.")
            except AccessModeError:
                logging.warning(
                    f"Could not change the feature {name} on the fly. Pausing and trying again."
                )
                self._cam.stop_acquisition()
                try:
                    self._cam[name].value = val
                    print(f"Feature '{name}' was succesfully set to {val}.")
                except AccessModeError as ame:
                    # Could not set the value even with feed paused.
                    logging.error(ame)
            except ValueError as ve:
                # Catch value error from both of the previous cases.
                logging.error(ve)
            except OutOfRangeException as ore:
                # FIXME Catching this exception doesn't work properly.
                # It might be that camazing is not throwing it as it should.
                logging.error(f"Increment exception probably: {ore}")
                # print(ore)
            except Exception:
                logging.error(
                    f"Unexpected exception while trying to set the feature {name}"
                )
            finally:
                # Try to restart acquisition even if exceptions occurred.
                if cam_was_acquiring:
                    self._cam.start_acquisition()
        else:
            logging.warning(
                f"Feature '{name}' is not valid. Try again with valid name.")
Example #7
def create_tf_graph(dataset, fq_graph, train):

    graph_inputs = {}

    g = tf.Graph()
    print('Creating Tensorflow graph for {}'.format(fq_graph.name))

    if 'qnn' in fq_graph.name.lower():
        quantization_type = 'qnn'
    elif 'dorefa' in fq_graph.name.lower():
        quantization_type = 'dorefa'
    elif 'wrpn' in fq_graph.name.lower():
        quantization_type = 'wrpn'
    else:
        logging.error('Unknown quantization type for network: {}'.format(
            fq_graph.name))

    print('Gradient dtype: {}'.format(fq_graph.grad_dtype))
    grad_dtype = fq_graph.grad_dtype
    grad_bits = grad_dtype.bits
    print('Gradient dtype bits: {}'.format(grad_bits))

    nvmlInit()
    gpu_handle = nvmlDeviceGetHandleByIndex(0)
    gpu_name = nvmlDeviceGetName(gpu_handle)

    def get_sparsity(x):
        with g.name_scope('sparsity_op'):
            with tf.device("/cpu:0"):
                x_size = tf.cast(tf.size(x), tf.float32)
                non_zero = tf.count_nonzero(x, dtype=tf.float32)
                sparsity = 1. - (non_zero / x_size)
                return sparsity

    def quantize(x, k):
        with tf.device("/gpu:0"):
            n = float(2**k - 1)
            with g.gradient_override_map({"Round": "Identity"}):
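                # Round's gradient is overridden with Identity (a straight-through
                # estimator), so gradients flow through the rounding unchanged.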
                return tf.round(x * n) / n

    try:

        @tf.RegisterGradient("FGGrad_1bit")
        def grad_fg_1(op, x):
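            # DoReFa-style gradient quantization: scale each sample by its max |g|,
            # add uniform noise spanning one quantization step, clip to [0, 1],
            # quantize to bitG bits, then undo the scaling. The 2/4/8/16-bit
            # variants below differ only in bitG.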
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            with tf.device("/gpu:0"):
                bitG = 1
                rank = x.get_shape().ndims
                assert rank is not None
                maxx = tf.reduce_max(tf.abs(x),
                                     list(range(1, rank)),
                                     keep_dims=True)
                x = x / maxx
                n = float(2**bitG - 1)
                x = x * 0.5 + 0.5 + tf.random_uniform(
                    tf.shape(x), minval=-0.5 / n, maxval=0.5 / n)
                x = tf.clip_by_value(x, 0.0, 1.0)
                x = quantize(x, bitG) - 0.5
                return x * maxx * 2

        @tf.RegisterGradient("FGGrad_2bit")
        def grad_fg_2(op, x):
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            with tf.device("/gpu:0"):
                bitG = 2
                rank = x.get_shape().ndims
                assert rank is not None
                maxx = tf.reduce_max(tf.abs(x),
                                     list(range(1, rank)),
                                     keep_dims=True)
                x = x / maxx
                n = float(2**bitG - 1)
                x = x * 0.5 + 0.5 + tf.random_uniform(
                    tf.shape(x), minval=-0.5 / n, maxval=0.5 / n)
                x = tf.clip_by_value(x, 0.0, 1.0)
                x = quantize(x, bitG) - 0.5
                return x * maxx * 2

        @tf.RegisterGradient("FGGrad_4bit")
        def grad_fg_4(op, x):
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            bitG = 4
            with tf.device("/gpu:0"):
                rank = x.get_shape().ndims
                assert rank is not None
                maxx = tf.reduce_max(tf.abs(x),
                                     list(range(1, rank)),
                                     keep_dims=True)
                x = x / maxx
                n = float(2**bitG - 1)
                x = x * 0.5 + 0.5 + tf.random_uniform(
                    tf.shape(x), minval=-0.5 / n, maxval=0.5 / n)
                x = tf.clip_by_value(x, 0.0, 1.0)
                x = quantize(x, bitG) - 0.5
                return x * maxx * 2

        @tf.RegisterGradient("FGGrad_8bit")
        def grad_fg_8(op, x):
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            with tf.device("/gpu:0"):
                bitG = 8
                rank = x.get_shape().ndims
                assert rank is not None
                maxx = tf.reduce_max(tf.abs(x),
                                     list(range(1, rank)),
                                     keep_dims=True)
                x = x / maxx
                n = float(2**bitG - 1)
                x = x * 0.5 + 0.5 + tf.random_uniform(
                    tf.shape(x), minval=-0.5 / n, maxval=0.5 / n)
                x = tf.clip_by_value(x, 0.0, 1.0)
                x = quantize(x, bitG) - 0.5
                return x * maxx * 2

        @tf.RegisterGradient("FGGrad_16bit")
        def grad_fg_16(op, x):
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            with tf.device("/gpu:0"):
                bitG = 16
                rank = x.get_shape().ndims
                assert rank is not None
                maxx = tf.reduce_max(tf.abs(x),
                                     list(range(1, rank)),
                                     keep_dims=True)
                x = x / maxx
                n = float(2**bitG - 1)
                x = x * 0.5 + 0.5 + tf.random_uniform(
                    tf.shape(x), minval=-0.5 / n, maxval=0.5 / n)
                x = tf.clip_by_value(x, 0.0, 1.0)
                x = quantize(x, bitG) - 0.5
                return x * maxx * 2

        @tf.RegisterGradient("FGGrad_32bit")
        def grad_fg_32(op, x):
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            return x
    except KeyError:
        # The custom gradients may already be registered from a previous call.
        pass

    def dorefa_quantize_gradient(x, bitG):
        with tf.device("/gpu:0"):
            grad_name = 'FGGrad_{}bit'.format(bitG)
            with g.gradient_override_map({"Identity": grad_name}):
                return tf.identity(x)

    def dorefa_quantize_weights(x, bitW):
        with tf.device("/gpu:0"):
            if bitW == 32:
                return x
            if bitW == 1:  # BWN
                with g.gradient_override_map({"Sign": "Identity"}):
                    E = tf.stop_gradient(tf.reduce_mean(tf.abs(x)))
                    return tf.sign(x / E) * E
            x = tf.tanh(x)
            x = x / tf.reduce_max(tf.abs(x)) * 0.5 + 0.5
            return 2 * quantize(x, bitW) - 1

    def wrpn_quantize_weights(x, bitW):
        with tf.device("/gpu:0"):
            cx = tf.clip_by_value(x, -1, 1)
            return quantize(cx, bitW - 1)

    def dorefa_quantize_activations(x, bitA):
        with tf.device("/gpu:0"):
            if bitA == 32:
                return x
            return quantize(x, bitA)

    def wrpn_quantize_activations(x, bitA):
        with tf.device("/gpu:0"):
            if bitA == 32:
                return x
            cx = tf.clip_by_value(x, 0, 1)
            return quantize(cx, bitA)

    def _get_weights(shape, name, bits):
        w = tf.Variable(tf.random_normal(shape, dtype=tf.float32, stddev=1e-1),
                        trainable=True,
                        name=name)
        if quantization_type == 'qnn':
            return dorefa_quantize_weights(w, bits)
        elif quantization_type == 'dorefa':
            return dorefa_quantize_weights(w, bits)
        else:
            return wrpn_quantize_weights(w, bits)

    def _get_inputs(shape, name):
        if 'data' in name:
            print(name, shape)
            n, c, h, w = shape
            graph_inputs['inputs/data'] = tf.placeholder(tf.float32,
                                                         shape=[n, h, w, c],
                                                         name=name)
            return tf.transpose(graph_inputs['inputs/data'], [0, 3, 1, 2])
        else:
            print(name, shape)
            batch, num_classes = shape[0], shape[1]
            graph_inputs['inputs/labels'] = tf.placeholder(tf.int32,
                                                           shape=[batch],
                                                           name=name)
            return tf.one_hot(graph_inputs['inputs/labels'], num_classes)

    def _nonlin(x, bits):
        if bits == 32:
            return tf.nn.relu(x)
        return tf.clip_by_value(x, 0., 1.)

    def _activation(x, bits):
        with tf.device("/gpu:0"):
            with tf.name_scope('activation'):
                if quantization_type == 'dorefa':
                    qa = dorefa_quantize_activations(_nonlin(x, bits), bits)
                    ret = dorefa_quantize_gradient(qa, grad_bits)
                elif quantization_type == 'qnn':
                    qa = dorefa_quantize_activations(_nonlin(x, bits), bits)
                    ret = dorefa_quantize_gradient(qa, grad_bits)
                else:
                    act = tf.nn.relu(x)
                    qa = wrpn_quantize_activations(act, bits)
                    ret = dorefa_quantize_gradient(qa, 32)
                return ret

    def _conv(op):

        with tf.name_scope(op.name):
            strides = [1, 1, op.stride[-2], op.stride[-1]]
            i = tf_tensor_registry[op.data.name]

            with tf.device("/cpu:0"):
                tf.summary.scalar('fwdprop-sparsity', get_sparsity(i))

            with tf.device("/gpu:0"):
                cout = op.weights.shape[-4]
                cin = op.weights.shape[-3]
                kh = op.weights.shape[-2]
                kw = op.weights.shape[-1]
                w = _get_weights([kh, kw, cin, cout],
                                 name=op.weights.name,
                                 bits=op.weights.dtype.bits)
                b = _get_weights([cout],
                                 name=op.name + 'bias',
                                 bits=op.weights.dtype.bits)
                pad = 'SAME' if op.pad[0] > 0 else 'VALID'
                if i.shape[1] != cin:
                    i = tf.transpose(i, [0, 3, 1, 2])
                conv_out = tf.nn.conv2d(i,
                                        w,
                                        strides,
                                        pad,
                                        name=op.name,
                                        data_format='NCHW')
                o = _activation(
                    tf.nn.bias_add(conv_out, b, data_format='NCHW'),
                    op.output_tensors.dtype.bits)
                tf_tensor_registry[op.output_tensors.name] = o
                # print(op.output_tensors.name)

    def _maxpool(op):
        with tf.device("/gpu:0"):
            with tf.name_scope(op.name):
                strides = [1, 1, op.stride[-2], op.stride[-1]]
                i = tf_tensor_registry[op.data.name]
                pad = 'SAME' if op.pad[0] > 0 else 'VALID'
                kernel = [1, 1, op.pooling_kernel[-2], op.pooling_kernel[-1]]
                o = tf.nn.max_pool(i, kernel, strides, pad, data_format='NCHW')
                tf_tensor_registry[op.output_tensors.name] = o

    def _flatten(op):
        with tf.device("/gpu:0"):
            with tf.name_scope(op.name):
                i = tf_tensor_registry[op.data.name]
                o = tf.reshape(i, op.output_tensors.shape)
                tf_tensor_registry[op.output_tensors.name] = o

    def _matmul(op):
        with tf.name_scope(op.name):
            with tf.device("/cpu:0"):
                w = _get_weights(op.weights.shape,
                                 name=op.weights.name,
                                 bits=op.weights.dtype.bits)
                b = tf.Variable(
                    tf.constant(0.0,
                                shape=[op.output_tensors.shape[-1]],
                                dtype=tf.float32),
                    trainable=True,
                    name='biases')
                i = tf_tensor_registry[op.data.name]
                tf.summary.scalar('fwdprop-sparsity', get_sparsity(i))
            with tf.device("/gpu:0"):
                o = _activation(
                    tf.matmul(i, w) + b, op.output_tensors.dtype.bits)
                tf_tensor_registry[op.output_tensors.name] = o

    def _xentropy(op):
        with tf.device("/gpu:0"):
            with tf.name_scope('X-entropy'):
                logits = tf_tensor_registry[op.logits.name]
                tf_tensor_registry['logits'] = logits
                labels = tf_tensor_registry[op.labels.name]
                cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=logits, labels=labels, name=op.output_tensors.name)
                tf_tensor_registry['loss'] = cross_entropy

    def _concat(op):
        with tf.device("/gpu:0"):
            with tf.name_scope(op.name):
                assert len(op.data) > 1, op.data
                input_tensors = [tf_tensor_registry[x.name] for x in op.data]
                o = tf.concat(input_tensors, op.concat_dim, name=op.name)
                tf_tensor_registry[op.output_tensors.name] = o

    def _add(op):
        with tf.device("/gpu:0"):
            with tf.name_scope(op.name):
                assert len(op.data) == 2, op.data
                a, b = [tf_tensor_registry[x.name] for x in op.data]
                o = a + b
                tf_tensor_registry[op.output_tensors.name] = o

    def _globalAvgPool(op):
        with tf.device("/gpu:0"):
            with tf.name_scope(op.name):
                i = tf_tensor_registry[op.data.name]
                n, c, h, w = op.data.shape
                o = tf.reduce_mean(i, [2, 3])
                tf_tensor_registry[op.output_tensors.name] = o

    with g.as_default():
        tf_tensor_registry = {}
        for tname, t in fq_graph.tensor_registry.items():
            if t.name.startswith('input'):
                i = _get_inputs(t.shape, t.name)
                tf_tensor_registry[tname] = i

        for opname, op in fq_graph.op_registry.items():
            if op.__class__.__name__ == 'Convolution':
                _conv(op)
            elif op.__class__.__name__ == 'MaxPooling':
                _maxpool(op)
            elif op.__class__.__name__ == 'Flatten':
                _flatten(op)
            elif op.__class__.__name__ == 'MatMul':
                _matmul(op)
            elif op.__class__.__name__ == 'CrossEntropyLoss':
                _xentropy(op)
            elif op.__class__.__name__ == 'Concat':
                _concat(op)
            elif op.__class__.__name__ == 'Add':
                _add(op)
            elif op.__class__.__name__ == 'GlobalAvgPooling':
                _globalAvgPool(op)
            else:
                name = op.__class__.__name__
                assert 'Backprop' in name or 'Grad' in name, name
        loss = tf_tensor_registry['loss']

        if train:
            with tf.device("/gpu:0"):
                lr = tf.get_variable('learning_rate',
                                     initializer=1e-4,
                                     trainable=False)
                global_step = tf.train.get_or_create_global_step(
                    graph=tf.get_default_graph())

                opt = tf.train.AdamOptimizer(lr, epsilon=1e-5)
                train_op = opt.minimize(loss, global_step=global_step)

            with tf.device("/cpu:0"):
                tf.summary.scalar('learning_rate', lr)
        else:
            train_op = loss

        graph_data = graph_inputs['inputs/data']
        graph_labels = graph_inputs['inputs/labels']
        graph_logits = tf_tensor_registry['logits']
        print(graph_data, graph_labels, graph_logits)

        return (g, train_op, tf.summary.merge_all(), graph_data, graph_labels,
                graph_logits)
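
For reference, a minimal standalone NumPy sketch (not part of the project) of the uniform k-bit quantizer that quantize() above builds into the graph:

import numpy as np

def quantize_np(x, k):
    # Uniform quantization of values in [0, 1] onto 2**k - 1 steps.
    n = float(2 ** k - 1)
    return np.round(x * n) / n

print(quantize_np(np.array([0.0, 0.4, 0.7, 1.0]), 2))
# -> approximately [0., 0.3333, 0.6667, 1.]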