Code example #1
    def testcg(self):
        K.set_floatx('float64')

        from dlmri_tutorial.keras.layers import mri

        A = mri.MulticoilForwardOp(center=True)
        AH = mri.MulticoilAdjointOp(center=True)

        dc = DCPM(A, AH, weight_init=1.0, max_iter=50, tol=1e-12)

        shape = (5, 10, 10, 1)
        kshape = (5, 3, 10, 10)
        x = tf.complex(tf.random.normal(shape, dtype=tf.float64),
                       tf.random.normal(shape, dtype=tf.float64))
        y = tf.complex(tf.random.normal(kshape, dtype=tf.float64),
                       tf.random.normal(kshape, dtype=tf.float64))
        mask = tf.ones(kshape, dtype=tf.float64)
        smaps = tf.complex(tf.random.normal(kshape, dtype=tf.float64),
                           tf.random.normal(kshape, dtype=tf.float64))

        tf_a = tf.Variable(np.array([1.1]), trainable=True, dtype=tf.float64)
        tf_b = tf.Variable(np.array([1.1]), trainable=True, dtype=tf.float64)

        # perform a gradient check:
        epsilon = 1e-5

        def compute_loss(a, b):
            arg = dc([x * tf.complex(a, tf.zeros_like(a)), y, mask, smaps],
                     scale=1 / b)  # take 1/b
            return 0.5 * tf.math.real(tf.reduce_sum(tf.math.conj(arg) * arg))

        with tf.GradientTape() as g:
            g.watch(x)
            # setup the model
            tf_loss = compute_loss(tf_a, tf_b)

        # backpropagate the gradient
        dLoss = g.gradient(tf_loss, [tf_a, tf_b])

        grad_a = dLoss[0].numpy()[0]
        grad_b = dLoss[1].numpy()[0]

        # numerical gradient w.r.t. the input
        l_ap = compute_loss(tf_a + epsilon, tf_b).numpy()
        l_an = compute_loss(tf_a - epsilon, tf_b).numpy()
        grad_a_num = (l_ap - l_an) / (2 * epsilon)
        print("grad_x: {:.7f} num_grad_x {:.7f} success: {}".format(
            grad_a, grad_a_num,
            np.abs(grad_a - grad_a_num) < 1e-4))
        self.assertTrue(np.abs(grad_a - grad_a_num) < 1e-4)

        # numerical gradient w.r.t. the weights
        l_bp = compute_loss(tf_a, tf_b + epsilon).numpy()
        l_bn = compute_loss(tf_a, tf_b - epsilon).numpy()
        grad_b_num = (l_bp - l_bn) / (2 * epsilon)

        print("grad_w: {:.7f} num_grad_w {:.7f} success: {}".format(
            grad_b, grad_b_num,
            np.abs(grad_b - grad_b_num) < 1e-4))
        self.assertTrue(np.abs(grad_b - grad_b_num) < 1e-4)
Code example #2
File: tests.py Project: visionscaper/tf-issues
    def _enable_float32(self):
        dtype = 'float32'

        K.set_floatx(dtype)
        K.set_epsilon(1e-7)

        return dtype
Code example #3
File: tests.py Project: visionscaper/tf-issues
    def _enable_float16(self):
        dtype = 'float16'

        K.set_floatx(dtype)
        K.set_epsilon(1e-4)

        return dtype
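
The larger epsilon for float16 is deliberate: Keras' default fuzz factor of 1e-7 is too small for half precision, because dividing by it overflows float16's maximum of 65504, so guarded divisions turn into inf and then NaN. A quick NumPy illustration (added for this listing, not part of the project above):

import numpy as np

# 1e-7 rounds to ~1.2e-7 in float16; dividing by it overflows to inf.
print(np.float16(1.0) / np.float16(1e-7))  # inf
# A fuzz factor of 1e-4 keeps the same division finite.
print(np.float16(1.0) / np.float16(1e-4))  # ~1.0e4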
Code example #4
    def build_model(self, input_layer_size):
        """
        creation of a neural network for multiclass classification
        :param input_layer_size:
        :return:
        """
        K.set_floatx('float16')
        config = tensorflow.ConfigProto(device_count={'GPU': 1, 'CPU': 56})
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.5
        K.set_session(tensorflow.Session(config=config))

        self.model = Sequential()
        self.model.add(
            Dense(input_layer_size,
                  dtype=tensorflow.float64,
                  activation='relu'))
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dense(256, activation='relu'))
        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dense(3, activation='softmax'))
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='sgd',
                           metrics=['accuracy'])
        self.initial_fit()
Code example #5
    def _model_tf_v1(n_input, n_output):
        from tensorflow.python.keras.layers import Input, Dense, Dropout

        session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
        sess = tf.Session(config=session_config)

        K.set_floatx('float64')
        # ---------------------
        # CUSTOM CHANGES STARTS

        # a simple example
        ecg_input = Input(shape=(None, n_input), dtype='float64', name='ecg_input')

        x = Dense(16, activation='relu')(ecg_input)
        x = Dropout(0.1)(x)

        x = Dense(8, activation='relu')(x)
        x = Dropout(0.1)(x)

        bcg_out = Dense(n_output, activation='linear')(x)
        model = Model(inputs=ecg_input, outputs=bcg_out)

        # CUSTOM CHANGES ENDS
        # ---------------------

        return model
Code example #6
 def __init__(self,
              seed,
              cutoff,
              loss_function="superset",
              num_epochs=5000,
              learning_rate=0.05,
              batch_size=32,
              patience=16,
              es_val_ratio=0.3,
              early_stop_interval=5,
              log_losses=True,
              hidden_layer_sizes=[32],
              activation_function="sigmoid"):
     self.network = None
     self.logger = logging.getLogger("NeuralNetSuperSetLoss")
     self.loss_history = []
     self.es_val_history = []
     self.seed = seed
     self.cutoff = cutoff
     self.loss_function = loss_function
     self.num_epochs = num_epochs
     self.learning_rate = learning_rate
     self.batch_size = batch_size
     self.patience = patience
     self.es_val_ratio = es_val_ratio
     self.early_stop_interval = early_stop_interval
     self.log_losses = log_losses
     self.hidden_layer_sizes = hidden_layer_sizes
     self.activation_function = activation_function
     K.set_floatx("float64")
Code example #7
def BuildModel(dataShape, modelName, learningRate):

    K.set_floatx('float16')
    K.set_epsilon(1e-4)

    input0 = tf.keras.Input(shape=(dataShape, dataShape, 3),
                            name='input_0',
                            dtype='float16')  #Scene color
    input1 = tf.keras.Input(shape=(dataShape, dataShape, 1),
                            name='input_1',
                            dtype='float16')  #Depth 0
    input2 = tf.keras.Input(shape=(dataShape, dataShape, 1),
                            name='input_2',
                            dtype='float16')  #Depth -1
    input3 = tf.keras.Input(shape=(dataShape, dataShape, 1),
                            name='input_3',
                            dtype='float16')  #Depth -2

    modelFunc = importlib.import_module('Models.' + modelName)

    model = modelFunc.MakeModel([input0, input1, input2, input3], dataShape,
                                modelName)
    print("Loaded model from disk")

    model.compile(loss=Loss,
                  optimizer=LossScaleOptimizer(
                      RMSprop(lr=learningRate, epsilon=1e-4), 1000))

    model.summary()

    return model
Code example #8
def train_conv_net_cpu(train_data, train_labels, val_data, val_labels,
                       conv_net_hyperparameters, num_processors, seed, dtype="float32", inter_op_threads=2,trial=None):
    """
    Train a convolutional neural network on the CPU.
    """
    np.random.seed(seed)
    if "get_visible_devices" in dir(tf.config.experimental):
        gpus = tf.config.experimental.get_visible_devices("GPU")
    else:
        gpus = tf.config.get_visible_devices("GPU")
    if len(gpus) > 0:
        for device in gpus:
            tf.config.experimental.set_memory_growth(device, True)
    tf.config.threading.set_inter_op_parallelism_threads(inter_op_threads)
    tf.config.threading.set_intra_op_parallelism_threads(num_processors)
    if tf.__version__[0] == "1":
        tf.set_random_seed(seed)
    else:
        tf.random.set_seed(seed)
    K.set_floatx(dtype)
    with tf.device("/CPU:0"):
        scn = ResNet(**conv_net_hyperparameters)
        history = scn.fit(train_data, train_labels, val_x=val_data, val_y=val_labels)
        #scn.fit(train_data, train_labels, val_x=val_data, val_y=val_labels)
        time_str = pd.Timestamp.now().strftime("%d/%m/%Y %I:%M:%S")
        with open('AUC_history.csv', 'w') as f:
            for key in history.history.keys():
                f.write("%s,%s\n" % (key, history.history[key]))
        epoch_times = scn.time_history.times
        batch_loss = np.array(scn.loss_history.losses).ravel().tolist()
        epoch_loss = np.array(scn.loss_history.val_losses).ravel().tolist()
    date_time = str(datetime.now())
    save_model(scn.model, 'goes16ci_model_cpu' + date_time + '.h5', save_format = 'h5')
    return epoch_times, batch_loss, epoch_loss
Code example #9
def get_weights(save_dir: Path, model_name: str, dtype: str) -> str:
    """Download pre-trained imagenet weights for model.

    Args:
        save_dir: Path to where checkpoint must be downloaded.
        model_name: Type of image classification model, must be one of
        ("GoogleNet", "InceptionV1", "MobileNet", "MobileNetV2", "NASNetMobile", "DenseNet121",
         "ResNet50", "Xception", "InceptionV3") in all lower case.
        dtype: Data type of the network.

    Returns: Path to checkpoint file.

    """
    if isinstance(save_dir, str):
        save_dir = Path(save_dir)
    g = tf.Graph()
    with tf.Session(graph=g) as sess:
        keras_backend.set_floatx(dtype)
        keras_backend.set_session(sess)
        if model_name == "mobilenet":
            MobileNet(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "mobilenetv2":
            MobileNetV2(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "nasnetmobile":
            NASNetMobile(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "densenet121":
            DenseNet121(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "resnet50":
            ResNet50(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "xception":
            Xception(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "inceptionv3":
            InceptionV3(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name in ("googleNet", "inceptionv1"):
            tar_file = get_file(
                fname='inceptionv1_tar.gz',
                origin=
                'http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz'
            )
            tar_file_reader = tarfile.open(tar_file)
            tar_file_reader.extractall(save_dir)
            if dtype == 'float16':
                saver = convert_ckpt_to_fp16(
                    Path(save_dir, 'inception_v1.ckpt').as_posix())
            sess.run(tf.global_variables_initializer())
        else:
            raise ValueError("""Requested model type = %s not one of
            ["GoogleNet", "InceptionV1", "MobileNet", "MobileNetV2", "NASNetMobile", "DenseNet121",
            "ResNet50", "Xception", "InceptionV3"].""" % model_name)
        save_dir.mkdir(parents=True, exist_ok=True)
        return saver.save(sess,
                          Path(save_dir, f"{model_name}.ckpt").as_posix())
Code example #10
@contextmanager
def use_floatx(x: str):
    """Context manager to temporarily
    set the keras backend precision.
    """
    _floatx = floatx()
    set_floatx(x)
    try:
        yield
    finally:
        # always restore the previous precision, even if the body raises
        set_floatx(_floatx)
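
A minimal usage sketch, assuming floatx/set_floatx are imported from tensorflow.keras.backend and the decorator comes from contextlib:

from contextlib import contextmanager
from tensorflow.keras.backend import floatx, set_floatx

with use_floatx('float64'):
    print(floatx())  # 'float64' inside the block
print(floatx())      # the previous default is restored afterwards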
Code example #11
File: napc.py Project: nicojahn/open-neural-apc
    def _set_precision(calculation_dtype, calculation_epsilon):
        # enable single/half/double precision
        K.set_floatx(calculation_dtype)
        K.set_epsilon(calculation_epsilon)

        # enable mixed precision
        if "float16" in calculation_dtype:
            mixed_precision.set_global_policy("mixed_float16")
Code example #12
    def __init__(self, state_dim, action_dim, lr):
        K.set_floatx('float32')

        self.state_dim = state_dim
        self.action_dim = action_dim

        self.model = self.build_model()
        self.target = self.build_model()
        self.adam_optimizer = Adam(learning_rate=lr)
Code example #13
File: tf2Snekmaker.py Project: Smoss/SnekMaker
def main(use_mixed_precision=False,
         training_batch_size=TRAINING_BATCH_SIZE,
         generation_batch_size=generation_batch_size,
         generator_optimizer=generator_optimizer,
         discriminator_optimizer=discriminator_optimizer):
    if use_mixed_precision:
        print('Using Mixed Precision')
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_policy(policy)
    else:
        policy = mixed_precision.Policy('float32')
        mixed_precision.set_policy(policy)
    epsilon = 1e-7
    dtype = 'float32'
    K.set_epsilon(epsilon)
    K.set_floatx(dtype)
    print(K.floatx(), K.epsilon(), training_batch_size)
    print('Compute dtype: %s' % policy.compute_dtype)
    print('Variable dtype: %s' % policy.variable_dtype)
    tf.autograph.set_verbosity(0, False)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    # if start_from_scratch:
    #     initializeSnakeIdentifier(
    #         train_datagen,
    #         test_datagen
    #     )
    snek_generator = createSnekMaker()
    snek_discriminator = initializeSnakeIdentifier()
    # snek_discriminator.predict([baby_noise, tf.constant([0]*32)])
    gan = make_gan(snek_discriminator, snek_generator)
    snek_discriminator.compile(optimizer=discriminator_optimizer,
                               loss='binary_crossentropy')

    train_df = pd.read_csv('./classes_train.csv')

    checkpoint_dir = './snek_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    print(checkpoint_prefix)
    checkpoint = tf.train.Checkpoint(
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer,
        snek_checker=snek_discriminator,
        snek_generator=snek_generator,
        gan=gan)
    checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
    # checkpoint.save(file_prefix=checkpoint_prefix)
    # trainSnekMaker(
    #     train_datagen,
    #     check_model=snek_checker,
    #     gen_model=snek_generator,
    #     train_model=train_model
    # )
    train(train_df, EPOCHS, snek_generator, snek_discriminator, gan,
          checkpoint, checkpoint_prefix)
Code example #14
File: GAN.py Project: eladshabi/Acgan
    def __init__(self, rows, cols, channels, classes, latent, tpu=False):

        if tpu:
            set_floatx('float16')
            set_epsilon(1e-4)

        # Input shape
        self.img_rows = rows
        self.img_cols = cols
        self.channels = channels
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        self.num_of_classes = classes

        # size of the vector to feed the generator (z)
        self.latent_dim = latent

        #optimizer = Adam(0.0002, 0.5)

        optimizer = tf.train.AdamOptimizer(0.0002, 0.5)

        loss_scale_manager = FixedLossScaleManager(5000)

        loss_scale_optimizer = LossScaleOptimizer(optimizer,
                                                  loss_scale_manager)

        losses = ['binary_crossentropy', 'sparse_categorical_crossentropy']

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()

        self.discriminator.compile(loss=losses,
                                   optimizer=loss_scale_optimizer,
                                   metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise and the target label as input
        # and generates the corresponding digit of that label
        noise = Input(shape=(self.latent_dim, ))
        label = Input(shape=(1, ))
        img = self.generator([noise, label])

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated image as input and determines validity
        # and the label of that image
        valid, target_label = self.discriminator(img)

        # The combined model  (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model([noise, label], [valid, target_label])
        self.combined.compile(loss=losses, optimizer=loss_scale_optimizer)
Code example #15
    def _set_precision(calculation_dtype, calculation_epsilon):
        # enable single/half/double precision
        import tensorflow.keras.backend as K
        K.set_floatx(calculation_dtype)
        K.set_epsilon(calculation_epsilon)

        # enable mixed precision
        if "float16" in calculation_dtype:
            import tensorflow.keras.mixed_precision as mixed_precision
            policy = mixed_precision.Policy("mixed_float16")
            mixed_precision.set_global_policy(policy)
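
Once set this way, the global policy can be verified at runtime. A small check, assuming TF 2.4+ where mixed_precision.global_policy is available:

from tensorflow.keras import mixed_precision

policy = mixed_precision.global_policy()
print(policy.name)            # e.g. 'mixed_float16'
print(policy.compute_dtype)   # 'float16'
print(policy.variable_dtype)  # 'float32'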
Code example #16
    def _model_tf_v1(n_input, n_output):
        """

        Initialize the tensorflow 1.1X version of the model

        :param int n_input: number of input dimensions (number of ECG + aux channels)
        :param int n_output: number of outputs (number of EEG channels)

        :return: initialized model
        """
        from tensorflow.python.keras.layers import Input, Bidirectional, CuDNNGRU, Dense, Dropout

        session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(
            allow_growth=True))
        sess = tf.Session(config=session_config)

        K.set_floatx('float64')
        ecg_input = Input(shape=(None, n_input),
                          dtype='float64',
                          name='ecg_input')

        x = Bidirectional(
            CuDNNGRU(16,
                     return_sequences=True,
                     recurrent_regularizer=l2(0.096),
                     activity_regularizer=l2(0.030)))(ecg_input)

        x = Bidirectional(
            CuDNNGRU(16,
                     return_sequences=True,
                     recurrent_regularizer=l2(0.090),
                     activity_regularizer=l2(0.013)))(x)

        x = Dense(8, activation='relu')(x)
        x = Dropout(0.327)(x)

        x = Bidirectional(
            CuDNNGRU(16,
                     return_sequences=True,
                     recurrent_regularizer=l2(0.024),
                     activity_regularizer=l2(0.067)))(x)

        x = Bidirectional(
            CuDNNGRU(64,
                     return_sequences=True,
                     recurrent_regularizer=l2(2.48e-07),
                     activity_regularizer=l2(0.055)))(x)

        bcg_out = Dense(n_output, activation='linear')(x)
        model = Model(inputs=ecg_input, outputs=bcg_out)

        return model
Code example #17
    def __init__(self, save_path=None):
        K.set_floatx('float32')

        if save_path is not None:
            self.save_path = save_path

        self.img_rows = 256
        self.img_cols = 256
        self.channels = 1

        self.AUTOTUNE = tf.data.experimental.AUTOTUNE
        self.optimizer = 'Adam'
        self.loss = 'binary_crossentropy'
        self.metrics = [tf.keras.metrics.BinaryAccuracy()]
Code example #18
def main():
    # parse args --------------------------------------------------------------
    args = _parse_args()

    # set device --------------------------------------------------------------
    os.environ['CUDA_VISIBLE_DEVICES'] = args.devices

    # set learning phase ------------------------------------------------------
    K.set_learning_phase(0)

    # set data format ---------------------------------------------------------
    if args.devices == '' or args.model == 'mobilenet_v2':
        # note: tensorflow supports b01c pooling on cpu only
        K.set_image_data_format('channels_last')
    else:
        K.set_image_data_format('channels_first')

    # set dtype ---------------------------------------------------------------
    K.set_floatx(args.dtype)

    # load model --------------------------------------------------------------
    model_module = globals()[args.model]
    model_kwargs = {}
    if args.model == 'mobilenet_v2':
        model_kwargs['alpha'] = args.mobilenet_v2_alpha
    model = model_module.get_model(input_type=args.input_type,
                                   input_shape=(args.input_height,
                                                args.input_width),
                                   output_type=args.output_type,
                                   n_classes=args.n_classes,
                                   sampling=False,
                                   **model_kwargs)

    # create frozen graph
    sess = K.get_session()
    out_name = [out.op.name for out in model.outputs]
    frozen_graph = _freeze_session(sess, output_names=out_name)

    dirname = os.path.dirname(args.output_filepath)
    filename = os.path.basename(args.output_filepath)
    assert os.path.splitext(filename)[1] == '.pb'
    tf.train.write_graph(frozen_graph, dirname, filename, as_text=False)
    # tf.train.write_graph(frozen_graph, dirname, filename, as_text=False)

    # store input and output names as json file
    write_json(
        args.output_filepath + '.json', {
            'input_names': [input.op.name for input in model.inputs],
            'output_names': [output.op.name for output in model.outputs]
        })
Code example #19
    def __init__(self, 
                 data: Tuple[tf.Tensor, tf.Tensor], 
                 m: int = 20, 
                 d: int = 1,
                 alpha: np.float = 1./np.sqrt(2.), 
                 eps_sq: np.float = 1,
                 sigma_n_sq: np.float = 1,
                 sigma_f_sq: np.float = 1,
                 dir_weights: str = None):
                    
        if data[1].dtype == np.float64:
            K_bd.set_floatx('float64')
        else:
            set_default_float(np.float32)

        self.num_data = tf.cast(data[1].shape[0], default_float())
        self.data = (tf.cast(data[0], default_float()), tf.cast(data[1], default_float()))
        self.const = tf.cast(0.5*data[1].size*np.log(2*np.pi), default_float())
               
        self.flag_1d = d == 1
        self.alpha = tf.cast(alpha, default_float())
        self.alpha_sq = tf.square(self.alpha)
        self.m = tf.cast(m, default_float())
        self.this_range = tf.constant(np.asarray(list(product(range(1, m + 1), repeat=d))).squeeze(), dtype=default_float())
        self.this_range_1 = self.this_range - 1.
        self.this_range_1_2 = self.this_range_1 if self.flag_1d else tf.range(m, dtype=default_float())
        self.this_range_1_int = tf.cast(self.this_range_1, tf.int32)
        self.tf_range_dnn_out = tf.range(d)
        self.this_range_1_ln2 = np.log(2.)*self.this_range_1

        self.vander_range = tf.range(m+1, dtype=default_float())
        self.eye_k = tf.eye(m**d, dtype=default_float())
        self.yTy = tf.reduce_sum(tf.math.square(self.data[1])) 
        self.coeff_n_tf = tf.constant(np.load(os.path.dirname(os.path.realpath(__file__)) + '/hermite_coeff.npy')[:m, :m], dtype=default_float())
        
        eps_sq = eps_sq*np.ones(d) if d > 1 else eps_sq       
        self.eps_sq = Parameter(eps_sq, transform=positive(), dtype=default_float())
        self.sigma_f_sq = Parameter(sigma_f_sq, transform=positive(), dtype=default_float())
        self.sigma_n_sq = Parameter(sigma_n_sq, transform=positive(), dtype=default_float())
       
        model = models.Sequential()
        model.add(layers.Dense(512, activation='tanh', input_dim=data[0].shape[1]))        
        model.add(layers.Dense(256, activation='tanh'))
        model.add(layers.Dense(64, activation='tanh'))
        model.add(layers.Dense(d))      
        
        if dir_weights is not None:
            model.load_weights(dir_weights)
        self.neural_net = model
Code example #20
 def __init__(self, content: np.ndarray, style: np.ndarray):
     tf.compat.v1.disable_eager_execution()
     K.set_floatx('float64')
     self.content = content
     self.style = style
     print("   Building transfer model.")
     self.contentTensor = K.variable(self.content)
     self.styleTensor = K.variable(self.style)
     self.genTensor = K.placeholder((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
     self.inputTensor = K.concatenate(
         [self.contentTensor, self.styleTensor, self.genTensor], axis=0)
     self.model = vgg19.VGG19(include_top=False,
                              input_tensor=self.inputTensor)
     self.totalLoss = self.constructTotalLoss()
     self.gradient = self.constructGradient()
     self.kerasFunction = self.constructKerasFunction()
     self.runOutput = None
Code example #21
    def __init__(self,
                 field_size=32,
                 name="DenseNN",
                 loss=kls.MeanSquaredError()):
        Inputshape = (field_size, field_size, 2)

        K.set_floatx('float32')
        model = km.Sequential(name=name)
        model.add(
            kl.Flatten(input_shape=Inputshape, data_format="channels_last"))
        model.add(kl.Dense(32, activation='sigmoid', name="Dense1"))
        model.add(kl.Dense(32, activation='sigmoid', name="Dense2"))
        model.add(kl.Dense(1, activation='linear', name="Prediction"))

        model.compile(optimizer=ko.Adam(), loss=loss)

        super().__init__(model, loss=loss)
Code example #22
def train_bertffn(model_path='models/bertffn_crossentropy/bertffn',
                  data_path='data/mqa_csv',
                  num_epochs=20,
                  num_gpu=1,
                  batch_size=64,
                  learning_rate=2e-5,
                  validation_split=0.2,
                  loss='categorical_crossentropy',
                  pretrained_path='pubmed_pmc_470k/',
                  max_seq_len=256):
    tf.compat.v1.disable_eager_execution()
    if loss == 'categorical_crossentropy':
        loss_fn = qa_pair_cross_entropy_loss
    else:
        loss_fn = qa_pair_loss
    K.set_floatx('float32')
    tokenizer = FullTokenizer(os.path.join(pretrained_path, 'vocab.txt'))
    d = create_dataset_for_bert(data_path,
                                tokenizer=tokenizer,
                                batch_size=batch_size,
                                shuffle_buffer=500000,
                                dynamic_padding=True,
                                max_seq_length=max_seq_len)
    eval_d = create_dataset_for_bert(data_path,
                                     tokenizer=tokenizer,
                                     batch_size=batch_size,
                                     mode='eval',
                                     dynamic_padding=True,
                                     max_seq_length=max_seq_len,
                                     bucket_batch_sizes=[64, 64, 64])

    medical_qa_model = MedicalQAModelwithBert(
        config_file=os.path.join(pretrained_path, 'bert_config.json'),
        checkpoint_file=os.path.join(pretrained_path, 'biobert_model.ckpt'))
    optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
    medical_qa_model.compile(optimizer=optimizer,
                             loss=loss_fn,
                             metrics=[qa_pair_batch_accuracy])

    epochs = num_epochs
    loss_metric = tf.keras.metrics.Mean()

    medical_qa_model.fit(d, epochs=epochs)
    medical_qa_model.summary()
    medical_qa_model.save_weights(model_path)
    medical_qa_model.evaluate(eval_d)
Code example #23
File: run.py Project: RSteendam/quantization
def set_precision(precision):
    if precision == 'float16':
        dtype = 'float16'
        K.set_floatx(dtype)
        # default is 1e-7 which is too small for float16.  Without adjusting the epsilon, we will get NaN predictions because of divide by zero problems
        K.set_epsilon(1e-4)
        print_debug('Compute dtype: %s' % 'float16')
        print_debug('Variable dtype: %s' % 'float16')
    elif precision == 'mixed':
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_policy(policy)
        print_debug('Compute dtype: %s' % policy.compute_dtype)
        print_debug('Variable dtype: %s' % policy.variable_dtype)
    else:
        policy = mixed_precision.Policy('float32')
        mixed_precision.set_policy(policy)
        print_debug('Compute dtype: %s' % policy.compute_dtype)
        print_debug('Variable dtype: %s' % policy.variable_dtype)
Code example #24
 def __init__(self,
              keras_input_layer,
              alpha=1,
              stddev=1.0,
              use_float64_ops=False,
              **kwargs):
     super(RBMBase, self).__init__(keras_input_layer, **kwargs)
     self.use_float64_ops = use_float64_ops
     self.layers_dtype = tensorflow.complex128 if self.use_float64_ops else tensorflow.complex64
     self.alpha = alpha
     if self.use_float64_ops:
         K.set_floatx('float64')
         x = ToComplex128()(self.keras_input_layer)
     else:
         x = ToComplex64()(self.keras_input_layer)
     self.initializer = tensorflow.random_normal_initializer(
         stddev=stddev, dtype=self.layers_dtype.real_dtype)
     self._predictions = self.build_predictions(x)
Code example #25
File: CNN_regression.py Project: jahanpd/CXR_CNN
    def __init__(self, image_paths, labels, test_x, test_y, save_path=None):
        K.set_floatx('float32')
        self.x = image_paths
        self.y = labels
        self.xt = test_x
        self.yt = test_y

        if save_path is not None:
            self.save_path = save_path

        self.img_rows = 256
        self.img_cols = 256
        self.channels = 1

        self.AUTOTUNE = tf.data.experimental.AUTOTUNE
        self.optimizer = 'Adam'
        self.loss = 'mse'
        self.metrics = ['mse']
Code example #26
    def build_model(self, input_layer_size):
        K.set_floatx('float32')

        # sess_gpu = tf.Session(config=tf.ConfigProto(log_device_placement=True))
        print(tf.test.is_gpu_available())
        from tensorflow.python.client import device_lib

        sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
        adam = Adam(learning_rate=0.000007)
        model = Sequential()

        model.add(Dense(input_layer_size, kernel_initializer='uniform', activation='relu'))
        model.add(Dropout(0.1))
        model.add(Dense(int(input_layer_size / 2), kernel_initializer='uniform', activation='relu'))
        model.add(Dropout(0.1))
        model.add(Dense(64, kernel_initializer='uniform', activation='relu'))
        model.add(Dropout(0.1))
        model.add(Dense(32, kernel_initializer='uniform', activation='relu'))
        model.add(Dropout(0.1))
        model.add(Dense(8, kernel_initializer='uniform', activation='relu'))
        model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
        # model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
        model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
        self.model = model
        X_train, X_test, Y_train, Y_test = self.get_train_test_data(self.X, self.Y)

        history = self.model.fit(
            X_train, Y_train, epochs=20, batch_size=64
            , validation_data=(X_test, Y_test)
        )
        self.save_model()

        print(X_test[0])
        y_pred = self.model.predict(X_test)
        print(history.history)
        print("classification_report\n", classification_report(Y_test, y_pred.round()))
        print("accuracy_score\n", accuracy_score(Y_test, y_pred.round()))
        # print(confusion_matrix(X_test, Y_test))
        self.model.evaluate(X_test, Y_test, verbose=1)
Code example #27
    def __init__(self,
                 n_input=1,
                 n_output=63,
                 lr=1e-3,
                 opt_type='adam',
                 **kwargs):
        """
        Constructor for RNN model

        :param int n_input: number of input dimensions (number of ECG + aux channels)
        :param int n_output: number of outputs (number of EEG channels)
        :param float lr: learning rate
        :param str opt_type: chosen type of optimizer, allowed to be adam, rmsprop or sgd
        :param kwargs: clipnorm: normalized value for gradient clipping
                       clipvalue: numerical value for gradient clipping

                       and other Keras optimizer parameters for the chosen optimizer

        :return: initialized model object

        """
        super().__init__()
        self.name = 'default_rnn_model'
        self.lr = lr
        self.opt_type = opt_type.lower()
        self.n_input = n_input
        self.n_output = n_output

        K.set_floatx('float64')

        if opt_type.lower() == 'adam':
            self.optimizer = optimizers.Adam(lr=lr, **kwargs)

        elif opt_type.lower() == 'rmsprop':
            self.optimizer = optimizers.RMSprop(lr=lr, **kwargs)

        elif opt_type.lower() == 'sgd':
            self.optimizer = optimizers.SGD(lr=lr, **kwargs)

        else:
            raise NotImplementedError
Code example #28
def make_gps_mlp(input_size, output_size, config):
    global hidden_layer_nodes
    K.set_floatx('float64')

    input = Input(shape=(input_size, ))
    x = Dense(units=hidden_layer_nodes, activation='relu')(input)
    x = Dense(units=hidden_layer_nodes, activation='relu')(x)
    mean = Dense(units=output_size, activation='linear')(x)
    # mean = Dense(units=output_size, activation='tanh')(x)
    # std  = Dense(units=output_size, activation='softplus')(x)

    # action_out = Lambda(action)([mean, std])
    action_out = Lambda(gps_action)(mean)
    model_act = Model(inputs=input, outputs=action_out)

    if config['train']:
        act_taken = Input(shape=(output_size, ))
        guiding_prob = Input(shape=(1, ))
        # log_prob_out = Lambda(log_prob)([mean, std, act_taken])
        prob_out = Lambda(prob)([mean, act_taken])

        # Model for full training
        reg_weight = K.variable(1e-2, name='regularization_weight')
        model_train = Model(inputs=[input, act_taken, guiding_prob],
                            outputs=prob_out)
        adam1 = Adam(lr=config['lr'])
        model_train.compile(loss=gps_loss(reg_weight, guiding_prob),
                            optimizer=adam1)

        # Model for pretraining
        model_pretrain = Model(inputs=[input, act_taken], outputs=prob_out)
        # adam2 = Adam(lr=config['lr'])
        adam2 = Adam(lr=0.001)
        model_pretrain.compile(loss=binary_crossentropy, optimizer=adam2)
    else:
        model_train = None
        model_pretrain = None
        reg_weight = None

    return model_act, model_train, model_pretrain, reg_weight
Code example #29
def train_conv_net_cpu(train_data,
                       train_labels,
                       val_data,
                       val_labels,
                       conv_net_hyperparameters,
                       num_processors,
                       seed,
                       dtype="float32",
                       inter_op_threads=2):
    """
    Train a convolutional neural network on the CPU.
    """
    np.random.seed(seed)
    if "get_visible_devices" in dir(tf.config.experimental):
        gpus = tf.config.experimental.get_visible_devices("GPU")
    else:
        gpus = tf.config.get_visible_devices("GPU")
    if len(gpus) > 0:
        for device in gpus:
            tf.config.experimental.set_memory_growth(device, True)
    tf.config.threading.set_inter_op_parallelism_threads(inter_op_threads)
    tf.config.threading.set_intra_op_parallelism_threads(num_processors)
    if tf.__version__[0] == "1":
        tf.set_random_seed(seed)
    else:
        tf.random.set_seed(seed)
    K.set_floatx(dtype)
    with tf.device("/CPU:0"):
        scn = ResNet(**conv_net_hyperparameters)
        history = scn.fit(train_data,
                          train_labels,
                          val_x=val_data,
                          val_y=val_labels)
        print(history)
        epoch_times = scn.time_history.times
        batch_loss = np.array(scn.loss_history.losses).ravel().tolist()
        epoch_loss = np.array(scn.loss_history.val_losses).ravel().tolist()
    return epoch_times, batch_loss, epoch_loss
Code example #30
File: CNN_classification.py Project: jahanpd/CXR_CNN
    def __init__(self, image_paths, labels, save_path=None):
        K.set_floatx('float32')
        self.image_paths = image_paths
        self.labels = labels

        if save_path is not None:
            self.save_path = save_path

        self.img_rows = 256
        self.img_cols = 256
        self.channels = 1

        self.AUTOTUNE = tf.data.experimental.AUTOTUNE
        self.optimizer = 'Adam'
        self.loss = 'binary_crossentropy'
        self.metrics = [
            tf.keras.metrics.AUC(),
            tf.keras.metrics.FalseNegatives(),
            tf.keras.metrics.FalsePositives(),
            tf.keras.metrics.TrueNegatives(),
            tf.keras.metrics.TruePositives()
        ]