Example #1
import os

import tensorflow as tf

# Project-local modules referenced below.
import input_pipeline
import models
import utils
from utils import write_pickle


def main(args):
    c = utils.make_config(args)
    d = input_pipeline.DataInputPipeline(args.inputs, args.vocab, args.labels,
                                         c)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # Optionally enable GPU memory growth:
    # gpu_options = tf.GPUOptions(allow_growth=True)
    # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
    sess = tf.Session()

    m = models.Model(c, sess, d)

    # Initialize variables before restoring; running the initializer after
    # the load would overwrite the restored weights with fresh random values.
    sess.run(tf.global_variables_initializer())

    print('INFO: loading model from checkpoint...')
    m.load(dir=args.checkpoint)
    print('INFO: done!')

    print('INFO: starting inference...')
    prog = utils.Progbar(target=d.get_num_batches())

    source_out = {'source': [], 'len': [], 'attn': [], 'ids': []}
    sales_out = {'label': [], 'pred': []}
    price_out = {'label': [], 'pred': []}
    shop_out = {'label': [], 'pred': []}
    category_out = {'label': [], 'pred': []}
    loss_out = []

    for i, batch in enumerate(d.batch_iter()):
        sales_hat, price_hat, shop_hat, category_hat, loss, attn = \
            m.test_on_batch(*batch[:-1])

        prog.update(i + 1, [('test loss', loss)])

        # record results
        source, source_len, log_sales, price, shop, category, ids = batch

        source_out['source'] += source
        source_out['len'] += source_len
        source_out['attn'] += attn.tolist()
        source_out['ids'] += ids

        sales_out['label'] += log_sales
        sales_out['pred'] += sales_hat.tolist()

        price_out['label'] += price
        price_out['pred'] += price_hat.tolist()

        shop_out['label'] += shop
        shop_out['pred'] += shop_hat.tolist()

        category_out['label'] += category
        category_out['pred'] += category_hat.tolist()

        loss_out.append(loss)  # append: `+=` with a scalar would raise a TypeError

    if args.output is not None:
        print('\nINFO: dumping output to', args.output)
        write_pickle(source_out, args.output)

    print(r'INFO: done \(^o^)/')
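Example #1 calls `write_pickle` (imported from the author's `utils` module, which isn't shown here). A minimal sketch of what such a helper plausibly looks like, assuming plain pickle serialization rather than the author's actual implementation:

import pickle

def write_pickle(obj, path):
    # Dump `obj` to `path` with pickle's default binary protocol.
    with open(path, 'wb') as f:
        pickle.dump(obj, f)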
Example #2
import os
import sys

from utils import make_config

sys.path.append('..')
from seismiqb import SeismicGeometry



# Help message
MSG = """Convert SEG-Y cube to HDF5.
Input SEG-Y file must have correctly filled `INLINE_3D` and `CROSSLINE_3D` headers.
Various statistics about the traces are also inferred and stored in the resulting file,
so this script takes some time.
"""

# Argname, description, dtype, default
ARGS = [
    ('cube-path', 'path to the SEG-Y cube to convert to HDF5', str, None),
]


if __name__ == '__main__':
    config = make_config(MSG, ARGS, os.path.basename(__file__).split('.')[0])

    geometry = SeismicGeometry(
        config['cube-path'],
        headers=SeismicGeometry.HEADERS_POST_FULL,
        index_headers=SeismicGeometry.INDEX_POST,
        collect_stats=True, spatial=True,
    )
    geometry.make_hdf5()
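For reference, a hypothetical invocation of this script, assuming `make_config` builds an argparse-style command-line interface from the ARGS list above (the script filename is also an assumption, since the code derives its config name from `__file__`):

python convert_to_hdf5.py --cube-path /data/cubes/survey.sgy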
Example #3
import os

import tensorflow as tf

# Project-local modules referenced below.
import input_pipeline
import models
import utils


def main(args):
    if not os.path.exists(args.checkpoint):
        os.makedirs(args.checkpoint)

    c = utils.make_config(args)

    d = input_pipeline.DataInputPipeline(args.inputs, args.vocab, args.labels,
                                         c)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # or whichever device you would like to use

    # Optionally enable GPU memory growth:
    # gpu_options = tf.GPUOptions(allow_growth=True)
    # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
    sess = tf.Session()

    m = models.Model(c, sess, d)

    sess.run(tf.global_variables_initializer())

    if args.train:
        print('INFO: starting training...')
        prog = utils.Progbar(target=d.get_num_batches())
        epoch = 1
        for _ in range(c.num_epochs):
            epoch_loss = 0
            for i, batch in enumerate(d.batch_iter()):
                sales_hat, price_hat, shop_hat, category_hat, loss = \
                    m.train_on_batch(*batch[:-1])
                prog.update(i + 1, [('train loss', loss)])
                epoch_loss += loss
            print('\nINFO: EPOCH', epoch, 'MEAN LOSS:',
                  epoch_loss / float(d.get_num_batches()))
            print('INFO: saving checkpoint...')
            m.save(args.checkpoint)
            print('INFO: ...done!')
            epoch += 1
    else:
        print('INFO: loading model from checkpoint...')
        m.load(dir=args.checkpoint)
        print('INFO: done!')

    if args.output is not None:
        print('INFO: starting inference...')
        prog = utils.Progbar(target=d.get_num_batches())

        source_out = {'source': [], 'len': [], 'attn': [], 'ids': []}
        sales_out = {'label': [], 'pred': []}
        price_out = {'label': [], 'pred': []}
        shop_out = {'label': [], 'pred': []}
        category_out = {'label': [], 'pred': []}
        loss_out = []

        for i, batch in enumerate(d.batch_iter()):
            sales_hat, price_hat, shop_hat, category_hat, loss, attn = \
                m.test_on_batch(*batch[:-1])

            prog.update(i + 1, [('test loss', loss)])

            # record results
            source, source_len, log_sales, price, shop, category, ids = batch

            source_out['source'] += source
            source_out['len'] += source_len
            source_out['attn'] += attn.tolist()
            source_out['ids'] += ids

            sales_out['label'] += log_sales
            sales_out['pred'] += sales_hat.tolist()

            price_out['label'] += price
            price_out['pred'] += price_hat.tolist()

            shop_out['label'] += shop
            shop_out['pred'] += shop_hat.tolist()

            category_out['label'] += category
            category_out['pred'] += category_hat.tolist()

            loss_out.append(loss)  # append: `+=` with a scalar would raise a TypeError

        print('\nINFO: dumping output to', args.output)
        utils.write_pickle(source_out, args.output)

    print('\n\nINFO: \\(^o^)/ done \\(^o^)/')
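After inference, the dumped dictionary can be loaded back for analysis. Note that only `source_out` is written; the other result dicts are collected but never persisted by this script. A small readback sketch, assuming `utils.write_pickle` wraps a plain `pickle.dump` and that `args.output` was `output.pkl` (both assumptions):

import pickle

with open('output.pkl', 'rb') as f:
    results = pickle.load(f)

# Keys mirror `source_out` above: 'source', 'len', 'attn', 'ids'.
print(len(results['ids']), 'examples')
print(len(results['attn'][0]), 'attention weights in the first example')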