Example #1
    def showVoxImages(self, epoch, test_input=None, num_examples=0):
        # When a count is given, sample fresh random latent vectors instead of using test_input
        if num_examples > 0:
            test_input = tf.random.normal([num_examples, self.latent_dim])

        # Decode the latent vectors and plot each generated voxel grid
        predictions = self.gen_model(test_input, training=False)
        for i in range(test_input.shape[0]):
            gen_vox = predictions.numpy()[i, ..., 0]
            ut.plotVox(gen_vox, title='Epoch {}'.format(epoch), save_fig=True)
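
# Usage sketch for the method above (the `vae` instance name is hypothetical;
# any object exposing gen_model and latent_dim works):
#   vae.showVoxImages(epoch=25, num_examples=4)  # plots 4 shapes decoded from random latents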
Example #2
def interpolateDesigns(anchor_vects, labels, index1, index2, divs=10):
    # Report which two model IDs are being interpolated between
    mids_string = ' {} , {} '.format(labels[index1].numpy().decode(), labels[index2].numpy().decode())
    print(mids_string)
    # Build divs latent vectors evenly spaced between the two anchor vectors
    interp_vects = ut.interp(anchor_vects[index1].numpy(), anchor_vects[index2].numpy(), divs)

    # Decode every interpolated vector and plot the resulting voxel models
    v = model.sample(interp_vects)
    v = v.numpy()[:, :, :, :, 0]
    for sample in v:
        ut.plotVox(sample, step=1, threshold=0.5, limits=cf_limits, show_axes=False)
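
# ut.interp is a project helper; below is a minimal sketch of the linear
# interpolation it presumably performs (assumed behavior, not the project's
# actual implementation):
import numpy as np

def interp_sketch(vec_a, vec_b, divs=10):
    # One row per step: (1 - w) * a + w * b for evenly spaced weights w in [0, 1]
    weights = np.linspace(0.0, 1.0, divs)
    return np.stack([(1 - w) * vec_a + w * vec_b for w in weights])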
Example #3
def descRecon(index, title=''):
    # Predict a latent shape vector from the encoded description
    enc_text = enc_descs[index]
    pred = modeltxt.model(enc_text[None, ...])
    print('\nPredicted vector: \n', pred, '\n')
    print('Label vector: \n', label_vect[index])
    loss = modeltxt.compute_loss(label_vect[index], pred).numpy()[0]
    mid = mids[index]
    print('\nStats for this comparison\n{:3d}  Loss: {:.3f}  Sum pred: {:.3f}  Sum lab: {:.3f}\n  Mid : {}\n'.format(index, loss, np.sum(pred), np.sum(label_vect[index]), mid))

    # Decode both the predicted and the ground-truth latent vectors to voxels
    vox_gen = model.sample(pred).numpy()[0, ..., 0]
    vox_lab = model.sample(label_vect[index][None, ...]).numpy()[0, ..., 0]

    ut.plotVox(vox_lab, title='Org {}'.format(title), limits=cf_limits)
    ut.plotVox(vox_gen, title='Rec {}'.format(title), limits=cf_limits)
Example #4
def getRecons(num_to_get=10, cat_label_index=-2):
    model.training = False
    anchors, labels = [], []
    # Draw random samples until num_to_get items match the requested category
    # (cat_label_index == -2 means accept every category)
    for anchor, label in train_dataset.unbatch().shuffle(100000).take(num_to_get * 50):
        catid = -1
        try:
            catid = cf_cat_prefixes.index('0{}'.format(ut.getMidCat(label.numpy().decode())))
        except ValueError:
            print('not found\n ', label.numpy().decode())
        if catid == cat_label_index or cat_label_index == -2:
            anchors.append(tf.cast(anchor, dtype=tf.float32))
            labels.append(label)
        if len(anchors) >= num_to_get:
            break

    # Encode each anchor to a latent vector, then decode it back to voxels
    anchor_vects = [model.encode(anchors[i][None, :, :, :], reparam=True) for i in range(len(anchors))]
    v = [model.sample(anchor_vects[i]).numpy()[0, ..., 0] for i in range(len(anchors))]

    for i, sample in enumerate(v):
        print('Index: {}   Mid: {}'.format(i, labels[i].numpy().decode()))
        ut.plotVox(anchors[i].numpy()[..., 0], step=1, threshold=0.5, title='Index {} Original'.format(i), limits=cf_limits)
        ut.plotVox(v[i], step=2, threshold=0.5, title='Index {} Reconstruct'.format(i), limits=cf_limits)

    print([mid.numpy().decode() for mid in labels])
    return anchor_vects, labels
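
# model.encode(..., reparam=True) suggests a VAE-style encoder. A minimal sketch
# of the standard reparameterization trick it likely applies; the mean/log-variance
# split below is an assumption about this model's encoder output:
import tensorflow as tf

def encode_with_reparam(encoder, x):
    mean, logvar = tf.split(encoder(x), num_or_size_splits=2, axis=1)
    eps = tf.random.normal(shape=tf.shape(mean))
    # z = mu + sigma * eps keeps the sample differentiable w.r.t. mean and logvar
    return mean + tf.exp(0.5 * logvar) * eps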
Example #5
def showRandIndices(num_to_show=100):
    # Decode stored latent vectors for randomly chosen model IDs and plot them
    for i in np.random.randint(0, len(shape2vec), size=num_to_show):
        vox = shapemodel.decode(shape2vec[mids[i]][None, ...], apply_sigmoid=True)[0, ..., 0]
        ut.plotVox(vox, step=2, limits=cf_limits, title=i)
Example #6
#%% Setup datasets
voxs_stacked = np.stack(all_voxs, axis=0)
train_dataset = tf.data.Dataset.from_tensor_slices((voxs_stacked, all_mids))
# Grab a single batch of 50 samples to visualize during training
for test_samples, test_labels in train_dataset.batch(50).take(1):
    pass
test_samples = tf.cast(test_samples, dtype=tf.float32)

train_dataset, test_dataset = ut.splitData(train_dataset, 0.1)
train_dataset = train_dataset.batch(cf_batch_size, drop_remainder=True)
test_dataset = test_dataset.batch(cf_batch_size, drop_remainder=False)

# Count the training batches by iterating over the dataset once
total_train_batches = 0
for _ in train_dataset:
    total_train_batches += 1
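
# Since the dataset is built from in-memory tensors, its size is known up front,
# so the counting loop above could be replaced with tf.data's cardinality (a
# sketch; cardinality() can report UNKNOWN for data-dependent pipelines):
#   total_train_batches = int(train_dataset.cardinality().numpy())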

#%% Show initial models
sample_index = 16
ut.plotVox(test_samples[sample_index], title='Original', threshold=0.5, limits=cf_limits, save_fig=False)
if lg.total_epochs > 10:
    ut.plotVox(model.reconstruct(test_samples[sample_index][None, ...], training=False), limits=cf_limits, title='Recon')

#%% Training methods
def getTestSetLoss(dataset, batches=0):
    # Shuffle before take so the evaluated batches are a random subset of the test set
    subset = dataset.shuffle(100).take(batches) if batches > 0 else dataset.shuffle(100)
    test_losses = []
    for test_x, _ in subset:
        test_x = tf.cast(test_x, dtype=tf.float32)
        test_losses.append(model.compute_loss(test_x))
    return np.mean(test_losses)

def trainModel(epochs, display_interval=-1, save_interval=10, test_interval=10):
    print('\n\nStarting training...\n')
    model.training = True
    for epoch in range(1, epochs + 1):
Example #7
lg.restoreCP()


# Method for going from text to voxels
def getVox(text):
    # Pad/encode the text, predict a latent shape vector, then decode it to voxels
    ptv = padEnc(text)
    preds = txtmodel.sample(ptv)
    vox = shapemodel.sample(preds).numpy()[0, ..., 0]
    return vox
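
# padEnc is a project helper; a minimal sketch of the tokenize-and-pad step it
# presumably performs. The tokenizer instance and max_len are assumptions:
from tensorflow.keras.preprocessing.sequence import pad_sequences

def pad_enc_sketch(texts, tokenizer, max_len=100):
    if isinstance(texts, str):
        texts = [texts]
    seqs = tokenizer.texts_to_sequences(texts)  # words -> integer IDs
    return pad_sequences(seqs, maxlen=max_len, padding='post')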


#%% Test text2shape model
for _ in range(20):
    text = input('Text description: ')
    vox = getVox(text)
    ut.plotVox(vox, limits=cf_limits)

#%% Run on single line of text
text = 'ceiling lamp that is very skinny and very tall. it has one head. it has a base. it has one chain.'
tensor = tf.constant(text)
tbatch = tensor[None, ...]
preds = txtmodel.model(tbatch)

#%% Generate a balanced set of sample descriptions to show on streamlit app
ex_descs = []
for keyword in [
        'Table', 'Chair', 'Lamp', 'Faucet', 'Clock', 'Bottle', 'Vase',
        'Laptop', 'Bed', 'Mug', 'Bowl'
]:
    for i in range(50):
        desc = dnp[np.random.randint(0, len(dnp))]
Example #8
def textToVox(text):  # NOTE: the original def line was truncated; name and signature are guessed from the body
    # Encode the text, predict a shape vector, and decode it to a voxel grid
    enc_text = padEnc(text)
    pred_sv = modeltxt.model(np.array(enc_text))
    vox = modeltxt.sample(pred_sv).numpy()[0, ..., 0]
    return vox, pred_sv

#%%
for i in range(250, 260):
    descRecon(i, title=i)
    
#%% Generate shapes from descriptions
desc_test = 'lamp that is a table or floor lamp that is made of a lamp unit, a lamp body and a lamp base. the lamp body is made of a lamp pole. the lamp base which is the lamp holistic base is made of a lamp base part. the object is long in length and regular in height. it is regular in width. it is square in shape. '
enc_text_test = padEnc(desc_test)
pred = modeltxt.model(enc_text_test)
vox_gen = model.sample(pred).numpy()[0, ..., 0]
ut.plotVox(vox_gen, limits=cf_limits)
Example #9
    for folder in os.listdir(in_fp)
]
move_to = [
    '{}/{}{}{}'.format(out_fp, prefix, i + 1, file_ext)
    for i in range(len(files_to_move))
]

for i in range(len(files_to_move)):
    os.rename(files_to_move[i], move_to[i])
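
# os.rename raises OSError when source and destination sit on different
# filesystems; shutil.move falls back to copy-and-delete, so a safer variant is:
#   import shutil
#   for src, dst in zip(files_to_move, move_to):
#       shutil.move(src, dst)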

#%% Look at the difference between original and grown voxel models
for sample, _ in train_dataset.unbatch().shuffle(10000).take(3):
    vox = sample[..., 0].numpy()
    sparsity = ut.getSparsity(vox)
    ut.plotVox(vox,
               limits=cf_limits,
               title='Original\n Sparse: {:.2f}'.format(100 * sparsity))
    if sparsity > 0.00:
        continue
    grown = ut.growVox(vox, amount=0.5)
    sparsity_grown = ut.getSparsity(grown)
    ut.plotVox(grown,
               limits=cf_limits,
               title='Grown\n Sparse: {:.2f}'.format(100 * sparsity_grown))
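
# ut.getSparsity and ut.growVox are project helpers; below is a minimal sketch of
# one plausible sparsity measure over a voxel grid (the 0.5 occupancy threshold
# and the fraction-empty definition are assumptions):
import numpy as np

def get_sparsity_sketch(vox, threshold=0.5):
    filled = np.count_nonzero(vox > threshold)
    return 1.0 - filled / vox.size  # fraction of voxels that are empty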

#%%
import shutil
import tqdm

rdir = '/home/starstorms/Insight/ShapeNet/partnetmeta/renders/data_v0'
outdir = '/home/starstorms/Insight/ShapeNet/partnetmeta/pics'