# Example #1 (score: 0)
# In[8]:


import importlib


# In[9]:


# Reload the loader module so in-notebook edits to tsentiLoader take
# effect before the readers are (re)built.
importlib.reload(tsentiLoader)


# In[10]:


# Max sequence length handed to every reader below.
_SEQ_LEN = 20

# Readers over the Twitter-sentiment splits: first 10k rows for training,
# the next 100 held out for validation, and the separate test file.
train_reader = tsentiLoader.tSentiReader(train_set[:10000], _SEQ_LEN, tt)
valid_reader = tsentiLoader.tSentiReader(train_set[10000:10100], _SEQ_LEN, tt)
test_reader = tsentiLoader.tSentiReader(test_set, _SEQ_LEN, tt)


# In[44]:


class SentimentModel(nn.Module):
    """
    Sentence-level sentiment head.

    Input: the outputs of bert (token-level encoder outputs).
    Model: BiLSTM run over those outputs.
    Output: sentence embedding.

    NOTE(review): this definition is truncated in the source excerpt —
    the __init__ signature is cut off after the `bert` parameter and the
    rest of the class body is not visible here.
    """
    def __init__(   self,
                    bert ,
# Example #2 (score: 0)
# Move every sub-model of the multi-task setup onto the (default) GPU.
# NOTE(review): `bb`, `transformer`, and `task_embedding` are defined
# elsewhere in the file — presumably the pretrained BERT encoder and
# task-specific modules; confirm against the earlier cells.
bert = bb.cuda()
transformer = transformer.cuda()
task_embedding = task_embedding.cuda()
senti_cls = nn.Linear(768, 2).cuda()  # binary sentiment head on 768-d BERT features
rdm_model = RDM_Model(768, 300, 256, 0.2).cuda()
rdm_classifier = nn.Linear(256, 2).cuda()  # binary head on the RDM 256-d state
cm_model = CM_Model(300, 256, 2).cuda()

# In[13]:

# #### Data for each task
train_file = "./trainingandtestdata/training.1600000.processed.noemoticon.csv"
test_file = "./trainingandtestdata/testdata.manual.2009.06.14.csv"
train_set, test_set = tsentiLoader.load_data(train_file, test_file)

# First 10k rows train the sentiment task, the next 500 validate it.
senti_train_reader = tsentiLoader.tSentiReader(train_set[:10000], 20, tt)
# Drop the second label column (axis=2), keeping a single binary target.
senti_train_reader.label = np.delete(senti_train_reader.label, 1, axis=2)
senti_valid_reader = tsentiLoader.tSentiReader(train_set[10000:10500], 20, tt)
senti_valid_reader.label = np.delete(senti_valid_reader.label, 1, axis=2)

# NOTE(review): load_data_fast() takes no arguments, so it presumably
# reads/writes module-level globals set above — verify before reordering.
load_data_fast()

# Wrap the heavy encoders in DataParallel when several GPUs are available.
# The number of devices is taken from the count of comma-separated IDs in
# sys.argv[1], but the actual IDs used are 0..k-1 (see commented line).
if torch.cuda.device_count() > 1:
    # device_ids = [int(device_id) for device_id in sys.argv[1].split(",")]
    device_ids = list(range(len(sys.argv[1].split(","))))
    bert = nn.DataParallel(bert, device_ids=device_ids)
    transformer = nn.DataParallel(transformer, device_ids=device_ids)

    # Primary device is the first ID; DataParallel scatters from it.
    device_name = "cuda:%d" % device_ids[0]
    device = torch.device(device_name)
    # NOTE(review): this excerpt appears truncated here — the analogous
    # block later in the file also moves `transformer` to `device`.
    bert.to(device)
# Example #3 (score: 0)
# Build the rumor-detection (RDM) and checkpoint (CM) models on the GPU.
rdm_model = RDM_Model(768, 300, 256, 0.2).cuda()
rdm_classifier = nn.Linear(256, 2).cuda()  # binary head on the RDM 256-d state
cm_model = CM_Model(300, 256, 2).cuda()
# Fix: the original assigned rdm_classifier a second fresh nn.Linear here,
# constructing and moving a layer to the GPU only to discard the first one.
cm_log_dir = "MTLERD"

# Subjectivity/objectivity task data.
subj_file = "./rotten_imdb/subj.data"
obj_file = "./rotten_imdb/obj.data"
tr, dev, te = SubjObjLoader.load_data(subj_file, obj_file)

subj_train_reader = SubjObjLoader.SubjObjReader(tr, 20, tokenizer)

# Twitter-sentiment task data; first 10k rows train the sentiment head.
train_file = "./trainingandtestdata/training.1600000.processed.noemoticon.csv"
test_file = "./trainingandtestdata/testdata.manual.2009.06.14.csv"
train_set, test_set = tsentiLoader.load_data(train_file, test_file)

senti_train_reader = tsentiLoader.tSentiReader(train_set[:10000], 20,
                                               tokenizer)
# Drop the second label column (axis=2), keeping a single binary target.
senti_train_reader.label = np.delete(senti_train_reader.label, 1, axis=2)

# NOTE(review): load_data_fast() takes no arguments, so it presumably
# reads/writes module-level globals set above — verify before reordering.
load_data_fast()

# Wrap the heavy encoders in DataParallel when several GPUs are available.
# The number of devices is taken from the count of comma-separated IDs in
# sys.argv[1], but the actual IDs used are 0..k-1 (see commented line).
if torch.cuda.device_count() > 1:
    # device_ids = [int(device_id) for device_id in sys.argv[1].split(",")]
    device_ids = list(range(len(sys.argv[1].split(","))))
    bert = nn.DataParallel(bert, device_ids=device_ids)
    transformer = nn.DataParallel(transformer, device_ids=device_ids)

    # Primary device is the first ID; DataParallel scatters from it.
    device_name = "cuda:%d" % device_ids[0]
    device = torch.device(device_name)
    bert.to(device)
    transformer.to(device)