Text Classification
!pip install transformers
# Not required if the Colab runtime is already set to GPU (Runtime > Change runtime type)
# pip install tf-nightly-gpu
# Import packages
import tensorflow as tf
import tensorflow_hub as hub
import pandas as pd
import numpy as np
import re
import unicodedata
import nltk
from nltk.corpus import stopwords
from tensorflow import keras
from tensorflow.keras.layers import Dense, Dropout, Input
from tensorflow.keras import regularizers
from tqdm import tqdm
import pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, f1_score, classification_report
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import itertools
from transformers import BertTokenizer, TFBertModel, BertConfig, TFDistilBertModel, DistilBertTokenizer, DistilBertConfig
import plotly.express as px
import plotly.graph_objects as go
import seaborn as sns
# Download the NLTK stopword list used by the cleaning functions below
nltk.download('stopwords')
# Preprocessing & Cleaning Functions
def unicode_to_ascii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
def clean_stopwords_shortwords(w):
stopwords_list=stopwords.words('english')
words = w.split()
clean_words = [word for word in words if (word not in stopwords_list) and len(word) > 2]
return " ".join(clean_words)
def preprocess_sentence(w):
w = unicode_to_ascii(w.lower().strip())
w = re.sub(r"([?.!,¿])", r" ", w)
w = re.sub(r'[" "]+', " ", w)
w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
w=clean_stopwords_shortwords(w)
w=re.sub(r'@\w+', '',w)
return w
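A quick sanity check of the cleaning pipeline (a minimal sketch; the example sentence is made up):
print(preprocess_sentence("Hello, how are you today? @someone"))
# expected output: 'hello today' (stopwords, short words, punctuation and the mention are removed)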
# Load dataset
The training sets were combined into one CSV and the text was split into two columns (in Excel: Data tab > Text to Columns > delimited by ':').
# import google drive
from google.colab import drive
drive.mount('/content/drive/')
# Change directory to Google Drive. Upload the file directly into the target drive (Uchennamachine) for easy access.
%cd /content/drive/My Drive/
data = pd.read_csv("combinedfellow.csv",encoding='latin-1')
data
Remove the unnamed columns, drop rows containing NaN values, reset the index, and finally shuffle the dataset.
# Drop the spurious 'Unnamed' columns produced by the CSV export
data = data.loc[:, ~data.columns.str.contains('Unnamed', case=False)]
print('File has {} rows and {} columns'.format(data.shape[0],data.shape[1]))
data=data.dropna()
data=data.reset_index(drop=True)
print('File has {} rows and {} columns'.format(data.shape[0],data.shape[1]))
data = shuffle(data)
data
Convert the labels to integers 0 through 5, save them to the 'gt' (ground truth) column, and optionally apply the preprocess function to the text.
data['gt'] = data['Label'].map({'LOC':0,'HUM':1,'NUM':2,'DESC':3,'ABBR':4,'ENTY':5})
print('Available labels: ',data.Label.unique())
# data['Text']=data['Text'].map(preprocess_sentence)
num_classes=len(data.Label.unique())
data.head()
data.dtypes
# data["Label"] = data["Label"].astype(object).astype(str)
# data.dtypes
# Explore the number of unique values for each feature
for col in data.columns:
    print('{}: {} unique values'.format(col, data[col].nunique()))
# Display the 6 Unique Values
data['Label'].unique()
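As an optional check of class balance, a minimal plot using the plotly import above (the styling is illustrative):
fig = px.histogram(data, x='Label', title='Class distribution')
fig.show()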
# Drop the original Label column (note: drop() returns a copy, so reassign)
data = data.drop(['Label'], axis=1)
# Loading DistilBERT Tokenizer and the DistilBERT model
dbert_tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
dbert_model = TFDistilBertModel.from_pretrained('distilbert-base-uncased')
# Preparing input for the model
max_len=32
sentences=data['Text']
labels=data['gt']
len(sentences),len(labels)
# Let's take a sentence from the dataset and understand the input and output of DistilBERT
Tokenized sentence:
dbert_tokenizer.tokenize(sentences[0])
Input ids and the attention mask from the tokenizer:
dbert_inp = dbert_tokenizer.encode_plus(sentences[0], add_special_tokens=True, max_length=20, padding='max_length', truncation=True)
dbert_inp
dbert_inp['input_ids']
DistilBERT model output: feed the input_ids and attention_mask obtained from the tokenizer into the model. The output is a tuple whose first element (the last hidden state) has shape (1, max_len, 768).
id_inp = np.asarray(dbert_inp['input_ids'])
mask_inp = np.asarray(dbert_inp['attention_mask'])
out = dbert_model([id_inp.reshape(1, -1), mask_inp.reshape(1, -1)])
type(out), out
out[0][:, 0, :]  # hidden state of the [CLS] token, shape (1, 768)
dbert_tokenizer.decode(dbert_inp['input_ids'])
# Create a basic NN model on top of the DistilBERT embeddings to get the predictions
def create_model():
    inps = Input(shape=(max_len,), dtype='int64')
    masks = Input(shape=(max_len,), dtype='int64')
    # Use the [CLS] token's hidden state as a fixed-size sentence embedding
    dbert_layer = dbert_model(inps, attention_mask=masks)[0][:, 0, :]
    dense = Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.01))(dbert_layer)
    dropout = Dropout(0.5)(dense)
    pred = Dense(num_classes, activation='softmax', kernel_regularizer=regularizers.l2(0.01))(dropout)
    model = tf.keras.Model(inputs=[inps, masks], outputs=pred)
    print(model.summary())
    return model
model=create_model()
# Prepare model input
input_ids = []
attention_masks = []
for sent in sentences:
    dbert_inps = dbert_tokenizer.encode_plus(sent, add_special_tokens=True, max_length=max_len, padding='max_length', return_attention_mask=True, truncation=True)
    input_ids.append(dbert_inps['input_ids'])
    attention_masks.append(dbert_inps['attention_mask'])
input_ids=np.asarray(input_ids)
attention_masks=np.array(attention_masks)
labels=np.array(labels)
len(input_ids),len(attention_masks),len(labels)
# Save the model inputs to pickle files so they can be reloaded later without repeating the steps above
print('Preparing the pickle files...')
pickle_inp_path='dbert_inp.pkl'
pickle_mask_path='dbert_mask.pkl'
pickle_label_path='dbert_label.pkl'
pickle.dump((input_ids),open(pickle_inp_path,'wb'))
pickle.dump((attention_masks),open(pickle_mask_path,'wb'))
pickle.dump((labels),open(pickle_label_path,'wb'))
print('Pickle files saved as ',pickle_inp_path,pickle_mask_path,pickle_label_path)
print('Loading the saved pickle files..')
input_ids=pickle.load(open(pickle_inp_path, 'rb'))
attention_masks=pickle.load(open(pickle_mask_path, 'rb'))
labels=pickle.load(open(pickle_label_path, 'rb'))
print('Input shape {} Attention mask shape {} Input label shape {}'.format(input_ids.shape,attention_masks.shape,labels.shape))
label_class_dict={0:'LOC',1:'HUM',2:'NUM',3:'DESC',4:'ABBR',5:'ENTY'}
target_names = list(label_class_dict.values())
# Train/test split, and setting up the loss function, accuracy metric and optimizer for the model
train_inp,val_inp,train_label,val_label,train_mask,val_mask=train_test_split(input_ids,labels,attention_masks,test_size=0.2)
print('Train inp shape {} Val input shape {}\nTrain label shape {} Val label shape {}\nTrain attention mask shape {} Val attention mask shape {}'.format(train_inp.shape,val_inp.shape,train_label.shape,val_label.shape,train_mask.shape,val_mask.shape))
log_dir='dbert_model'
model_save_path='./dbert_model.h5'
callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath=model_save_path,save_weights_only=True,monitor='val_loss',mode='min',save_best_only=True),keras.callbacks.TensorBoard(log_dir=log_dir)]
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)  # the model's final layer applies softmax, so its outputs are probabilities, not logits
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5)
model.compile(loss=loss,optimizer=optimizer, metrics=[metric])
# Training
# train_mask
# train_label
# val_label
history=model.fit([train_inp,train_mask],train_label,batch_size=16,epochs=5,validation_data=([val_inp,val_mask],val_label),callbacks=callbacks)
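To inspect the learning curves without TensorBoard, a minimal matplotlib sketch using the Keras history object:
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.show()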
# Tensorboard visualization (Training-Testing curve)
%load_ext tensorboard
# %tensorboard --logdir {log_dir}
# Use the saved weights for predictions and to calculate the evaluation metrics
trained_model = create_model()
trained_model.compile(loss=loss,optimizer=optimizer, metrics=[metric])
trained_model.load_weights(model_save_path)
preds = trained_model.predict([val_inp,val_mask],batch_size=16)
pred_labels = preds.argmax(axis=1)
f1 = f1_score(val_label,pred_labels, average = None)
f1
target_names = list(label_class_dict.values())  # must follow the 0-5 label order: LOC, HUM, NUM, DESC, ABBR, ENTY
print('F1 score',f1)
print('Classification Report')
print(classification_report(val_label,pred_labels,target_names=target_names))
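A confusion matrix gives a per-class view of the errors; a minimal sketch with seaborn (the figure styling is illustrative):
cm = confusion_matrix(val_label, pred_labels)
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', xticklabels=target_names, yticklabels=target_names, cmap='Blues')
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.show()
And a quick single-question sanity check (the example question is made up; a well-trained model should predict LOC):
q = "What is the capital of France?"
enc = dbert_tokenizer.encode_plus(q, add_special_tokens=True, max_length=max_len, padding='max_length', truncation=True)
pred = trained_model.predict([np.asarray([enc['input_ids']]), np.asarray([enc['attention_mask']])])
print(label_class_dict[int(pred.argmax(axis=1)[0])])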