import string
import nltk
from nltk.corpus import stopwords as nltk_stopwords, words as nltk_words
from nltk.stem import WordNetLemmatizer

# Assumed setup: the original snippet relies on these names being defined elsewhere.
stopwords = set(nltk_stopwords.words('english'))
words = set(nltk_words.words())
lemmatizer = WordNetLemmatizer()

def Preprocess_listofSentence(listofSentence):
    preprocess_list = []
    for sentence in listofSentence:
        # Lowercase, strip punctuation, then remove digits.
        sentence_w_punct = "".join(i.lower() for i in sentence if i not in string.punctuation)
        sentence_w_num = ''.join(i for i in sentence_w_punct if not i.isdigit())
        # Tokenize, drop stopwords, lemmatize, and keep only English dictionary words.
        tokenize_sentence = nltk.tokenize.word_tokenize(sentence_w_num)
        words_w_stopwords = [i for i in tokenize_sentence if i not in stopwords]
        words_lemmatize = (lemmatizer.lemmatize(w) for w in words_w_stopwords)
        sentence_clean = ' '.join(w for w in words_lemmatize if w.lower() in words or not w.isalpha())
        preprocess_list.append(sentence_clean)
    return preprocess_list
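
A minimal usage sketch, assuming the required NLTK data packages (punkt, stopwords, wordnet, words) have already been downloaded; the variable name sample_sentences and the sentences themselves are illustrative only.

# One-off downloads, if the corpora are not already installed:
# nltk.download('punkt'); nltk.download('stopwords'); nltk.download('wordnet'); nltk.download('words')
sample_sentences = [
    "The 3 quick brown foxes were jumping over 2 lazy dogs!",
    "Preprocessing removes punctuation, digits and stopwords.",
]
print(Preprocess_listofSentence(sample_sentences))
# Tokens not present in the NLTK word list are dropped, so the exact output
# depends on the corpora installed in your environment.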