• A from nltk import sentence_tokenize; word_tokens = sentence_tokenize(sentence)
  • B from nltk.tokenizer import word_tokenizer; word_tokens = word_tokenizer(sentence)
  • C from nltk import tokenize_words; word_tokens = tokenize_words(sentence)
  • D from nltk.tokenize import word_tokenize; word_tokens = word_tokenize(sentence)
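Of these, only option D matches NLTK's actual API: word_tokenize lives in the nltk.tokenize module. A minimal runnable sketch of that pattern (the sample sentence is illustrative, and the one-time model download is shown for completeness):

```python
import nltk

# Tokenizer models must be downloaded once; newer NLTK releases
# may prompt for "punkt_tab" instead of "punkt".
nltk.download("punkt")

from nltk.tokenize import word_tokenize

sentence = "NLTK makes word tokenization straightforward."  # illustrative input
word_tokens = word_tokenize(sentence)
print(word_tokens)
# ['NLTK', 'makes', 'word', 'tokenization', 'straightforward', '.']
```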