# Install the dependency first: pip3 install sumy
import MeCab
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer
# Input text to summarize; fill in with Japanese prose before running.
text = ""
def summy_test(text, sentences_count=2):
    """Summarize Japanese *text* with LexRank and return the summary as one string.

    Parameters
    ----------
    text : str
        The Japanese text to summarize.
    sentences_count : int, optional
        Number of sentences to keep in the summary (default 2; previously
        hard-coded).

    Returns
    -------
    str
        The selected summary sentences concatenated without a separator
        (Japanese text needs no space between sentences).
    """
    # NOTE(review): the original version also ran the text through MeCab and
    # collected a word list (`corpus`), but that list was never used anywhere —
    # the sumy Tokenizer('japanese') does its own tokenization. That dead code
    # (and the unused `dic_url` local) has been removed.
    parser = PlaintextParser.from_string(text, Tokenizer('japanese'))
    summarizer = LexRankSummarizer()
    summarizer.stop_words = ['']
    summary = summarizer(document=parser.document, sentences_count=sentences_count)
    # sumy yields Sentence objects; str() renders each back to plain text.
    return "".join(str(sentence) for sentence in summary)
# Guard the script entry point so importing this module does not trigger
# summarization as a side effect.
if __name__ == "__main__":
    print(summy_test(text))
# 簡単やわ — "That was easy."