텍스트 요약기
중요한 문장을 추출하여 긴 텍스트를 요약합니다.
110
자주 묻는 질문
코드 구현
# Extractive text summarization (TF-based)
import re
from collections import Counter
# Common English function words that carry little topical information;
# they are excluded from the frequency counts used for sentence scoring.
STOP_WORDS = {
    "the", "a", "an", "and", "or", "but", "in", "on", "at", "to", "for",
    "of", "with", "by", "from", "is", "was", "are", "were", "be", "been",
    "has", "have", "had", "do", "does", "did", "will", "would", "could",
    "should", "that", "this", "it", "its", "he", "she", "they", "we", "you",
    "i", "not", "no", "as", "if", "so", "than", "then", "more", "most",
}
def tokenize_sentences(text: str) -> list[str]:
    """Split *text* into sentences on ., ! or ? boundaries.

    Fragments of 10 characters or fewer (after stripping whitespace)
    are discarded as noise.
    """
    pieces = re.split(r'(?<=[.!?])\s+', text.strip())
    kept = []
    for piece in pieces:
        cleaned = piece.strip()
        if len(cleaned) > 10:
            kept.append(cleaned)
    return kept
def word_frequency(sentences: list[str]) -> dict[str, int]:
    """Count content-word frequencies across *sentences*.

    Words are lowercased; stop words and tokens of 2 characters or
    fewer are ignored.
    """
    joined = " ".join(sentences).lower()
    counts: Counter[str] = Counter()
    for token in re.findall(r"[a-z']+", joined):
        if len(token) > 2 and token not in STOP_WORDS:
            counts[token] += 1
    return counts
def score_sentences(sentences: list[str], freq: dict[str, int]) -> list[float]:
    """Score each sentence by its average content-word frequency.

    Sentences in the first 20% of the document are boosted by 1.4x and
    those in the last 20% by 1.2x, reflecting the lead/conclusion
    heuristic of extractive summarization.
    """
    count = len(sentences)
    last = max(count - 1, 1)  # avoid division by zero for a single sentence
    result = []
    for index, sent in enumerate(sentences):
        tokens = re.findall(r"[a-z']+", sent.lower())
        raw = sum(freq.get(tok, 0) for tok in tokens)
        # Normalize by token count so long sentences are not favored.
        value = raw / len(tokens) if tokens else raw
        position = index / last
        if position <= 0.2:
            value = value * 1.4
        elif position >= 0.8:
            value = value * 1.2
        result.append(value)
    return result
def summarize(text: str, num_sentences: int = 3) -> str:
    """Return an extractive summary of *text*.

    Picks the ``num_sentences`` highest-scoring sentences and joins
    them in their original order. If the text has that many sentences
    or fewer, it is returned unchanged.
    """
    sents = tokenize_sentences(text)
    if len(sents) <= num_sentences:
        return text
    frequencies = word_frequency(sents)
    sentence_scores = score_sentences(sents, frequencies)
    # Rank indices best-first (stable sort keeps ties in document order),
    # then restore document order among the winners.
    ranking = sorted(
        range(len(sentence_scores)),
        key=lambda j: sentence_scores[j],
        reverse=True,
    )
    chosen = sorted(ranking[:num_sentences])
    return " ".join(sents[j] for j in chosen)
# Example usage: replace the placeholder with your own long text.
# (Fixed: scraped page-footer text had been fused onto the print line,
# which made this statement a syntax error.)
text = """..."""  # Your long text here
print(summarize(text, num_sentences=3))
Comments are powered by Giscus. Sign in with GitHub to leave a comment.