From b6d1e705f0b224e3c66b382bc10eec0884c377a1 Mon Sep 17 00:00:00 2001
From: Ronald Schaten
Date: Mon, 15 Apr 2013 23:33:24 +0200
Subject: [PATCH] Implement newer version of the readability module
---
hn.py | 231 ---------------
models/entry.py | 13 +-
readability/__init__.py | 1 +
readability/cleaners.py | 32 ++
readability/debug.py | 25 ++
readability/encoding.py | 21 ++
readability/htmls.py | 115 +++++++
readability/readability.py | 593 +++++++++++++++++++++++++++++++++++++
8 files changed, 797 insertions(+), 234 deletions(-)
delete mode 100644 hn.py
create mode 100644 readability/__init__.py
create mode 100644 readability/cleaners.py
create mode 100644 readability/debug.py
create mode 100644 readability/encoding.py
create mode 100644 readability/htmls.py
create mode 100755 readability/readability.py
diff --git a/hn.py b/hn.py
deleted file mode 100644
index 71dd4c2..0000000
--- a/hn.py
+++ /dev/null
@@ -1,231 +0,0 @@
-"""
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-
-from xml.sax.saxutils import escape
-
-import urllib, re, os, urlparse
-import HTMLParser, feedparser
-from BeautifulSoup import BeautifulSoup
-from pprint import pprint
-
-import codecs
-import sys
-streamWriter = codecs.lookup('utf-8')[-1]
-sys.stdout = streamWriter(sys.stdout)
-
-
-HN_RSS_FEED = "http://news.ycombinator.com/rss"
-
-NEGATIVE = re.compile("comment|meta|footer|footnote|foot")
-POSITIVE = re.compile("post|hentry|entry|content|text|body|article")
-PUNCTUATION = re.compile("""[!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~]""")
-
-
-def grabContent(link, html):
-
-    replaceBrs = re.compile("<br */? *>[ \r\n]*<br */? *>")
-    html = re.sub(replaceBrs, "</p><p>", html)
-
- try:
- soup = BeautifulSoup(html)
- except HTMLParser.HTMLParseError:
- return ""
-
- # REMOVE SCRIPTS
- for s in soup.findAll("script"):
- s.extract()
-
- allParagraphs = soup.findAll("p")
- topParent = None
-
- parents = []
- for paragraph in allParagraphs:
-
- parent = paragraph.parent
-
- if (parent not in parents):
- parents.append(parent)
- parent.score = 0
-
- if (parent.has_key("class")):
- if (NEGATIVE.match(parent["class"])):
- parent.score -= 50
- if (POSITIVE.match(parent["class"])):
- parent.score += 25
-
- if (parent.has_key("id")):
- if (NEGATIVE.match(parent["id"])):
- parent.score -= 50
- if (POSITIVE.match(parent["id"])):
- parent.score += 25
-
- if (parent.score == None):
- parent.score = 0
-
- innerText = paragraph.renderContents() #"".join(paragraph.findAll(text=True))
- if (len(innerText) > 10):
- parent.score += 1
-
- parent.score += innerText.count(",")
-
- for parent in parents:
- if ((not topParent) or (parent.score > topParent.score)):
- topParent = parent
-
- if (not topParent):
- return ""
-
- # REMOVE LINK'D STYLES
- styleLinks = soup.findAll("link", attrs={"type" : "text/css"})
- for s in styleLinks:
- s.extract()
-
- # REMOVE ON PAGE STYLES
- for s in soup.findAll("style"):
- s.extract()
-
- # CLEAN STYLES FROM ELEMENTS IN TOP PARENT
- for ele in topParent.findAll(True):
- del(ele['style'])
- del(ele['class'])
-
- killDivs(topParent)
- clean(topParent, "form")
- clean(topParent, "object")
- clean(topParent, "iframe")
-
- fixLinks(topParent, link)
-
- return topParent.renderContents()
-
-
-def fixLinks(parent, link):
- tags = parent.findAll(True)
-
- for t in tags:
- if (t.has_key("href")):
- t["href"] = urlparse.urljoin(link, t["href"])
- if (t.has_key("src")):
- t["src"] = urlparse.urljoin(link, t["src"])
-
-
-def clean(top, tag, minWords=10000):
- tags = top.findAll(tag)
-
- for t in tags:
- if (t.renderContents().count(" ") < minWords):
- t.extract()
-
-
-def killDivs(parent):
-
- divs = parent.findAll("div")
- for d in divs:
- p = len(d.findAll("p"))
- img = len(d.findAll("img"))
- li = len(d.findAll("li"))
- a = len(d.findAll("a"))
- embed = len(d.findAll("embed"))
- pre = len(d.findAll("pre"))
- code = len(d.findAll("code"))
-
- if (d.renderContents().count(",") < 10):
- if ((pre == 0) and (code == 0)):
- if ((img > p ) or (li > p) or (a > p) or (p == 0) or (embed > 0)):
- d.extract()
-
-
-def upgradeLink(link):
-
- link = link.encode('utf-8')
-
- if (not (link.startswith("http://news.ycombinator.com") or link.endswith(".pdf"))):
- linkFile = "upgraded/" + re.sub(PUNCTUATION, "_", link)
- if (os.path.exists(linkFile)):
- return open(linkFile).read()
- else:
- content = ""
- try:
- html = urllib.urlopen(link).read()
- content = grabContent(link, html)
- filp = open(linkFile, "w")
- filp.write(content)
- filp.close()
- except IOError:
- pass
- return content
- else:
- return ""
-
-
-
-def upgradeFeed(feedUrl):
-
- feedData = urllib.urlopen(feedUrl).read()
-
- upgradedLinks = []
- parsedFeed = feedparser.parse(feedData)
-
- for entry in parsedFeed.entries:
- upgradedLinks.append((entry, upgradeLink(entry.link)))
-
-    rss = """<rss version="2.0">
-<channel>
-    <title>Hacker News</title>
-    <link>http://news.ycombinator.com/</link>
-    <description>Links for the intellectually curious, ranked by readers.</description>
-
-"""
-
-    for entry, content in upgradedLinks:
-        rss += u"""
-<item>
-    <title>%s</title>
-    <link>%s</link>
-    <comments>%s</comments>
-    <description>
-        <![CDATA[<a href="%s">Comments</a>%s<a href="%s">Comments</a>]]>
-    </description>
-</item>
-""" % (entry.title, escape(entry.link), escape(entry.comments), entry.comments, content.decode('utf-8'), entry.comments)
-
-    rss += """
-</channel>
-</rss>"""
-
-
- return rss
-
-if __name__ == "__main__":
- print upgradeFeed(HN_RSS_FEED)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/models/entry.py b/models/entry.py
index db460a4..f8ef0dd 100644
--- a/models/entry.py
+++ b/models/entry.py
@@ -2,15 +2,22 @@ from sqlalchemy import Column, Integer, ForeignKey, String, Text, DateTime
from datetime import datetime
from time import mktime
import urllib2
-#import hn
+from readability.readability import Document
import html2text
import HTMLParser
from models import Base
def fetch_readability(link):
- text = hn.upgradeLink(link)
- text = text.decode('utf8')
+ h2t = html2text.HTML2Text()
+ h2t.body_width = 0
+ h2t.inline_links = False
+ h2t.ignore_links = True
+ h2t.ignore_images = True
+ response = urllib2.urlopen(link)
+ text = response.read()
+ text = Document(text).summary()
+ text = h2t.handle(text)
return text
def fetch_full_page(link):
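
Note: the new fetch path above can be exercised on its own. A minimal sketch
(Python 2, assuming urllib2, html2text and the readability package introduced
below are importable; the URL is a placeholder):

    import urllib2
    import html2text
    from readability.readability import Document

    h2t = html2text.HTML2Text()
    h2t.body_width = 0        # disable hard line wrapping
    h2t.ignore_links = True   # drop hyperlinks from the text output
    h2t.ignore_images = True  # drop images as well

    html = urllib2.urlopen('http://example.com/article').read()  # placeholder URL
    print h2t.handle(Document(html).summary())
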
diff --git a/readability/__init__.py b/readability/__init__.py
new file mode 100644
index 0000000..8822a51
--- /dev/null
+++ b/readability/__init__.py
@@ -0,0 +1 @@
+from .readability import Document
diff --git a/readability/cleaners.py b/readability/cleaners.py
new file mode 100644
index 0000000..9b158c5
--- /dev/null
+++ b/readability/cleaners.py
@@ -0,0 +1,32 @@
+# strip out a set of nuisance html attributes that can mess up rendering in RSS feeds
+import re
+from lxml.html.clean import Cleaner
+
+bad_attrs = ['width', 'height', 'style', '[-a-z]*color', 'background[-a-z]*', 'on*']
+single_quoted = "'[^']+'"
+double_quoted = '"[^"]+"'
+non_space = '[^ "\'>]+'
+htmlstrip = re.compile("<" # open
+ "([^>]+) " # prefix
+ "(?:%s) *" % ('|'.join(bad_attrs),) + # undesirable attributes
+ '= *(?:%s|%s|%s)' % (non_space, single_quoted, double_quoted) + # value
+ "([^>]*)" # postfix
+ ">" # end
+, re.I)
+
+def clean_attributes(html):
+ while htmlstrip.search(html):
+ html = htmlstrip.sub('<\\1\\2>', html)
+ return html
+
+def normalize_spaces(s):
+    """replace any sequence of whitespace
+    characters with a single space"""
+    if not s: return ''
+    return ' '.join(s.split())
+
+html_cleaner = Cleaner(scripts=True, javascript=True, comments=True,
+ style=True, links=True, meta=False, add_nofollow=False,
+ page_structure=False, processing_instructions=True, embedded=False,
+ frames=False, forms=False, annoying_tags=False, remove_tags=None,
+ remove_unknown_tags=False, safe_attrs_only=False)
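
Note: htmlstrip is applied repeatedly, removing one nuisance attribute per
pass, until nothing matches. A sketch of the effect (input and expected
output are illustrative):

    from readability.cleaners import clean_attributes

    # 'style' is stripped on one pass, 'width' on the next;
    # 'href' is not in bad_attrs and survives.
    print clean_attributes('<td width="200" style="color:red"><a href="/x">x</a></td>')
    # -> <td><a href="/x">x</a></td>
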
diff --git a/readability/debug.py b/readability/debug.py
new file mode 100644
index 0000000..a5e644d
--- /dev/null
+++ b/readability/debug.py
@@ -0,0 +1,25 @@
+def save_to_file(text, filename):
+ f = open(filename, 'wt')
+    f.write('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />')
+ f.write(text.encode('utf-8'))
+ f.close()
+
+uids = {}
+def describe(node, depth=2):
+ if not hasattr(node, 'tag'):
+ return "[%s]" % type(node)
+ name = node.tag
+ if node.get('id', ''): name += '#'+node.get('id')
+ if node.get('class', ''):
+ name += '.' + node.get('class').replace(' ','.')
+ if name[:4] in ['div#', 'div.']:
+ name = name[3:]
+ if name in ['tr', 'td', 'div', 'p']:
+ if not node in uids:
+ uid = uids[node] = len(uids)+1
+ else:
+ uid = uids.get(node)
+ name += "%02d" % (uid)
+ if depth and node.getparent() is not None:
+ return name+' - '+describe(node.getparent(), depth-1)
+ return name
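
Note: describe() renders a node as a short CSS-like path for log output. A
sketch, assuming lxml is installed (the markup and printed path are
illustrative):

    import lxml.html
    from readability.debug import describe

    doc = lxml.html.document_fromstring(
        '<html><body><div id="content" class="post"><p>text</p></div></body></html>')
    print describe(doc.find('.//p'))
    # -> something like: p01 - #content.post - body
    # (bare p/div/td/tr nodes get a numeric uid; div#/div. prefixes are shortened)
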
diff --git a/readability/encoding.py b/readability/encoding.py
new file mode 100644
index 0000000..d05b7f4
--- /dev/null
+++ b/readability/encoding.py
@@ -0,0 +1,21 @@
+import re
+import chardet
+
+def get_encoding(page):
+    text = re.sub('</?[^>]*>\s*', ' ', page)
+ enc = 'utf-8'
+ if not text.strip() or len(text) < 10:
+ return enc # can't guess
+ try:
+ diff = text.decode(enc, 'ignore').encode(enc)
+ sizes = len(diff), len(text)
+ if abs(len(text) - len(diff)) < max(sizes) * 0.01: # 99% of utf-8
+ return enc
+ except UnicodeDecodeError:
+ pass
+ res = chardet.detect(text)
+ enc = res['encoding']
+ #print '->', enc, "%.2f" % res['confidence']
+ if enc == 'MacCyrillic':
+ enc = 'cp1251'
+ return enc
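
Note: get_encoding() first checks whether the tag-stripped byte string
round-trips through utf-8 and only falls back to chardet when it does not. A
sketch (Python 2, chardet installed; the byte strings and guesses are
illustrative):

    from readability.encoding import get_encoding

    print get_encoding('<html><body>plain ascii, round-trips as utf-8</body></html>')
    # -> utf-8 (no chardet call needed)

    print get_encoding('\xcf\xf0\xe8\xe2\xe5\xf2, \xec\xe8\xf0! ' * 4)  # cp1251 bytes
    # -> a chardet guess, e.g. windows-1251 (MacCyrillic guesses map to cp1251)
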
diff --git a/readability/htmls.py b/readability/htmls.py
new file mode 100644
index 0000000..9b59993
--- /dev/null
+++ b/readability/htmls.py
@@ -0,0 +1,115 @@
+from cleaners import normalize_spaces, clean_attributes
+from encoding import get_encoding
+from lxml.html import tostring
+import logging
+import lxml.html
+import re
+
+logging.getLogger().setLevel(logging.DEBUG)
+
+utf8_parser = lxml.html.HTMLParser(encoding='utf-8')
+
+def build_doc(page):
+ if isinstance(page, unicode):
+ page_unicode = page
+ else:
+ enc = get_encoding(page)
+ page_unicode = page.decode(enc, 'replace')
+ doc = lxml.html.document_fromstring(page_unicode.encode('utf-8', 'replace'), parser=utf8_parser)
+ return doc
+
+def js_re(src, pattern, flags, repl):
+    return re.compile(pattern, flags).sub(repl.replace('$', '\\'), src)
+
+
+def normalize_entities(cur_title):
+ entities = {
+ u'\u2014':'-',
+ u'\u2013':'-',
+        u'&mdash;': '-',
+        u'&ndash;': '-',
+ u'\u00A0': ' ',
+ u'\u00AB': '"',
+ u'\u00BB': '"',
+        u'&quot;': '"',
+ }
+ for c, r in entities.iteritems():
+ if c in cur_title:
+ cur_title = cur_title.replace(c, r)
+
+ return cur_title
+
+def norm_title(title):
+ return normalize_entities(normalize_spaces(title))
+
+def get_title(doc):
+ title = doc.find('.//title')
+    if title is None or title.text is None or len(title.text) == 0:
+ return '[no-title]'
+
+ return norm_title(title.text)
+
+def add_match(collection, text, orig):
+ text = norm_title(text)
+ if len(text.split()) >= 2 and len(text) >= 15:
+ if text.replace('"', '') in orig.replace('"', ''):
+ collection.add(text)
+
+def shorten_title(doc):
+ title = doc.find('.//title')
+ if title is None or title.text is None or len(title.text) == 0:
+ return ''
+
+ title = orig = norm_title(title.text)
+
+ candidates = set()
+
+ for item in ['.//h1', './/h2', './/h3']:
+ for e in list(doc.iterfind(item)):
+ if e.text:
+ add_match(candidates, e.text, orig)
+ if e.text_content():
+ add_match(candidates, e.text_content(), orig)
+
+ for item in ['#title', '#head', '#heading', '.pageTitle', '.news_title', '.title', '.head', '.heading', '.contentheading', '.small_header_red']:
+ for e in doc.cssselect(item):
+ if e.text:
+ add_match(candidates, e.text, orig)
+ if e.text_content():
+ add_match(candidates, e.text_content(), orig)
+
+ if candidates:
+ title = sorted(candidates, key=len)[-1]
+ else:
+ for delimiter in [' | ', ' - ', ' :: ', ' / ']:
+ if delimiter in title:
+ parts = orig.split(delimiter)
+ if len(parts[0].split()) >= 4:
+ title = parts[0]
+ break
+ elif len(parts[-1].split()) >= 4:
+ title = parts[-1]
+ break
+ else:
+ if ': ' in title:
+ parts = orig.split(': ')
+ if len(parts[-1].split()) >= 4:
+ title = parts[-1]
+ else:
+ title = orig.split(': ', 1)[1]
+
+ if not 15 < len(title) < 150:
+ return orig
+
+ return title
+
+def get_body(doc):
+ [ elem.drop_tree() for elem in doc.xpath('.//script | .//link | .//style') ]
+ raw_html = unicode(tostring(doc.body or doc))
+ cleaned = clean_attributes(raw_html)
+ try:
+ #BeautifulSoup(cleaned) #FIXME do we really need to try loading it?
+ return cleaned
+ except Exception: #FIXME find the equivalent lxml error
+ logging.error("cleansing broke html content: %s\n---------\n%s" % (raw_html, cleaned))
+ return raw_html
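
Note: shorten_title() prefers a heading that overlaps the <title> text and
falls back to splitting on common site-name delimiters. A sketch, assuming
lxml with CSS-selector support (markup and outputs are illustrative):

    import lxml.html
    from readability.htmls import get_title, shorten_title

    doc = lxml.html.document_fromstring(
        '<html><head><title>My Site | A Reasonably Long Article Headline</title></head>'
        '<body><h1>A Reasonably Long Article Headline</h1></body></html>')
    print get_title(doc)      # -> My Site | A Reasonably Long Article Headline
    print shorten_title(doc)  # -> A Reasonably Long Article Headline
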
diff --git a/readability/readability.py b/readability/readability.py
new file mode 100755
index 0000000..fc37636
--- /dev/null
+++ b/readability/readability.py
@@ -0,0 +1,593 @@
+#!/usr/bin/env python
+import logging
+import re
+import sys
+
+from collections import defaultdict
+from lxml.etree import tostring
+from lxml.etree import tounicode
+from lxml.html import document_fromstring
+from lxml.html import fragment_fromstring
+
+from cleaners import clean_attributes
+from cleaners import html_cleaner
+from htmls import build_doc
+from htmls import get_body
+from htmls import get_title
+from htmls import shorten_title
+
+
+logging.basicConfig(level=logging.INFO)
+log = logging.getLogger()
+
+
+REGEXES = {
+ 'unlikelyCandidatesRe': re.compile('combx|comment|community|disqus|extra|foot|header|menu|remark|rss|shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|popup|tweet|twitter', re.I),
+ 'okMaybeItsACandidateRe': re.compile('and|article|body|column|main|shadow', re.I),
+ 'positiveRe': re.compile('article|body|content|entry|hentry|main|page|pagination|post|text|blog|story', re.I),
+ 'negativeRe': re.compile('combx|comment|com-|contact|foot|footer|footnote|masthead|media|meta|outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|tool|widget', re.I),
+ 'divToPElementsRe': re.compile('<(a|blockquote|dl|div|img|ol|p|pre|table|ul)', re.I),
+    #'replaceBrsRe': re.compile('(<br[^>]*>[ \n\r\t]*){2,}',re.I),
+ #'replaceFontsRe': re.compile('<(\/?)font[^>]*>',re.I),
+ #'trimRe': re.compile('^\s+|\s+$/'),
+ #'normalizeRe': re.compile('\s{2,}/'),
+    #'killBreaksRe': re.compile('(<br\s*\/?>(\s|&nbsp;?)*){1,}/'),
+ #'videoRe': re.compile('http:\/\/(www\.)?(youtube|vimeo)\.com', re.I),
+ #skipFootnoteLink: /^\s*(\[?[a-z0-9]{1,2}\]?|^|edit|citation needed)\s*$/i,
+}
+
+
+class Unparseable(ValueError):
+ pass
+
+
+def describe(node, depth=1):
+ if not hasattr(node, 'tag'):
+ return "[%s]" % type(node)
+ name = node.tag
+ if node.get('id', ''):
+ name += '#' + node.get('id')
+ if node.get('class', ''):
+ name += '.' + node.get('class').replace(' ', '.')
+ if name[:4] in ['div#', 'div.']:
+ name = name[3:]
+ if depth and node.getparent() is not None:
+ return name + ' - ' + describe(node.getparent(), depth - 1)
+ return name
+
+
+def to_int(x):
+ if not x:
+ return None
+ x = x.strip()
+ if x.endswith('px'):
+ return int(x[:-2])
+ if x.endswith('em'):
+ return int(x[:-2]) * 12
+ return int(x)
+
+
+def clean(text):
+ text = re.sub('\s*\n\s*', '\n', text)
+ text = re.sub('[ \t]{2,}', ' ', text)
+ return text.strip()
+
+
+def text_length(i):
+ return len(clean(i.text_content() or ""))
+
+
+class Document:
+    """Class to build an etree document out of html."""
+ TEXT_LENGTH_THRESHOLD = 25
+ RETRY_LENGTH = 250
+
+ def __init__(self, input, **options):
+ """Generate the document
+
+ :param input: string of the html content.
+
+ kwargs:
+ - attributes:
+ - debug: output debug messages
+ - min_text_length:
+ - retry_length:
+ - url: will allow adjusting links to be absolute
+
+ """
+ self.input = input
+ self.options = options
+ self.html = None
+
+ def _html(self, force=False):
+ if force or self.html is None:
+ self.html = self._parse(self.input)
+ return self.html
+
+ def _parse(self, input):
+ doc = build_doc(input)
+ doc = html_cleaner.clean_html(doc)
+ base_href = self.options.get('url', None)
+ if base_href:
+ doc.make_links_absolute(base_href, resolve_base_href=True)
+ else:
+ doc.resolve_base_href()
+ return doc
+
+ def content(self):
+ return get_body(self._html(True))
+
+ def title(self):
+ return get_title(self._html(True))
+
+ def short_title(self):
+ return shorten_title(self._html(True))
+
+ def get_clean_html(self):
+ return clean_attributes(tounicode(self.html))
+
+ def summary(self, html_partial=False):
+        """Generate the summary of the html document
+
+ :param html_partial: return only the div of the document, don't wrap
+ in html and body tags.
+
+ """
+ try:
+ ruthless = True
+ while True:
+ self._html(True)
+ for i in self.tags(self.html, 'script', 'style'):
+ i.drop_tree()
+ for i in self.tags(self.html, 'body'):
+ i.set('id', 'readabilityBody')
+ if ruthless:
+ self.remove_unlikely_candidates()
+ self.transform_misused_divs_into_paragraphs()
+ candidates = self.score_paragraphs()
+
+ best_candidate = self.select_best_candidate(candidates)
+
+ if best_candidate:
+ article = self.get_article(candidates, best_candidate,
+ html_partial=html_partial)
+ else:
+ if ruthless:
+ log.debug("ruthless removal did not work. ")
+ ruthless = False
+ self.debug(
+ ("ended up stripping too much - "
+ "going for a safer _parse"))
+ # try again
+ continue
+ else:
+ log.debug(
+ ("Ruthless and lenient parsing did not work. "
+ "Returning raw html"))
+ article = self.html.find('body')
+ if article is None:
+ article = self.html
+ cleaned_article = self.sanitize(article, candidates)
+ article_length = len(cleaned_article or '')
+ retry_length = self.options.get(
+ 'retry_length',
+ self.RETRY_LENGTH)
+ of_acceptable_length = article_length >= retry_length
+ if ruthless and not of_acceptable_length:
+ ruthless = False
+ # Loop through and try again.
+ continue
+ else:
+ return cleaned_article
+ except StandardError, e:
+ log.exception('error getting summary: ')
+ raise Unparseable(str(e)), None, sys.exc_info()[2]
+
+ def get_article(self, candidates, best_candidate, html_partial=False):
+ # Now that we have the top candidate, look through its siblings for
+ # content that might also be related.
+ # Things like preambles, content split by ads that we removed, etc.
+ sibling_score_threshold = max([
+ 10,
+ best_candidate['content_score'] * 0.2])
+ # create a new html document with a html->body->div
+ if html_partial:
+            output = fragment_fromstring('<div/>')
+ else:
+            output = document_fromstring('<div/>')
+ best_elem = best_candidate['elem']
+ for sibling in best_elem.getparent().getchildren():
+            # in lxml there is no concept of simple text
+ # if isinstance(sibling, NavigableString): continue
+ append = False
+ if sibling is best_elem:
+ append = True
+ sibling_key = sibling # HashableElement(sibling)
+ if sibling_key in candidates and \
+ candidates[sibling_key]['content_score'] >= sibling_score_threshold:
+ append = True
+
+ if sibling.tag == "p":
+ link_density = self.get_link_density(sibling)
+ node_content = sibling.text or ""
+ node_length = len(node_content)
+
+ if node_length > 80 and link_density < 0.25:
+ append = True
+ elif node_length <= 80 \
+ and link_density == 0 \
+ and re.search('\.( |$)', node_content):
+ append = True
+
+ if append:
+ # We don't want to append directly to output, but the div
+ # in html->body->div
+ if html_partial:
+ output.append(sibling)
+ else:
+ output.getchildren()[0].getchildren()[0].append(sibling)
+ #if output is not None:
+ # output.append(best_elem)
+ return output
+
+ def select_best_candidate(self, candidates):
+ sorted_candidates = sorted(candidates.values(), key=lambda x: x['content_score'], reverse=True)
+ for candidate in sorted_candidates[:5]:
+ elem = candidate['elem']
+ self.debug("Top 5 : %6.3f %s" % (
+ candidate['content_score'],
+ describe(elem)))
+
+ if len(sorted_candidates) == 0:
+ return None
+
+ best_candidate = sorted_candidates[0]
+ return best_candidate
+
+ def get_link_density(self, elem):
+ link_length = 0
+ for i in elem.findall(".//a"):
+ link_length += text_length(i)
+ #if len(elem.findall(".//div") or elem.findall(".//p")):
+ # link_length = link_length
+ total_length = text_length(elem)
+ return float(link_length) / max(total_length, 1)
+
+    def score_paragraphs(self):
+ MIN_LEN = self.options.get(
+ 'min_text_length',
+ self.TEXT_LENGTH_THRESHOLD)
+ candidates = {}
+ ordered = []
+ for elem in self.tags(self._html(), "p", "pre", "td"):
+ parent_node = elem.getparent()
+ if parent_node is None:
+ continue
+ grand_parent_node = parent_node.getparent()
+
+ inner_text = clean(elem.text_content() or "")
+ inner_text_len = len(inner_text)
+
+ # If this paragraph is less than 25 characters
+ # don't even count it.
+ if inner_text_len < MIN_LEN:
+ continue
+
+ if parent_node not in candidates:
+ candidates[parent_node] = self.score_node(parent_node)
+ ordered.append(parent_node)
+
+ if grand_parent_node is not None and grand_parent_node not in candidates:
+ candidates[grand_parent_node] = self.score_node(
+ grand_parent_node)
+ ordered.append(grand_parent_node)
+
+ content_score = 1
+ content_score += len(inner_text.split(','))
+ content_score += min((inner_text_len / 100), 3)
+ #if elem not in candidates:
+ # candidates[elem] = self.score_node(elem)
+
+ #WTF? candidates[elem]['content_score'] += content_score
+ candidates[parent_node]['content_score'] += content_score
+ if grand_parent_node is not None:
+ candidates[grand_parent_node]['content_score'] += content_score / 2.0
+
+ # Scale the final candidates score based on link density. Good content
+ # should have a relatively small link density (5% or less) and be
+ # mostly unaffected by this operation.
+ for elem in ordered:
+ candidate = candidates[elem]
+ ld = self.get_link_density(elem)
+ score = candidate['content_score']
+ self.debug("Candid: %6.3f %s link density %.3f -> %6.3f" % (
+ score,
+ describe(elem),
+ ld,
+ score * (1 - ld)))
+ candidate['content_score'] *= (1 - ld)
+
+ return candidates
+
+ def class_weight(self, e):
+ weight = 0
+ if e.get('class', None):
+ if REGEXES['negativeRe'].search(e.get('class')):
+ weight -= 25
+
+ if REGEXES['positiveRe'].search(e.get('class')):
+ weight += 25
+
+ if e.get('id', None):
+ if REGEXES['negativeRe'].search(e.get('id')):
+ weight -= 25
+
+ if REGEXES['positiveRe'].search(e.get('id')):
+ weight += 25
+
+ return weight
+
+ def score_node(self, elem):
+ content_score = self.class_weight(elem)
+ name = elem.tag.lower()
+ if name == "div":
+ content_score += 5
+ elif name in ["pre", "td", "blockquote"]:
+ content_score += 3
+ elif name in ["address", "ol", "ul", "dl", "dd", "dt", "li", "form"]:
+ content_score -= 3
+ elif name in ["h1", "h2", "h3", "h4", "h5", "h6", "th"]:
+ content_score -= 5
+ return {
+ 'content_score': content_score,
+ 'elem': elem
+ }
+
+ def debug(self, *a):
+ if self.options.get('debug', False):
+ log.debug(*a)
+
+ def remove_unlikely_candidates(self):
+ for elem in self.html.iter():
+ s = "%s %s" % (elem.get('class', ''), elem.get('id', ''))
+ if len(s) < 2:
+ continue
+ #self.debug(s)
+ if REGEXES['unlikelyCandidatesRe'].search(s) and (not REGEXES['okMaybeItsACandidateRe'].search(s)) and elem.tag not in ['html', 'body']:
+ self.debug("Removing unlikely candidate - %s" % describe(elem))
+ elem.drop_tree()
+
+ def transform_misused_divs_into_paragraphs(self):
+ for elem in self.tags(self.html, 'div'):
+            # transform <div>s that do not contain other block elements into
+            # <p>s