Mercurial > hg > index.fcgi > www > www-1
view myrss/myrss_app.py @ 47:315afeb47e52
myrss: fix handling embedded HTML tags and special characters; add myrss_test_feed.py
author: paulo
date: Wed, 13 Feb 2013 00:11:58 -0800
parents: aca02ce71274
children: 66a232bae83c
line source
1 import os
2 import sys
3 import re
4 import urllib2
5 import threading
6 import Queue
7 import datetime
8 import time
10 import logging
11 logging.basicConfig(level=logging.INFO)
13 import xml.etree.ElementTree
14 import HTMLParser
16 import html
FEEDS_FILE = "FEEDS"                # input: one feed URL per line; lines starting with '#' are skipped
CACHE_HTML_FILE = "__cache__.html"  # rendered page cache, reused while fresh

CACHE_LIFE = 1200  # [seconds] cache freshness window
MAX_ITEMS = 50     # max items rendered per feed
MAX_LINK_Z = 4     # number of cycled "z%d" css classes applied to links
MAX_THREADS = 20   # worker pool size (also the bound of both queues)
URLOPEN_TIMEOUT = 60  # [seconds] per-feed fetch timeout
29 _PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")
31 def _parse_root_tag(root_tag):
32 re_match = _PARSE_ROOT_TAG_RE.match(root_tag)
34 if re_match is None:
35 return (None, None)
36 else:
37 return re_match.group(2, 3)
40 def _strip_if_not_none(txt):
41 return txt.strip() if txt is not None else ''
def _go_rss(elementTree):
    """Extract (title, link, items) from a parsed RSS 2.0 document.

    items is a list of (item_title, item_link) tuples, capped at MAX_ITEMS.
    Uses findtext() so a feed missing <title>/<link> elements yields ''/None
    instead of raising AttributeError on find(...).text.
    """
    title = _strip_if_not_none(elementTree.findtext("channel/title"))
    link = elementTree.findtext("channel/link")

    items = []
    for i in elementTree.findall("channel/item")[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.findtext("title"))
        it_link = i.findtext("link")
        items.append((it_title, it_link))

    return (title, link, items)
def _go_atom(elementTree):
    """Extract (title, link, items) from a parsed Atom document.

    items is a list of (entry_title, entry_link) tuples, capped at MAX_ITEMS.
    Links are taken from the rel="alternate" type="text/html" <link> element;
    '' when none is present. findtext() avoids AttributeError when <title>
    is missing.
    """
    ns = "http://www.w3.org/2005/Atom"

    title = _strip_if_not_none(elementTree.findtext("{%s}title" % ns))

    link = ''
    for i in elementTree.findall("{%s}link" % ns):
        if i.get("type") == "text/html" and i.get("rel") == "alternate":
            link = i.get("href")
            break

    items = []
    for i in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.findtext("{%s}title" % ns))

        it_link = ''
        for j in i.findall("{%s}link" % ns):
            if j.get("type") == "text/html" and j.get("rel") == "alternate":
                it_link = j.get("href")
                break

        items.append((it_title, it_link))

    return (title, link, items)
_STRIP_HTML_RE = re.compile(r"<.*?>")
_htmlParser = HTMLParser.HTMLParser()


def _strip_html(txt):
    """Remove HTML tags from txt, then decode entities (e.g. &amp; -> &)."""
    without_tags = _STRIP_HTML_RE.sub('', txt)
    return _htmlParser.unescape(without_tags)
def _to_html(dtnow, docstruct):
    """Render the fetched feeds as a full HTML page, returned as utf-8 bytes.

    dtnow is the timestamp shown in the page title; docstruct is a list of
    (title, link, items) tuples — None entries (failed feeds) are skipped.
    """
    timestamp = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % timestamp

    root = html.HTML("html")

    header = root.header
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="index.css")

    body = root.body
    body.h1(page_title)

    # link_z cycles through MAX_LINK_Z css classes ("z0".."z3") so
    # consecutive links get different styling from index.css.
    link_z = 0
    for feed in docstruct:
        if feed is None:
            continue

        (title, link, items) = feed

        body.h2.a(_strip_html(title), href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1

        paragraph = body.p
        for (n, (it_title, it_link)) in enumerate(items):
            if n:
                paragraph += " - "
            paragraph.a(_strip_html(it_title), href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            link_z += 1

    # Page-generation time in seconds, rendered as a debug footer.
    elapsed = datetime.datetime.now() - dtnow
    root.div("%.3f" % (elapsed.days * 86400 + elapsed.seconds + elapsed.microseconds / 1e6),
             klass="debug")

    return unicode(root).encode("utf-8")
def _fetch_url(url):
    """Open url and return the response file object, or None on fetch errors.

    Sends an empty User-Agent header — presumably to avoid servers that
    reject the default urllib2 agent (TODO confirm).
    Catches urllib2.URLError (which includes HTTPError), so DNS and
    connection failures are logged and skipped like HTTP errors instead of
    propagating to the caller's generic exception handler.
    """
    try:
        logging.info("processing %s" % url)
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": ''}),
                               timeout=URLOPEN_TIMEOUT)
    except urllib2.URLError as e:
        logging.info("(%s) %s" % (url, e))
        return None

    return feed
def _process_feed(feed):
    """Parse an open feed document and dispatch to the RSS or Atom extractor.

    Returns the (title, link, items) tuple from _go_rss/_go_atom.
    Raises NotImplementedError for unsupported RSS versions or unknown roots.
    """
    tree = xml.etree.ElementTree.parse(feed)
    root = tree.getroot()

    (ns, tag) = _parse_root_tag(root.tag)

    if (ns, tag) == (None, "rss"):
        if float(root.get("version", 0.0)) >= 2.0:
            return _go_rss(tree)
        raise NotImplementedError("Unsupported rss version")

    if (ns, tag) == ("http://www.w3.org/2005/Atom", "feed"):
        return _go_atom(tree)

    raise NotImplementedError("Unknown root tag")
class WorkerThread(threading.Thread):
    """Daemon worker: consumes (idx, url) jobs, produces (idx, feed_or_None).

    Created with keyword arguments input_queue and output_queue; all other
    arguments are forwarded to threading.Thread.
    """

    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True  # never block interpreter shutdown

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()

            result = None
            try:
                response = _fetch_url(url)
                if response is not None:
                    result = _process_feed(response)
            except Exception as e:
                # A broken feed must not kill the worker — log it and
                # report None for this slot instead.
                logging.info("(%s) exception: %s" % (url, e))

            self._output_queue.put((idx, result))
def main(input_queue, output_queue, lock):
    """Build (or serve from cache) the HTML page and return it.

    The whole body runs under lock, so only one request at a time
    regenerates the page; others reuse the CACHE_HTML_FILE copy while it is
    younger than CACHE_LIFE seconds.

    Feed URLs are read from FEEDS_FILE (one per line), fanned out to the
    worker threads via input_queue, and collected in original order from
    output_queue.
    """
    ret = ''

    with lock:
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        if (os.path.exists(CACHE_HTML_FILE)
                and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE)):
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()
        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            # One slot per input line so results can be stored by index;
            # blank/comment slots simply stay None.
            docstruct = [None] * len(feedlines)

            num_input = 0
            for (i, l) in enumerate(feedlines):
                l = l.strip()
                # Strip before testing: a bare newline used to slip past the
                # '#' check and be enqueued as an empty URL, and comments
                # with leading whitespace were not recognized.
                if l and l[0] != '#':
                    input_queue.put((i, l))
                    num_input += 1

            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

    return ret
class MyRssApp:
    """WSGI application object; each call returns the (possibly cached) page."""

    def __init__(self):
        # Bounded queues feeding a fixed pool of daemon worker threads,
        # started once for the lifetime of the app.
        self._iq = Queue.Queue(MAX_THREADS)
        self._oq = Queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        for _ in range(MAX_THREADS):
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    def __call__(self, environ, start_response):
        response_body = main(self._iq, self._oq, self._main_lock)

        start_response("200 OK", [
            ("Content-Type", "text/html; charset=UTF-8"),
            ("Content-Length", str(len(response_body))),
        ])
        return [response_body]