Mercurial > hg > index.fcgi > www > www-1
view myrss/myrss_app.py @ 70:3456dd3e8660
myrss: add exception handling around main()
author | paulo |
---|---|
date | Wed, 29 Jul 2015 00:20:39 -0600 |
parents | ae0f2f438a95 |
children | c7bbd3805509 |
line source
1 import os
2 import sys
3 import re
4 import urllib2
5 import threading
6 import Queue
7 import datetime
8 import time
9 import traceback
11 import logging
12 logging.basicConfig(
13 #level=logging.DEBUG,
14 #filename="_LOG",
15 #format="%(asctime)s %(levelname)-8s %(message)s",
16 )
18 import xml.etree.ElementTree
19 import HTMLParser
21 import html
# Configuration constants.
FEEDS_FILE = "FEEDS"                 # input file: one feed URL per line ('#' starts a comment line)
CACHE_HTML_FILE = "__cache__.html"   # rendered page is cached here between requests

CACHE_LIFE = 1200  # [seconds] serve the cached page while younger than this
MAX_ITEMS = 50     # max items rendered per feed
MAX_LINK_Z = 4     # number of CSS "z<N>" classes cycled for link styling
MAX_THREADS = 20   # worker threads fetching feeds concurrently (also queue bound)
URLOPEN_TIMEOUT = 60  # [seconds] per-request timeout for urlopen
34 _PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")
36 def _parse_root_tag(root_tag):
37 re_match = _PARSE_ROOT_TAG_RE.match(root_tag)
39 if re_match is None:
40 return (None, None)
41 else:
42 return re_match.group(2, 3)
45 def _strip_if_not_none(txt):
46 return txt.strip() if txt is not None else ''
def _go_rss(elementTree):
    """Extract (title, link, [(item_title, item_link), ...]) from an RSS 2.0 tree."""
    channel_title = _strip_if_not_none(elementTree.find("channel/title").text)
    channel_link = elementTree.find("channel/link").text

    entries = []
    for item in elementTree.findall("channel/item")[:MAX_ITEMS]:
        entries.append((_strip_if_not_none(item.find("title").text),
                        item.find("link").text))

    return (channel_title, channel_link, entries)
def _go_atom(elementTree):
    """Extract (title, link, [(item_title, item_link), ...]) from an Atom feed tree."""
    ns = "http://www.w3.org/2005/Atom"

    def _alt_html_href(node):
        # First <link rel="alternate" type="text/html"> href under *node*, or ''.
        for ln in node.findall("{%s}link" % ns):
            if ln.get("type") == "text/html" and ln.get("rel") == "alternate":
                return ln.get("href")
        return ''

    feed_title = _strip_if_not_none(elementTree.find("{%s}title" % ns).text)
    feed_link = _alt_html_href(elementTree)

    entries = []
    for entry in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        entries.append((_strip_if_not_none(entry.find("{%s}title" % ns).text),
                        _alt_html_href(entry)))

    return (feed_title, feed_link, entries)
def _go_purl_rss(elementTree):
    """Extract (title, link, [(item_title, item_link), ...]) from an RDF/RSS 1.0 tree."""
    ns = "http://purl.org/rss/1.0/"

    def tag(name):
        # Qualify *name* with the RSS 1.0 namespace.
        return "{%s}%s" % (ns, name)

    title = _strip_if_not_none(elementTree.find(tag("channel") + "/" + tag("title")).text)
    link = elementTree.find(tag("channel") + "/" + tag("link")).text

    items = [(_strip_if_not_none(i.find(tag("title")).text), i.find(tag("link")).text)
             for i in elementTree.findall(tag("item"))[:MAX_ITEMS]]

    return (title, link, items)
# Tag-removal pattern plus a shared parser instance for entity decoding.
_STRIP_HTML_RE = re.compile(r"<.*?>")
_htmlParser = HTMLParser.HTMLParser()


def _strip_html(txt):
    """Return *txt* with markup tags dropped and HTML entities unescaped."""
    without_tags = _STRIP_HTML_RE.sub('', txt)
    return _htmlParser.unescape(without_tags)
def _to_html(dtnow, docstruct):
    """Render *docstruct* as a complete UTF-8 encoded HTML page.

    *docstruct* is a list whose entries are (title, link, items) tuples or
    None (a feed that failed to fetch/parse). *dtnow* is the timestamp shown
    in the page title; a trailing debug <div> records the seconds elapsed
    since it.
    """
    stamp = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % stamp

    doc = html.HTML("html")

    head = doc.header
    head.title(page_title)
    head.link(rel="stylesheet", type="text/css", href="index.css")

    content = doc.body
    content.h1(page_title)

    z = 0  # cycles through MAX_LINK_Z CSS classes to style successive links

    for feed in docstruct:
        if feed is None:
            continue  # this feed failed upstream; skip it

        (feed_title, feed_link, feed_items) = feed

        content.h2.a(_strip_html(feed_title), href=feed_link, klass="z%d" % (z % MAX_LINK_Z))
        z += 1
        para = content.p

        for (idx, (item_title, item_link)) in enumerate(feed_items):
            if idx > 0:
                para += " - "  # textual separator between item links

            para.a(_strip_html(item_title), href=item_link, klass="z%d" % (z % MAX_LINK_Z))
            z += 1

    # Seconds spent building the page (relative to dtnow), as a debug footer.
    elapsed = datetime.datetime.now() - dtnow
    doc.div("%.3f" % (elapsed.days*86400 + elapsed.seconds + elapsed.microseconds/1e6), klass="debug")

    return unicode(doc).encode("utf-8")
def _fetch_url(url):
    """Open *url* and return the response file-like object, or None on failure.

    Catches urllib2.URLError (of which HTTPError is a subclass), so both HTTP
    error statuses and connection/DNS failures are logged and reported as
    None instead of escaping to the caller.
    """
    try:
        logging.info("processing %s" % url)
        # Empty User-Agent: some servers reject urllib2's default agent string.
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": ''}),
                               timeout=URLOPEN_TIMEOUT)
    except urllib2.URLError as e:
        logging.info("(%s) %s" % (url, e))
        return None

    return feed
def _process_feed(feed):
    """Parse *feed* (a file-like object of XML) and dispatch to an extractor.

    Returns the (title, link, items) tuple produced by the matching
    _go_* extractor. Raises NotImplementedError for unsupported RSS
    versions or unrecognized root tags.
    """
    tree = xml.etree.ElementTree.parse(feed)
    root = tree.getroot()

    ns_and_tag = _parse_root_tag(root.tag)

    if ns_and_tag == (None, "rss"):
        # Only RSS >= 2.0 is handled; older dialects go through RDF instead.
        version = float(root.get("version", 0.0))
        if version < 2.0:
            raise NotImplementedError("Unsupported rss version")
        return _go_rss(tree)

    if ns_and_tag == ("http://www.w3.org/2005/Atom", "feed"):
        return _go_atom(tree)

    if ns_and_tag == ("http://www.w3.org/1999/02/22-rdf-syntax-ns#", "RDF"):
        return _go_purl_rss(tree)

    raise NotImplementedError("Unknown root tag")
class WorkerThread(threading.Thread):
    """Daemon thread that fetches and parses feeds taken from a shared queue.

    Consumes (index, url) pairs from *input_queue* and emits
    (index, parsed_feed_or_None) pairs on *output_queue*.
    """

    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True  # do not keep the interpreter alive at shutdown

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()

            result = None
            try:
                response = _fetch_url(url)
                if response is not None:
                    result = _process_feed(response)
            except Exception as e:
                # A bad feed must never kill the worker; record and move on.
                logging.info("(%s) exception: %s" % (url, e))

            # Always answer, even with None, so main() can count responses.
            self._output_queue.put((idx, result))
def main(input_queue, output_queue, lock):
    """Build (or serve the cached copy of) the aggregated feeds HTML page.

    Feed URLs are read from FEEDS_FILE — one per line; blank lines and lines
    starting with '#' are skipped. URLs are fanned out to the worker threads
    through *input_queue* and results collected from *output_queue*. *lock*
    serializes whole-page builds so concurrent requests cannot race on the
    queues or the cache file.

    Returns the page as a UTF-8 encoded string.
    """
    ret = ''

    with lock:
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        # Serve the cached page while it is younger than CACHE_LIFE seconds.
        if os.path.exists(CACHE_HTML_FILE) and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE):
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()

        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            docstruct = [None]*len(feedlines)
            num_input = 0
            for (i, l) in enumerate(feedlines):
                # Strip before testing so indented comments are recognized and
                # blank lines are no longer queued as empty URLs (which could
                # only ever fail to fetch).
                l = l.strip()
                if l and not l.startswith('#'):
                    input_queue.put((i, l))
                    num_input += 1

            # Collect exactly one reply per queued URL; each carries its
            # original index, restoring FEEDS_FILE order in docstruct.
            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

    return ret
class MyRssApp:
    """WSGI application serving the aggregated feeds page.

    Spawns MAX_THREADS daemon WorkerThreads once at construction; every
    request then runs main() over the shared queues, guarded by one lock.
    """

    def __init__(self):
        # Bounded queues apply back-pressure so main() cannot flood workers.
        self._iq = Queue.Queue(MAX_THREADS)
        self._oq = Queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        for _ in range(MAX_THREADS):
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    def __call__(self, environ, start_response):
        # Pessimistic defaults; overwritten on success.
        response_code = "500 Internal Server Error"
        response_type = "text/plain; charset=UTF-8"

        try:
            response_body = main(self._iq, self._oq, self._main_lock)
            response_code = "200 OK"
            response_type = "text/html; charset=UTF-8"
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate; the traceback is both logged and returned as
            # the response body for debugging.
            logging.exception("unhandled error while building page")
            response_body = traceback.format_exc()

        response_headers = [
            ("Content-Type", response_type),
            ("Content-Length", str(len(response_body))),
        ]
        start_response(response_code, response_headers)

        return [response_body]