view myrss/myrss_app.py @ 46:aca02ce71274

myrss_app.py: add urlopen timeout, and render time
author paulo
date Tue, 12 Feb 2013 00:43:11 -0700
parents c673e9e9c4ca
children 315afeb47e52
line source
1 import os
2 import sys
3 import re
4 import urllib2
5 import threading
6 import Queue
7 import datetime
8 import time
9 import logging
10 logging.basicConfig(level=logging.INFO)
12 import html
13 import xml.etree.ElementTree
# Input list of feed URLs, one per line ('#' lines are skipped by main()).
FEEDS_FILE = "FEEDS"
# Rendered page is written here and reused while younger than CACHE_LIFE.
CACHE_HTML_FILE = "__cache__.html"

CACHE_LIFE = 1200 # [seconds] how long a cached page stays fresh
MAX_ITEMS = 30 # per-feed cap on rendered items
MAX_LINK_Z = 4 # number of zN css classes cycled through for links
MAX_THREADS = 20 # size of the WorkerThread fetch pool
URLOPEN_TIMEOUT = 60 # [seconds] per-request urlopen timeout
26 _PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")
28 def _parse_root_tag(root_tag):
29 re_match = _PARSE_ROOT_TAG_RE.match(root_tag)
31 if re_match is None:
32 return (None, None)
33 else:
34 return re_match.group(2, 3)
def _go_rss(elementTree):
    """Extract (title, link, items) from a parsed RSS 2.0 document.

    items is a list of (item_title, item_link) tuples, capped at
    MAX_ITEMS entries.
    """
    channel_title = elementTree.find("channel/title").text.strip()
    channel_link = elementTree.find("channel/link").text

    entries = [
        (node.find("title").text.strip(), node.find("link").text)
        for node in elementTree.findall("channel/item")[:MAX_ITEMS]
    ]

    return (channel_title, channel_link, entries)
def _go_atom(elementTree):
    """Extract (title, link, items) from a parsed Atom feed document.

    items is a list of (entry_title, entry_link) tuples, capped at
    MAX_ITEMS entries.
    """
    ns = "http://www.w3.org/2005/Atom"

    def first_html_alternate(node):
        # First <link rel="alternate" type="text/html"> href, or '' if none.
        for link_el in node.findall("{%s}link" % ns):
            if link_el.get("type") == "text/html" and link_el.get("rel") == "alternate":
                return link_el.get("href")
        return ''

    feed_title = elementTree.find("{%s}title" % ns).text.strip()
    feed_link = first_html_alternate(elementTree)

    entries = []
    for entry in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        entry_title = entry.find("{%s}title" % ns).text.strip()
        entries.append((entry_title, first_html_alternate(entry)))

    return (feed_title, feed_link, entries)
def _to_html(dtnow, docstruct):
    """Render the aggregated feeds as a complete HTML page (utf-8 bytes).

    dtnow is the build timestamp shown in the page title; docstruct is a
    list of (title, link, items) tuples, with None entries (failed feeds)
    skipped.  A trailing .debug div records the render time in seconds.
    """
    stamp = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % stamp

    root = html.HTML("html")

    head = root.header
    head.title(page_title)
    head.link(rel="stylesheet", type="text/css", href="index.css")

    body = root.body
    body.h1(page_title)

    # Link color class cycles z0..z(MAX_LINK_Z-1) across every anchor.
    link_z = 0

    for feed in docstruct:
        if feed is None:
            continue

        (feed_title, feed_link, feed_items) = feed

        body.h2.a(feed_title, href=feed_link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        par = body.p

        for (n, (item_title, item_link)) in enumerate(feed_items):
            if n:
                par += " - "

            par.a(item_title, href=item_link, klass="z%d" % (link_z % MAX_LINK_Z))
            link_z += 1

    # Time spent rendering since the page timestamp, for the footer.
    elapsed = datetime.datetime.now() - dtnow
    root.div("%.3f" % (elapsed.days*86400 + elapsed.seconds + elapsed.microseconds/1e6), klass="debug")

    return unicode(root).encode("utf-8")
def _process_url(url):
    """Fetch *url* and parse it as RSS 2.0 or Atom.

    Returns the (title, link, items) tuple produced by _go_rss/_go_atom,
    or None on HTTP errors.  Raises NotImplementedError for rss versions
    below 2.0 and for unrecognized root tags; the caller (WorkerThread)
    catches and logs those.
    """
    ret = None

    try:
        logging.info("processing %s" % url)
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": ''}), timeout=URLOPEN_TIMEOUT)
    except urllib2.HTTPError as e:
        logging.info("(%s) %s" % (url, e))
        return ret

    # Close the response as soon as it is parsed: the original leaked the
    # socket, and these calls run forever inside long-lived worker threads.
    try:
        elementTree = xml.etree.ElementTree.parse(feed)
    finally:
        feed.close()

    root = elementTree.getroot()

    parsed_root_tag = _parse_root_tag(root.tag)

    if parsed_root_tag == (None, "rss"):
        version = float(root.get("version", 0.0))
        if version >= 2.0:
            ret = _go_rss(elementTree)
        else:
            raise NotImplementedError("Unsupported rss version")
    elif parsed_root_tag == ("http://www.w3.org/2005/Atom", "feed"):
        ret = _go_atom(elementTree)
    else:
        raise NotImplementedError("Unknown root tag")

    return ret
class WorkerThread(threading.Thread):
    """Daemon thread turning (idx, url) jobs into (idx, feed-or-None) results.

    Jobs arrive on input_queue; results go to output_queue with the same
    idx so the caller can slot them back in order.
    """

    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True  # do not block interpreter shutdown

    def run(self):
        take_job = self._input_queue.get
        emit_result = self._output_queue.put

        while True:
            (idx, url) = take_job()

            result = None
            try:
                result = _process_url(url)
            except Exception as exc:
                # One broken feed must never kill the worker: log it and
                # report None so main() still gets a reply for this idx.
                logging.info("(%s) exception: %s" % (url, exc))

            emit_result((idx, result))
def main(input_queue, output_queue, lock):
    """Return the aggregated HTML page, rebuilding it when the cache is stale.

    lock serializes concurrent requests so only one rebuild runs at a time;
    input_queue/output_queue connect to the WorkerThread pool.  Returns the
    page as a utf-8 byte string.
    """
    ret = ''

    with lock:
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        if os.path.exists(CACHE_HTML_FILE) and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE):
            # Cache still fresh: serve it verbatim.
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()
        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            # Results land back at their original line index, so skipped
            # lines simply stay None and are ignored by _to_html().
            docstruct = [None]*len(feedlines)

            num_input = 0
            for (i, l) in enumerate(feedlines):
                l = l.strip()
                # Strip BEFORE testing: the old `l[0] != '#'` check queued
                # an empty "URL" for every blank line and missed comments
                # with leading whitespace.
                if l and not l.startswith('#'):
                    input_queue.put((i, l))
                    num_input += 1

            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

    return ret
class MyRssApp:
    """WSGI application object serving the aggregated feed page."""

    def __init__(self):
        # Bounded queues feeding the fixed pool of daemon workers below;
        # the workers live for the whole process.
        self._iq = Queue.Queue(MAX_THREADS)
        self._oq = Queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        for _ in range(MAX_THREADS):
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    def __call__(self, environ, start_response):
        """WSGI entry point: always a 200 with the rendered page."""
        response_body = main(self._iq, self._oq, self._main_lock)

        start_response("200 OK", [
            ("Content-Type", "text/html"),
            ("Content-Length", str(len(response_body))),
        ])

        return [response_body]