view myrss/myrss_app.py @ 74:d6acf8b093b7
myrss: update user-agent string to "Mozilla/5.0" to fix servers that insist on one
| author   | paulo                           |
|----------|---------------------------------|
| date     | Wed, 27 Jan 2016 01:50:15 -0700 |
| parents  | c7bbd3805509                    |
| children | 51f0da3da721                    |
line source
import os
import sys
import re
import urllib2
import threading
import Queue
import datetime
import time
import traceback

import logging
logging.basicConfig(
        #level=logging.DEBUG,
        #filename="_LOG",
        #format="%(asctime)s %(levelname)-8s %(message)s",
)

import xml.etree.ElementTree
import HTMLParser

import html

FEEDS_FILE = "FEEDS"
CACHE_HTML_FILE = "__cache__.html"

CACHE_LIFE = 1200  # [seconds]
MAX_ITEMS = 50
MAX_LINK_Z = 4
MAX_THREADS = 20
URLOPEN_TIMEOUT = 60  # [seconds]
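
# Splits an ElementTree root tag of the form "{namespace}tag" into its
# (namespace, tag) parts; the namespace group is optional.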
_PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")

def _parse_root_tag(root_tag):
    re_match = _PARSE_ROOT_TAG_RE.match(root_tag)

    if re_match is None:
        return (None, None)
    else:
        return re_match.group(2, 3)
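
# Returns the stripped element text, or '' when the text is None.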
def _strip_if_not_none(txt):
    return txt.strip() if txt is not None else ''
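
# Extracts (title, link, [(item_title, item_link), ...]) from an RSS 2.0 tree.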
def _go_rss(elementTree):
    title = _strip_if_not_none(elementTree.find("channel/title").text)
    link = elementTree.find("channel/link").text

    items = []

    for i in elementTree.findall("channel/item")[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.find("title").text)
        it_link = i.find("link").text

        items.append((it_title, it_link))

    return (title, link, items)
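
# Extracts the same (title, link, items) triple from an Atom feed, preferring
# the rel="alternate" text/html link of the feed and of each entry.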
def _go_atom(elementTree):
    ns = "http://www.w3.org/2005/Atom"

    title = _strip_if_not_none(elementTree.find("{%s}title" % ns).text)
    link = ''

    for i in elementTree.findall("{%s}link" % ns):
        if i.get("type") == "text/html" and i.get("rel") == "alternate":
            link = i.get("href")
            break

    items = []

    for i in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.find("{%s}title" % ns).text)
        it_link = ''

        for j in i.findall("{%s}link" % ns):
            if j.get("type") == "text/html" and j.get("rel") == "alternate":
                it_link = j.get("href")
                break

        items.append((it_title, it_link))

    return (title, link, items)
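
# Extracts (title, link, items) from an RDF/RSS 1.0 ("purl") feed.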
def _go_purl_rss(elementTree):
    ns = "http://purl.org/rss/1.0/"

    title = _strip_if_not_none(elementTree.find("{%s}channel/{%s}title" % (ns, ns)).text)
    link = elementTree.find("{%s}channel/{%s}link" % (ns, ns)).text

    items = []

    for i in elementTree.findall("{%s}item" % ns)[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.find("{%s}title" % ns).text)
        it_link = i.find("{%s}link" % ns).text

        items.append((it_title, it_link))

    return (title, link, items)
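
# Removes markup tags from feed-provided text and unescapes HTML entities.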
_STRIP_HTML_RE = re.compile(r"<.*?>")
_htmlParser = HTMLParser.HTMLParser()

def _strip_html(txt):
    return _htmlParser.unescape(_STRIP_HTML_RE.sub('', txt))
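
# Renders the aggregated feeds into a single HTML page using the third-party
# "html" builder module ("klass" becomes the class attribute); link colors
# cycle through MAX_LINK_Z CSS classes (z0..z3). A debug div records how long
# page generation took.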
def _to_html(dtnow, docstruct):
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = html.HTML("html")

    header = root.header
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="index.css")

    body = root.body
    body.h1(page_title)

    link_z = 0

    for feed in docstruct:
        if feed is None:
            continue

        (title, link, items) = feed

        body.h2.a(_strip_html(title), href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            if i > 0:
                p += " - "

            if not it_title:
                it_title = "(missing title)"
            if it_link is not None:
                p.a(_strip_html(it_title), href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            else:
                p += _strip_html(it_title)

            link_z += 1

    dtdelta = datetime.datetime.now() - dtnow
    root.div("%.3f" % (dtdelta.days*86400 + dtdelta.seconds + dtdelta.microseconds/1e6), klass="debug")

    return unicode(root).encode("utf-8")
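
# Fetches a feed URL with a browser-like User-Agent (some servers refuse the
# default urllib2 agent); returns the open response, or None on HTTP errors.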
def _fetch_url(url):
    try:
        logging.info("processing %s" % url)
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": "Mozilla/5.0"}), timeout=URLOPEN_TIMEOUT)
    except urllib2.HTTPError as e:
        logging.info("(%s) %s" % (url, e))
        return None

    return feed
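
# Parses a fetched feed and dispatches on its root tag: RSS 2.0, Atom, or
# RDF/RSS 1.0; anything else raises NotImplementedError.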
def _process_feed(feed):
    ret = None

    elementTree = xml.etree.ElementTree.parse(feed)
    root = elementTree.getroot()

    parsed_root_tag = _parse_root_tag(root.tag)

    if parsed_root_tag == (None, "rss"):
        version = float(root.get("version", 0.0))
        if version >= 2.0:
            ret = _go_rss(elementTree)
        else:
            raise NotImplementedError("Unsupported rss version")
    elif parsed_root_tag == ("http://www.w3.org/2005/Atom", "feed"):
        ret = _go_atom(elementTree)
    elif parsed_root_tag == ("http://www.w3.org/1999/02/22-rdf-syntax-ns#", "RDF"):
        ret = _go_purl_rss(elementTree)
    else:
        raise NotImplementedError("Unknown root tag")

    return ret
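
# Daemon worker that pulls (index, url) pairs from the input queue and pushes
# back (index, parsed_feed_or_None) so results can be slotted into order.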
class WorkerThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()
            docfeed = None
            try:
                feed = _fetch_url(url)
                if feed is not None:
                    docfeed = _process_feed(feed)
            except Exception as e:
                logging.info("(%s) exception: %s" % (url, e))
            self._output_queue.put((idx, docfeed))
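
# Builds the page under a lock: serves the cached HTML if it is younger than
# CACHE_LIFE seconds, otherwise re-reads FEEDS, fans the URLs out to the
# worker threads, and rewrites the cache.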
def main(input_queue, output_queue, lock):
    ret = ''

    with lock:
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        if os.path.exists(CACHE_HTML_FILE) and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE):
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()

        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            docstruct = [None]*len(feedlines)
            num_input = 0
            for (i, l) in enumerate(feedlines):
                if l[0] != '#':
                    l = l.strip()
                    input_queue.put((i, l))
                    num_input += 1

            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

    return ret
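
# WSGI callable: starts MAX_THREADS workers once, then answers each request
# with the (possibly cached) aggregated page, or a plain-text traceback on error.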
class MyRssApp:
    def __init__(self):
        self._iq = Queue.Queue(MAX_THREADS)
        self._oq = Queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        for _ in range(MAX_THREADS):
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    def __call__(self, environ, start_response):
        response_code = "500 Internal Server Error"
        response_type = "text/plain; charset=UTF-8"

        try:
            response_body = main(self._iq, self._oq, self._main_lock)
            response_code = "200 OK"
            response_type = "text/html; charset=UTF-8"
        except:
            response_body = traceback.format_exc()

        response_headers = [
            ("Content-Type", response_type),
            ("Content-Length", str(len(response_body))),
        ]
        start_response(response_code, response_headers)

        return [response_body]
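
The repository path suggests this application is served through an FCGI entry point (index.fcgi), which is not part of this file. A minimal sketch of such a launcher, assuming the flup WSGI-to-FastCGI bridge is installed and that the module is importable as myrss_app (both assumptions, not shown in this revision):

    # index.fcgi -- hypothetical launcher, not part of this changeset
    from flup.server.fcgi import WSGIServer

    from myrss_app import MyRssApp

    if __name__ == "__main__":
        # flup adapts the WSGI callable (MyRssApp.__call__) to FastCGI.
        WSGIServer(MyRssApp()).run()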