myrss/myrss_app.py @ 68:66a232bae83c

myrss: add commented out debug logging code
author   paulo
date     Thu, 11 Jun 2015 21:25:03 -0700
parents  315afeb47e52
children ae0f2f438a95
import os
import sys
import re
import urllib2
import threading
import Queue
import datetime
import time
import logging
logging.basicConfig(
    level=logging.INFO,
    #filename="_LOG",
    #format="%(asctime)s %(levelname)-8s %(message)s",
)
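# The two commented-out kwargs above are the debug logging this changeset
# adds; enabled, the call would look like the sketch below (assuming a
# "_LOG" file in the working directory is acceptable). Note basicConfig()
# only takes effect once per process, so this would replace, not augment,
# the call above:
#
#   logging.basicConfig(
#       level=logging.INFO,
#       filename="_LOG",
#       format="%(asctime)s %(levelname)-8s %(message)s",
#   )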
import xml.etree.ElementTree
import HTMLParser

import html  # third-party "html" builder package (not a stdlib module in Python 2)
FEEDS_FILE = "FEEDS"
CACHE_HTML_FILE = "__cache__.html"

CACHE_LIFE = 1200  # [seconds]
MAX_ITEMS = 50
MAX_LINK_Z = 4
MAX_THREADS = 20
URLOPEN_TIMEOUT = 60  # [seconds]
_PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")

def _parse_root_tag(root_tag):
    re_match = _PARSE_ROOT_TAG_RE.match(root_tag)

    if re_match is None:
        return (None, None)
    else:
        return re_match.group(2, 3)
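# For example (behavior implied by the regex above):
#   _parse_root_tag("{http://www.w3.org/2005/Atom}feed")
#       -> ("http://www.w3.org/2005/Atom", "feed")
#   _parse_root_tag("rss") -> (None, "rss")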
def _strip_if_not_none(txt):
    return txt.strip() if txt is not None else ''
def _go_rss(elementTree):
    title = _strip_if_not_none(elementTree.find("channel/title").text)
    link = elementTree.find("channel/link").text

    items = []

    for i in elementTree.findall("channel/item")[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.find("title").text)
        it_link = i.find("link").text

        items.append((it_title, it_link))

    return (title, link, items)
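# _go_rss and _go_atom (below) return the same triple, which is what
# _to_html consumes: (feed_title, feed_link, [(item_title, item_link), ...])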
def _go_atom(elementTree):
    ns = "http://www.w3.org/2005/Atom"

    title = _strip_if_not_none(elementTree.find("{%s}title" % ns).text)
    link = ''

    for i in elementTree.findall("{%s}link" % ns):
        if i.get("type") == "text/html" and i.get("rel") == "alternate":
            link = i.get("href")
            break

    items = []

    for i in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.find("{%s}title" % ns).text)
        it_link = ''

        for j in i.findall("{%s}link" % ns):
            if j.get("type") == "text/html" and j.get("rel") == "alternate":
                it_link = j.get("href")
                break

        items.append((it_title, it_link))

    return (title, link, items)
_STRIP_HTML_RE = re.compile(r"<.*?>")
_htmlParser = HTMLParser.HTMLParser()

def _strip_html(txt):
    return _htmlParser.unescape(_STRIP_HTML_RE.sub('', txt))
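# For example: _strip_html("<b>A &amp; B</b>") -> "A & B"
# (tags are removed first, then entities are unescaped)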
def _to_html(dtnow, docstruct):
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = html.HTML("html")

    header = root.header
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="index.css")

    body = root.body
    body.h1(page_title)

    link_z = 0

    for feed in docstruct:
        if feed is None:
            continue

        (title, link, items) = feed

        # cycle every link through CSS classes z0 .. z(MAX_LINK_Z - 1)
        body.h2.a(_strip_html(title), href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            if i > 0:
                p += " - "
            p.a(_strip_html(it_title), href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            link_z += 1

    # page-generation time in seconds, rendered into a debug <div>
    dtdelta = datetime.datetime.now() - dtnow
    root.div("%.3f" % (dtdelta.days*86400 + dtdelta.seconds + dtdelta.microseconds/1e6), klass="debug")

    return unicode(root).encode("utf-8")
def _fetch_url(url):
    # only HTTPError is handled here; URLError and socket timeouts propagate
    # to the caller (WorkerThread.run catches all exceptions)
    try:
        logging.info("processing %s" % url)
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": ''}), timeout=URLOPEN_TIMEOUT)
    except urllib2.HTTPError as e:
        logging.info("(%s) %s" % (url, e))
        return None

    return feed
def _process_feed(feed):
    ret = None

    elementTree = xml.etree.ElementTree.parse(feed)
    root = elementTree.getroot()

    parsed_root_tag = _parse_root_tag(root.tag)

    if parsed_root_tag == (None, "rss"):
        version = float(root.get("version", 0.0))
        if version >= 2.0:
            ret = _go_rss(elementTree)
        else:
            raise NotImplementedError("Unsupported rss version")
    elif parsed_root_tag == ("http://www.w3.org/2005/Atom", "feed"):
        ret = _go_atom(elementTree)
    else:
        raise NotImplementedError("Unknown root tag")

    return ret
class WorkerThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True  # don't keep the process alive on shutdown

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()
            docfeed = None
            try:
                feed = _fetch_url(url)
                if feed is not None:
                    docfeed = _process_feed(feed)
            except Exception as e:
                logging.info("(%s) exception: %s" % (url, e))

            # idx ties the result back to its slot in docstruct, so feeds
            # render in FEEDS-file order regardless of completion order
            self._output_queue.put((idx, docfeed))
def main(input_queue, output_queue, lock):
    ret = ''

    # the lock serializes requests so concurrent WSGI calls don't rebuild
    # the cache simultaneously
    with lock:
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        # serve the cached page while it is younger than CACHE_LIFE
        if os.path.exists(CACHE_HTML_FILE) and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE):
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()
        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            docstruct = [None]*len(feedlines)
            num_input = 0
            for (i, l) in enumerate(feedlines):
                if l[0] != '#':
                    l = l.strip()
                    input_queue.put((i, l))
                    num_input += 1

            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

    return ret
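# Inferred FEEDS file format (from the parsing above; the URLs here are
# placeholders): one feed URL per line, lines starting with "#" skipped.
#
#   # news feeds
#   http://example.com/rss.xml
#   http://example.com/atom.xml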
class MyRssApp:
    def __init__(self):
        self._iq = Queue.Queue(MAX_THREADS)
        self._oq = Queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        for _ in range(MAX_THREADS):
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    def __call__(self, environ, start_response):
        response_body = main(self._iq, self._oq, self._main_lock)

        response_headers = [
            ("Content-Type", "text/html; charset=UTF-8"),
            ("Content-Length", str(len(response_body))),
        ]
        start_response("200 OK", response_headers)

        return [response_body]
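# Usage sketch (an addition, not part of the changeset): MyRssApp is a plain
# WSGI callable, so the stdlib wsgiref server can host it for local testing;
# the port (8000) is an arbitrary choice.
if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    make_server("", 8000, MyRssApp()).serve_forever()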