rev |
line source |
paulo@39
|
1 import os
|
paulo@40
|
2 import sys
|
paulo@39
|
3 import re
|
paulo@40
|
4 import urllib2
|
paulo@40
|
5 import threading
|
paulo@40
|
6 import Queue
|
paulo@41
|
7 import datetime
|
paulo@41
|
8 import time
|
paulo@70
|
9 import traceback
|
paulo@47
|
10
|
paulo@42
|
11 import logging
|
paulo@94
|
12 #logging.basicConfig(
|
paulo@94
|
13 # level=logging.DEBUG,
|
paulo@94
|
14 # filename="_LOG",
|
paulo@94
|
15 # format="%(asctime)s %(levelname)-8s %(message)s",
|
paulo@94
|
16 #)
|
paulo@39
|
17
|
paulo@47
|
18 import xml.etree.ElementTree
|
paulo@47
|
19 import HTMLParser
|
paulo@47
|
20
|
paulo@39
|
21 import html
|
paulo@39
|
22
|
paulo@39
|
23
|
paulo@41
|
24 FEEDS_FILE = "FEEDS"
|
paulo@41
|
25 CACHE_HTML_FILE = "__cache__.html"
|
paulo@41
|
26
|
paulo@44
|
27 CACHE_LIFE = 1200 # [seconds]
|
paulo@47
|
28 MAX_ITEMS = 50
|
paulo@39
|
29 MAX_LINK_Z = 4
|
paulo@40
|
30 MAX_THREADS = 20
|
paulo@46
|
31 URLOPEN_TIMEOUT = 60 # [seconds]
|
paulo@39
|
32
|
paulo@39
|
33
|
paulo@39
|
34 _PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")
|
paulo@39
|
35
|
paulo@39
|
36 def _parse_root_tag(root_tag):
|
paulo@39
|
37 re_match = _PARSE_ROOT_TAG_RE.match(root_tag)
|
paulo@39
|
38
|
paulo@39
|
39 if re_match is None:
|
paulo@39
|
40 return (None, None)
|
paulo@39
|
41 else:
|
paulo@39
|
42 return re_match.group(2, 3)
|
paulo@39
|
43
|
paulo@39
|
44
|
paulo@47
|
45 def _strip_if_not_none(txt):
|
paulo@47
|
46 return txt.strip() if txt is not None else ''
|
paulo@47
|
47
|
paulo@47
|
48
|
paulo@39
|
def _go_rss(elementTree):
    """Extract (title, link, items) from an RSS 2.0 element tree.

    items is a list of (item_title, item_link) pairs, truncated to
    MAX_ITEMS entries.
    """
    feed_title = _strip_if_not_none(elementTree.find("channel/title").text)
    feed_link = elementTree.find("channel/link").text

    entries = []
    for item in elementTree.findall("channel/item")[:MAX_ITEMS]:
        entries.append((
            _strip_if_not_none(item.find("title").text),
            item.find("link").text,
        ))

    return (feed_title, feed_link, entries)
|
paulo@39
|
62
|
paulo@39
|
63
|
paulo@39
|
def _go_atom(elementTree):
    """Extract (title, link, items) from an Atom 1.0 element tree.

    The feed/entry link is the first rel="alternate" link, or the only
    link when exactly one is present; '' when nothing qualifies.
    items is truncated to MAX_ITEMS entries.
    """
    ns = "http://www.w3.org/2005/Atom"

    def pick_link(candidates):
        # A single link is accepted even without rel="alternate".
        for candidate in candidates:
            if len(candidates) == 1 or candidate.get("rel") == "alternate":
                return candidate.get("href")
        return ''

    feed_title = _strip_if_not_none(elementTree.find("{%s}title" % ns).text)
    feed_link = pick_link(elementTree.findall("{%s}link" % ns))

    entries = []
    for entry in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        entries.append((
            _strip_if_not_none(entry.find("{%s}title" % ns).text),
            pick_link(entry.findall("{%s}link" % ns)),
        ))

    return (feed_title, feed_link, entries)
|
paulo@39
|
91
|
paulo@39
|
92
|
paulo@69
|
def _go_purl_rss(elementTree):
    """Extract (title, link, items) from an RSS 1.0 (RDF/purl) element tree.

    items is truncated to MAX_ITEMS entries.
    """
    ns = "http://purl.org/rss/1.0/"

    def q(tag):
        # Qualify a tag name with the RSS 1.0 namespace.
        return "{%s}%s" % (ns, tag)

    feed_title = _strip_if_not_none(elementTree.find(q("channel") + "/" + q("title")).text)
    feed_link = elementTree.find(q("channel") + "/" + q("link")).text

    entries = []
    for item in elementTree.findall(q("item"))[:MAX_ITEMS]:
        entries.append((
            _strip_if_not_none(item.find(q("title")).text),
            item.find(q("link")).text,
        ))

    return (feed_title, feed_link, entries)
|
paulo@69
|
108
|
paulo@69
|
109
|
paulo@47
|
_STRIP_HTML_RE = re.compile(r"<.*?>")
_htmlParser = HTMLParser.HTMLParser()

def _strip_html(txt):
    """Remove HTML tags from txt, then decode HTML entities."""
    without_tags = _STRIP_HTML_RE.sub('', txt)
    return _htmlParser.unescape(without_tags)
|
paulo@47
|
115
|
paulo@47
|
116
|
paulo@41
|
def _to_html(dtnow, docstruct):
    """Render the parsed feeds into a complete HTML page as UTF-8 bytes.

    dtnow is the generation timestamp shown in the page title; docstruct
    is a list of (title, link, items) tuples, with None entries for
    feeds that failed to fetch or parse.
    """
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = html.HTML("html")

    header = root.head
    header.meta(name="viewport", content="width=device-width, initial-scale=1")
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="index.css")

    body = root.body
    body.h1(page_title)

    # Cycles CSS classes z0..z(MAX_LINK_Z-1) across successive links.
    link_z = 0

    for feed in docstruct:
        if feed is None:
            # Feed failed upstream; skip it entirely.
            continue

        (title, link, items) = feed

        logging.debug("title: %s", title)
        body.h2.a(_strip_html(title), href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            if i > 0:
                # Separator between consecutive items in the paragraph.
                p += " - "

            if not it_title:
                it_title = "(missing title)"
            if it_link is not None:
                p.a(_strip_html(it_title), href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            else:
                # No link available: emit plain text instead of an anchor.
                p += _strip_html(it_title)

            link_z += 1

    # Page build time in seconds, rendered as a debug footer.
    dtdelta = datetime.datetime.now() - dtnow
    root.div("%.3f" % (dtdelta.days*86400 + dtdelta.seconds + dtdelta.microseconds/1e6), klass="debug")

    return unicode(root).encode("utf-8")
|
paulo@39
|
161
|
paulo@39
|
162
|
paulo@47
|
def _fetch_url(url):
    """Open url and return the response object, or None on a fetch error.

    Sends a browser-like User-Agent (some servers reject the default
    urllib2 agent) and bounds the request with URLOPEN_TIMEOUT seconds.
    """
    try:
        logging.info("processing %s" % url)
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": "Mozilla/5.0 Browser"}), timeout=URLOPEN_TIMEOUT)
    except urllib2.URLError as e:
        # Bug fix: URLError is the superclass of HTTPError, so this now
        # also covers DNS/connection failures, which previously escaped
        # to the caller instead of being logged and skipped here.
        logging.info("(%s) %s" % (url, e))
        return None

    return feed
|
paulo@47
|
172
|
paulo@47
|
173
|
paulo@47
|
def _process_feed(feed):
    """Parse a fetched feed document and dispatch on its root element.

    Supports RSS >= 2.0, Atom 1.0 and RSS 1.0 (RDF/purl). Raises
    NotImplementedError for unsupported RSS versions or unknown roots.
    """
    elementTree = xml.etree.ElementTree.parse(feed)
    root = elementTree.getroot()

    root_tag = _parse_root_tag(root.tag)

    if root_tag == (None, "rss"):
        version = float(root.get("version", 0.0))
        if version < 2.0:
            raise NotImplementedError("Unsupported rss version")
        return _go_rss(elementTree)

    if root_tag == ("http://www.w3.org/2005/Atom", "feed"):
        return _go_atom(elementTree)

    if root_tag == ("http://www.w3.org/1999/02/22-rdf-syntax-ns#", "RDF"):
        return _go_purl_rss(elementTree)

    raise NotImplementedError("Unknown root tag")
|
paulo@40
|
196
|
paulo@40
|
197
|
paulo@40
|
class WorkerThread(threading.Thread):
    """Daemon thread that fetches and parses feed URLs.

    Pulls (index, url) pairs from input_queue and pushes
    (index, parsed_feed_or_None) results onto output_queue, forever.
    """

    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        # Daemon: never blocks interpreter shutdown.
        self.daemon = True

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()
            self._output_queue.put((idx, self._handle(url)))

    def _handle(self, url):
        # Returns the parsed feed tuple, or None when anything fails;
        # failures are logged so one bad feed never kills the worker.
        try:
            feed = _fetch_url(url)
            if feed is None:
                return None
            return _process_feed(feed)
        except Exception as e:
            logging.info("(%s) exception: (%s) %s" % (url, type(e), e))
            return None
|
paulo@40
|
216
|
paulo@40
|
217
|
paulo@44
|
def main(input_queue, output_queue, lock):
    """Build (or serve from cache) the aggregated HTML page and return it.

    Serializes page generation through lock. Reuses CACHE_HTML_FILE when
    it is younger than CACHE_LIFE seconds; otherwise dispatches every
    feed URL from FEEDS_FILE to the worker threads, renders a fresh page
    and rewrites the cache.
    """
    ret = ''

    with lock:
        logging.debug("main() started")
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        if os.path.exists(CACHE_HTML_FILE) and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE):
            # Cache is still fresh -- serve it verbatim.
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()

        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            docstruct = [None]*len(feedlines)
            num_input = 0
            for (i, l) in enumerate(feedlines):
                l = l.strip()
                # Robustness fix: skip blank lines as well as '#' comments;
                # previously a blank line was queued as an empty URL and
                # produced a spurious fetch failure.
                if l and not l.startswith('#'):
                    input_queue.put((i, l))
                    num_input += 1

            # Workers tag each result with its original index, so feed
            # order in the page matches FEEDS_FILE order regardless of
            # completion order.
            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

    return ret
|
paulo@41
|
252
|
paulo@41
|
253
|
paulo@42
|
class MyRssApp:
    """WSGI application serving the aggregated feeds page.

    Starts MAX_THREADS worker threads at construction time; each request
    is handled by main(), which serializes generation with a lock.
    """

    def __init__(self):
        logging.debug("MyRssApp.__init__() called")
        self._iq = Queue.Queue(MAX_THREADS)
        self._oq = Queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        for i in range(MAX_THREADS):
            logging.debug("Starting thread: %d" % i)
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    def __call__(self, environ, start_response):
        # Pessimistic defaults; overwritten on success.
        response_code = "500 Internal Server Error"
        response_type = "text/plain; charset=UTF-8"

        try:
            response_body = main(self._iq, self._oq, self._main_lock)
            response_code = "200 OK"
            response_type = "text/html; charset=UTF-8"
        except Exception:
            # Bug fix: the previous bare "except:" also swallowed
            # SystemExit/KeyboardInterrupt; only genuine errors should
            # become a 500. The traceback is returned for debugging and
            # logged server-side as well.
            response_body = traceback.format_exc()
            logging.exception("unhandled error while building response")

        response_headers = [
            ("Content-Type", response_type),
            ("Content-Length", str(len(response_body))),
        ]
        start_response(response_code, response_headers)

        return [response_body]
|