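# myrss -- a small Python 2 WSGI application that merges the RSS/Atom feeds
# listed in the FEEDS file into a single HTML page.  Feeds are fetched in
# parallel by a pool of worker threads, and the rendered page is cached on
# disk (CACHE_HTML_FILE) for CACHE_LIFE seconds.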
import os
import sys
import re
import urllib2
import threading
import Queue
import datetime
import time

import logging
logging.basicConfig(level=logging.INFO)

import xml.etree.ElementTree
import HTMLParser

import html  # third-party "html" page-builder package (this is Python 2 code, not the Python 3 stdlib "html" module)


FEEDS_FILE = "FEEDS"
CACHE_HTML_FILE = "__cache__.html"

CACHE_LIFE = 1200  # [seconds]
MAX_ITEMS = 50
MAX_LINK_Z = 4
MAX_THREADS = 20
URLOPEN_TIMEOUT = 60  # [seconds]


_PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")

def _parse_root_tag(root_tag):
    re_match = _PARSE_ROOT_TAG_RE.match(root_tag)

    if re_match is None:
        return (None, None)
    else:
        return re_match.group(2, 3)
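# Illustrative examples (not in the original source): the regex splits an
# ElementTree root tag into (namespace, local name), e.g.
#   _parse_root_tag("rss")                               -> (None, "rss")
#   _parse_root_tag("{http://www.w3.org/2005/Atom}feed") -> ("http://www.w3.org/2005/Atom", "feed")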


def _strip_if_not_none(txt):
    return txt.strip() if txt is not None else ''


def _go_rss(elementTree):
    title = _strip_if_not_none(elementTree.find("channel/title").text)
    link = elementTree.find("channel/link").text

    items = []

    for i in elementTree.findall("channel/item")[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.find("title").text)
        it_link = i.find("link").text

        items.append((it_title, it_link))

    return (title, link, items)


def _go_atom(elementTree):
    ns = "http://www.w3.org/2005/Atom"

    title = _strip_if_not_none(elementTree.find("{%s}title" % ns).text)
    link = ''

    for i in elementTree.findall("{%s}link" % ns):
        if i.get("type") == "text/html" and i.get("rel") == "alternate":
            link = i.get("href")
            break

    items = []

    for i in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        it_title = _strip_if_not_none(i.find("{%s}title" % ns).text)
        it_link = ''

        for j in i.findall("{%s}link" % ns):
            if j.get("type") == "text/html" and j.get("rel") == "alternate":
                it_link = j.get("href")
                break

        items.append((it_title, it_link))

    return (title, link, items)
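# Both parsers return the same shape, consumed by _to_html below:
#   (feed_title, feed_link, [(item_title, item_link), ...])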


_STRIP_HTML_RE = re.compile(r"<.*?>")
_htmlParser = HTMLParser.HTMLParser()

def _strip_html(txt):
    return _htmlParser.unescape(_STRIP_HTML_RE.sub('', txt))
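# Illustrative example (not in the original source): tags are dropped and
# entities decoded, e.g. _strip_html("<b>Tom &amp; Jerry</b>") yields "Tom & Jerry".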


def _to_html(dtnow, docstruct):
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = html.HTML("html")

    header = root.header
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="index.css")

    body = root.body
    body.h1(page_title)

    link_z = 0

    for feed in docstruct:
        if feed is None:
            continue

        (title, link, items) = feed

        body.h2.a(_strip_html(title), href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            if i > 0:
                p += " - "

            p.a(_strip_html(it_title), href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            link_z += 1

    # page generation time (in seconds), emitted in a div with class "debug"
    dtdelta = datetime.datetime.now() - dtnow
    root.div("%.3f" % (dtdelta.days*86400 + dtdelta.seconds + dtdelta.microseconds/1e6), klass="debug")

    return unicode(root).encode("utf-8")


def _fetch_url(url):
    try:
        logging.info("processing %s" % url)
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": ''}), timeout=URLOPEN_TIMEOUT)
    except urllib2.HTTPError as e:
        logging.info("(%s) %s" % (url, e))
        return None

    return feed
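# Note: only HTTPError is handled here; network failures (urllib2.URLError,
# socket timeouts) propagate and are caught by the worker thread's blanket
# except clause below.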


def _process_feed(feed):
    ret = None

    elementTree = xml.etree.ElementTree.parse(feed)
    root = elementTree.getroot()

    parsed_root_tag = _parse_root_tag(root.tag)

    if parsed_root_tag == (None, "rss"):
        version = float(root.get("version", 0.0))
        if version >= 2.0:
            ret = _go_rss(elementTree)
        else:
            raise NotImplementedError("Unsupported rss version")
    elif parsed_root_tag == ("http://www.w3.org/2005/Atom", "feed"):
        ret = _go_atom(elementTree)
    else:
        raise NotImplementedError("Unknown root tag")

    return ret


class WorkerThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()
            docfeed = None
            try:
                feed = _fetch_url(url)
                if feed is not None:
                    docfeed = _process_feed(feed)
            except Exception as e:
                logging.info("(%s) exception: %s" % (url, e))
            self._output_queue.put((idx, docfeed))
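# Workers are daemon threads that block on the input queue forever; each one
# echoes back the feed's index so main() can slot results into docstruct in
# the original FEEDS order regardless of completion order.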


def main(input_queue, output_queue, lock):
    ret = ''

    with lock:
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        if os.path.exists(CACHE_HTML_FILE) and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE):
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()

        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            docstruct = [None]*len(feedlines)
            num_input = 0
            for (i, l) in enumerate(feedlines):
                if l[0] != '#':  # lines starting with '#' in FEEDS are comments
                    l = l.strip()
                    input_queue.put((i, l))
                    num_input += 1

            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

    return ret
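# The lock serializes the whole request: only one request at a time either
# reads the cached page or regenerates it, so concurrent requests never race
# on the shared queues or on CACHE_HTML_FILE.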


class MyRssApp:
    def __init__(self):
        self._iq = Queue.Queue(MAX_THREADS)
        self._oq = Queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        for _ in range(MAX_THREADS):
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    def __call__(self, environ, start_response):
        response_body = main(self._iq, self._oq, self._main_lock)
        response_headers = [
            ("Content-Type", "text/html; charset=UTF-8"),
            ("Content-Length", str(len(response_body))),
        ]
        start_response("200 OK", response_headers)

        return [response_body]
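

# Minimal usage sketch (an assumption, not part of the original deployment):
# MyRssApp is a standard WSGI callable, so it can be served locally with the
# stdlib reference server for testing, e.g.:
#
#   if __name__ == "__main__":
#       from wsgiref.simple_server import make_server
#       make_server("127.0.0.1", 8000, MyRssApp()).serve_forever()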