"""Minimal RSS/Atom feed aggregator (Python 2).

Reads feed URLs from FEEDS_FILE, fetches and parses them in parallel, and
prints a single HTML page to stdout, caching the result in CACHE_HTML_FILE
for CACHE_LIFE seconds.
"""

import os
import sys
import re
import urllib2
import threading
import Queue
import datetime
import time

import html  # third-party HTML tree builder used via html.HTML() below (not the Python 3 stdlib "html")
import xml.etree.ElementTree


FEEDS_FILE = "FEEDS"
CACHE_HTML_FILE = "__cache__.html"

#CACHE_LIFE = 1200 # [seconds]
CACHE_LIFE = 30 # [seconds]
MAX_ITEMS = 30
MAX_LINK_Z = 4
MAX_THREADS = 20
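
# Example FEEDS file contents (format inferred from main(): one feed URL per
# line, lines starting with '#' are skipped; these URLs are placeholders):
#
#   # tech news
#   http://example.com/feed.rss
#   http://example.org/atom.xml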


_PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")

def _parse_root_tag(root_tag):
    # Split an ElementTree tag of the form "{namespace}name" into (namespace, name);
    # the namespace part is None when the tag carries no namespace.
    re_match = _PARSE_ROOT_TAG_RE.match(root_tag)

    if re_match is None:
        return (None, None)
    else:
        return re_match.group(2, 3)
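# For instance:
#   _parse_root_tag("{http://www.w3.org/2005/Atom}feed")  -> ("http://www.w3.org/2005/Atom", "feed")
#   _parse_root_tag("rss")                                 -> (None, "rss")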


def _go_rss(elementTree):
    # Reduce an RSS 2.0 tree to (feed_title, feed_link, [(item_title, item_link), ...]).
    title = elementTree.find("channel/title").text.strip()
    link = elementTree.find("channel/link").text

    items = []

    for i in elementTree.findall("channel/item")[:MAX_ITEMS]:
        it_title = i.find("title").text.strip()
        it_link = i.find("link").text

        items.append((it_title, it_link))

    return (title, link, items)


def _go_atom(elementTree):
    # Reduce an Atom tree to the same (title, link, items) shape as _go_rss().
    ns = "http://www.w3.org/2005/Atom"

    title = elementTree.find("{%s}title" % ns).text.strip()
    link = ''

    for i in elementTree.findall("{%s}link" % ns):
        if i.get("type") == "text/html" and i.get("rel") == "alternate":
            link = i.get("href")
            break

    items = []

    for i in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        it_title = i.find("{%s}title" % ns).text.strip()
        it_link = ''

        for j in i.findall("{%s}link" % ns):
            if j.get("type") == "text/html" and j.get("rel") == "alternate":
                it_link = j.get("href")
                break

        items.append((it_title, it_link))

    return (title, link, items)


def _to_html(dtnow, docstruct):
    # Render the parsed feeds as a single HTML page; every link gets a cycling
    # CSS class z0 .. z(MAX_LINK_Z - 1).
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = html.HTML()

    header = root.header
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="index.css")

    body = root.body
    body.h1(page_title)

    link_z = 0

    for feed in docstruct:
        if feed is None:
            continue

        (title, link, items) = feed

        body.h2.a(title, href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            if i > 0:
                p += " - "

            p.a(it_title, href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            link_z += 1

    return unicode(root).encode("utf-8")


def _process_url(url):
    # Fetch one feed URL and dispatch to the RSS or Atom parser; returns None
    # on HTTP errors, raises NotImplementedError for unsupported formats.
    ret = None

    try:
        print >> sys.stderr, "--> processing %s" % url
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": ''}))
    except urllib2.HTTPError as e:
        print >> sys.stderr, "--> (%s) %s" % (url, e)
        return ret

    elementTree = xml.etree.ElementTree.parse(feed)
    root = elementTree.getroot()

    parsed_root_tag = _parse_root_tag(root.tag)

    if parsed_root_tag == (None, "rss"):
        version = float(root.get("version", 0.0))
        if version >= 2.0:
            ret = _go_rss(elementTree)
        else:
            raise NotImplementedError("Unsupported rss version")
    elif parsed_root_tag == ("http://www.w3.org/2005/Atom", "feed"):
        ret = _go_atom(elementTree)
    else:
        raise NotImplementedError("Unknown root tag")

    return ret


class WorkerThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()
            docfeed = None
            try:
                docfeed = _process_url(url)
            except Exception as e:
                print >> sys.stderr, "--> (%s) exception: %s" % (url, e)
            self._output_queue.put((idx, docfeed))
            self._input_queue.task_done()


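# main() serves the cached page while it is younger than CACHE_LIFE seconds;
# otherwise it queues every feed URL for the pool of MAX_THREADS WorkerThreads,
# collects the (index, feed) results so output keeps the FEEDS file order,
# renders the page, and rewrites the cache file.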
def main():
    ret = ''

    epoch_now = time.time()
    dtnow = datetime.datetime.fromtimestamp(epoch_now)

    if os.path.exists(CACHE_HTML_FILE) and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE):
        with open(CACHE_HTML_FILE) as cache_html_file:
            ret = cache_html_file.read()

    else:
        with open(FEEDS_FILE) as feeds_file:
            feedlines = feeds_file.readlines()

        docstruct = [None]*len(feedlines)
        iq = Queue.Queue(len(feedlines))  # Queue maxsize expects an int (upper bound on queued jobs)
        oq = Queue.Queue(len(feedlines))

        for _ in range(MAX_THREADS):
            WorkerThread(input_queue=iq, output_queue=oq).start()

        for (i, l) in enumerate(feedlines):
            if l[0] != '#':
                l = l.strip()
                iq.put((i, l))

        iq.join()

        while True:
            try:
                (idx, docfeed) = oq.get_nowait()
                docstruct[idx] = docfeed
            except Queue.Empty:
                break

        ret = _to_html(dtnow, docstruct)

        with open(CACHE_HTML_FILE, 'w') as cache_html_file:
            cache_html_file.write(ret)

    return ret


if __name__ == "__main__":
    print main()