view myrss2/myrss_app.py @ 141:df18c9d8857c

myrss2: FEEDS: Add theconversation.com, remove Coding Horror
author paulo
date Sun, 06 Aug 2023 17:34:26 +0000
parents cffd95813b82
children 2ed8cf5f36bf
line source
1 import datetime
2 import gzip
3 import io
4 import os
5 import queue
6 import re
7 import sys
8 import threading
9 import time
10 import traceback
11 import urllib.error
12 import urllib.request
14 import logging
# Log level name taken from the environment, e.g. "DEBUG", "INFO", "WARNING".
LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO")
logging.basicConfig(
    # NOTE(review): an unknown LOG_LEVEL name raises AttributeError here.
    level=getattr(logging, LOG_LEVEL),
    format="%(asctime)s %(levelname)-8s %(message)s",
)
21 import xml.etree.ElementTree
22 import html
24 from html3.html3 import HTML
FEEDS_FILE = "FEEDS"                # input file: one feed URL per line; '#' comments a line out
CACHE_HTML_FILE = "__cache__.html"  # rendered-page cache written and read by main()

CACHE_LIFE = 1200 # cache freshness window [seconds]
MAX_ITEMS = 50 # cap on items rendered per feed
MAX_LINK_Z = 4 # number of zN CSS colour classes cycled for links
MAX_THREADS = 20 # size of the fetcher worker pool (also bounds the queues)
URLOPEN_TIMEOUT = 10 # per-request fetch timeout [seconds]
37 _PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")
39 def _parse_root_tag(root_tag):
40 re_match = _PARSE_ROOT_TAG_RE.match(root_tag)
42 if re_match is None:
43 return (None, None)
44 else:
45 return re_match.group(2, 3)
48 def _strip_if_not_none(txt):
49 return txt.strip() if txt is not None else ''
def _go_rss(elementTree):
    """Extract (title, link, items) from an RSS 2.0 document.

    Returns (channel_title, channel_link, [(item_title, item_link), ...]);
    links may be None when the feed omits them, titles default to ''.
    """
    # findtext() returns '' both when the element is missing and when it is
    # empty, so feeds lacking a <title> no longer raise AttributeError the
    # way find(...).text did.
    title = elementTree.findtext("channel/title", '').strip()

    link_el = elementTree.find("channel/link")
    link = link_el.text if link_el is not None else None

    items = []
    for item in elementTree.findall("channel/item")[:MAX_ITEMS]:
        it_title = item.findtext("title", '').strip()

        it_link_el = item.find("link")
        it_link = it_link_el.text if it_link_el is not None else None

        items.append((it_title, it_link))

    return (title, link, items)
def _go_atom(elementTree):
    """Extract (title, link, items) from an Atom feed document.

    Returns (feed_title, feed_link, [(entry_title, entry_link), ...]);
    links default to '' when no suitable <link> is found.
    """
    ns = "http://www.w3.org/2005/Atom"

    # findtext() tolerates a missing <title>, where find(...).text raised
    # AttributeError.
    title = elementTree.findtext("{%s}title" % ns, '').strip()

    link = ''
    links = elementTree.findall("{%s}link" % ns)
    for candidate in links:
        # With several <link>s, prefer rel="alternate" (the human-readable page);
        # a single <link> is taken as-is.
        if len(links) == 1 or candidate.get("rel") == "alternate":
            link = candidate.get("href")
            break

    items = []
    for entry in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        it_title = entry.findtext("{%s}title" % ns, '').strip()

        it_link = ''
        it_links = entry.findall("{%s}link" % ns)
        for candidate in it_links:
            if len(it_links) == 1 or candidate.get("rel") == "alternate":
                it_link = candidate.get("href")
                break

        items.append((it_title, it_link))

    return (title, link, items)
def _go_purl_rss(elementTree):
    """Extract (title, link, items) from an RDF/RSS 1.0 (purl.org) document.

    Returns (channel_title, channel_link, [(item_title, item_link), ...]);
    links may be None when absent, titles default to ''.
    """
    ns = "http://purl.org/rss/1.0/"

    # findtext() avoids the AttributeError that find(...).text raised when
    # the <title> element was missing.
    title = elementTree.findtext("{%s}channel/{%s}title" % (ns, ns), '').strip()

    link_el = elementTree.find("{%s}channel/{%s}link" % (ns, ns))
    link = link_el.text if link_el is not None else None

    items = []
    for item in elementTree.findall("{%s}item" % ns)[:MAX_ITEMS]:
        it_title = item.findtext("{%s}title" % ns, '').strip()

        it_link_el = item.find("{%s}link" % ns)
        it_link = it_link_el.text if it_link_el is not None else None

        items.append((it_title, it_link))

    return (title, link, items)
113 _STRIP_HTML_RE = re.compile(r"<.*?>")
115 def _strip_html(txt):
116 return html.unescape(_STRIP_HTML_RE.sub('', txt))
def _to_html(dtnow, docstruct):
    """Render the aggregated feeds in *docstruct* as a complete HTML page string.

    dtnow is the timestamp shown in the page title; docstruct is a list of
    (title, link, items) tuples -- or None for feeds that failed to load --
    where items is a list of (item_title, item_link) pairs.
    """
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = HTML("html")

    header = root.head
    header.meta(name="viewport", content="width=device-width, initial-scale=1")
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="static/index.css")

    body = root.body
    body.h1(page_title)

    # link_z feeds the zN CSS class (cycling modulo MAX_LINK_Z) that styles links.
    link_z = 0

    for feed in docstruct:
        if feed is None:
            # Feed failed to download or parse -- skip it.
            continue

        (title, link, items) = feed

        logging.debug("title: %s", title)
        body.h2.a(_strip_html(title), href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            if i > 0:
                p += " - "  # separator between consecutive items

            if not it_title:
                it_title = "(missing title)"

            if it_link is not None:
                p.a(_strip_html(it_title), href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            else:
                # No link available: render the bare title text.
                p += _strip_html(it_title)

            # Advance the colour cycle for the next rendered link.
            link_z += 1

    # Page-generation time in seconds, rendered into a .debug element.
    dtdelta = datetime.datetime.now() - dtnow
    root.div("%.3f" % (dtdelta.days*86400 + dtdelta.seconds + dtdelta.microseconds/1e6), klass="debug")

    return str(root)
def _fetch_url(url):
    """Fetch *url* and return the response body as text, or None on HTTP error.

    Transparently decompresses gzip-encoded responses. Non-HTTP failures
    (URLError, timeouts) propagate to the caller.
    """
    try:
        logging.info("processing %s", url)
        request = urllib.request.Request(url, headers={"User-Agent": "Mozilla/5.0 Browser"})
        # Context manager closes the connection deterministically; the old
        # code leaked the response object.
        with urllib.request.urlopen(request, timeout=URLOPEN_TIMEOUT) as feed:
            # Header lookup is case-insensitive, unlike the previous exact
            # string match against the raw header lines.
            if feed.headers.get("Content-Encoding", "").lower() == "gzip":
                body = gzip.decompress(feed.read())
            else:
                body = feed.read()
    except urllib.error.HTTPError as e:
        logging.info("(%s) %s", url, e)
        return None

    return str(body, encoding="utf-8")
182 def _filter_feed(feed):
183 ret = feed.strip()
185 filter_out = ["\x16"]
186 for i in filter_out:
187 ret = ret.replace(i, "")
189 return ret
def _process_feed(feed):
    """Parse a feed document string and dispatch to the matching extractor.

    Returns the (title, link, items) tuple produced by the extractor.
    Raises NotImplementedError for unknown or unsupported document types.
    """
    tree = xml.etree.ElementTree.parse(io.StringIO(feed))
    root = tree.getroot()

    tag_ns, tag_name = _parse_root_tag(root.tag)

    if (tag_ns, tag_name) == (None, "rss"):
        if float(root.get("version", 0.0)) >= 2.0:
            return _go_rss(tree)
        raise NotImplementedError("Unsupported rss version")
    if (tag_ns, tag_name) == ("http://www.w3.org/2005/Atom", "feed"):
        return _go_atom(tree)
    if (tag_ns, tag_name) == ("http://www.w3.org/1999/02/22-rdf-syntax-ns#", "RDF"):
        return _go_purl_rss(tree)
    raise NotImplementedError("Unknown root tag")
class WorkerThread(threading.Thread):
    """Daemon worker: consumes (index, url) jobs, produces (index, feed-or-None)."""

    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        super().__init__(*args, **kwargs)
        # Daemon threads let the process exit without joining the pool.
        self.daemon = True

    def run(self):
        while True:
            job_index, feed_url = self._input_queue.get()

            parsed = None
            try:
                raw = _fetch_url(feed_url)
                if raw is not None:
                    parsed = _process_feed(_filter_feed(raw))
            except Exception as err:
                logging.info("(%s) exception: (%s) %s" % (feed_url, type(err), err))

            # Always answer, even on failure, so main() can count responses.
            self._output_queue.put((job_index, parsed))
def main(input_queue, output_queue, lock):
    """Return the rendered HTML page, using the on-disk cache while it is fresh.

    Holds *lock* for the whole operation so concurrent requests do not race
    on the cache file or the worker queues.
    """
    ret = ''

    with lock:
        logging.debug("main() started")
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        cache_is_fresh = (
            os.path.exists(CACHE_HTML_FILE)
            and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE)
        )

        if cache_is_fresh:
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()
        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            # Results come back by index, so output keeps the FEEDS file order.
            docstruct = [None] * len(feedlines)

            num_queued = 0
            for idx, line in enumerate(feedlines):
                if line[0] != '#':
                    input_queue.put((idx, line.strip()))
                    num_queued += 1

            for _ in range(num_queued):
                done_idx, docfeed = output_queue.get()
                docstruct[done_idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

        logging.debug("main() ended")

    return ret
class MyRssApp:
    """WSGI application serving the aggregated feed page built by main()."""

    def __init__(self):
        logging.debug("MyRssApp.__init__() called")
        # Bounded job/result queues shared with the worker pool.
        self._iq = queue.Queue(MAX_THREADS)
        self._oq = queue.Queue(MAX_THREADS)
        # Serializes page generation across concurrent requests.
        self._main_lock = threading.Lock()

        for i in range(MAX_THREADS):
            logging.debug("Starting thread: %d", i)
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    # Raw WSGI
    def __call__(self, environ, start_response):
        response_code = "500 Internal Server Error"
        response_type = "text/plain; charset=UTF-8"

        try:
            response_body = main(self._iq, self._oq, self._main_lock)
            response_code = "200 OK"
            response_type = "text/html; charset=UTF-8"
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt silently; keep the traceback page for the
            # client but log the error server-side too.
            logging.exception("unhandled error while building response")
            response_body = traceback.format_exc()

        # Encode once and report Content-Length in bytes -- len() of the str
        # undercounts whenever the page contains non-ASCII characters.
        response_bytes = bytes(response_body, encoding="utf-8")
        response_headers = [
            ("Content-Type", response_type),
            ("Content-Length", str(len(response_bytes))),
        ]
        start_response(response_code, response_headers)

        return [response_bytes]

    def call(self):
        return main(self._iq, self._oq, self._main_lock)