annotate myrss2/myrss_app.py @ 141:df18c9d8857c

myrss2: FEEDS: Add theconversation.com, remove Coding Horror
author paulo
date Sun, 06 Aug 2023 17:34:26 +0000
parents cffd95813b82
children 2ed8cf5f36bf
rev   line source
paulo@110 1 import datetime
paulo@110 2 import gzip
paulo@108 3 import io
paulo@108 4 import os
paulo@110 5 import queue
paulo@110 6 import re
paulo@108 7 import sys
paulo@108 8 import threading
paulo@108 9 import time
paulo@108 10 import traceback
paulo@110 11 import urllib.error
paulo@110 12 import urllib.request
paulo@108 13
paulo@108 14 import logging
# Log level is configurable via the LOG_LEVEL environment variable
# (e.g. "DEBUG", "INFO"); defaults to INFO.  An invalid value makes
# getattr() raise AttributeError at import time (fail fast).
LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO")
logging.basicConfig(
    level=getattr(logging, LOG_LEVEL),
    format="%(asctime)s %(levelname)-8s %(message)s",
)
paulo@108 20
paulo@108 21 import xml.etree.ElementTree
paulo@108 22 import html
paulo@108 23
paulo@108 24 from html3.html3 import HTML
paulo@108 25
paulo@108 26
# Input file listing one feed URL per line; lines starting with '#' are skipped.
FEEDS_FILE = "FEEDS"
# Rendered HTML page is cached in this file between requests.
CACHE_HTML_FILE = "__cache__.html"

CACHE_LIFE = 1200 # [seconds]  -- cache file younger than this is served as-is
MAX_ITEMS = 50    # max items rendered per feed
MAX_LINK_Z = 4    # number of CSS link classes cycled through (z0..z3)
MAX_THREADS = 20  # size of the fetcher thread pool (and of both queues)
URLOPEN_TIMEOUT = 10 # [seconds]
paulo@108 35
paulo@108 36
paulo@108 37 _PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")
paulo@108 38
paulo@108 39 def _parse_root_tag(root_tag):
paulo@108 40 re_match = _PARSE_ROOT_TAG_RE.match(root_tag)
paulo@108 41
paulo@108 42 if re_match is None:
paulo@108 43 return (None, None)
paulo@108 44 else:
paulo@108 45 return re_match.group(2, 3)
paulo@108 46
paulo@108 47
paulo@108 48 def _strip_if_not_none(txt):
paulo@108 49 return txt.strip() if txt is not None else ''
paulo@108 50
paulo@108 51
def _go_rss(elementTree):
    """Extract (title, link, items) from an RSS 2.0 document tree.

    items is a list of (item_title, item_link) pairs, capped at MAX_ITEMS.
    """
    channel_title = _strip_if_not_none(elementTree.find("channel/title").text)
    channel_link = elementTree.find("channel/link").text

    items = [
        (_strip_if_not_none(entry.find("title").text), entry.find("link").text)
        for entry in elementTree.findall("channel/item")[:MAX_ITEMS]
    ]

    return (channel_title, channel_link, items)
paulo@108 65
paulo@108 66
def _go_atom(elementTree):
    """Extract (title, link, items) from an Atom feed document tree.

    items is a list of (entry_title, entry_link) pairs, capped at MAX_ITEMS.
    """
    ns = "http://www.w3.org/2005/Atom"

    def pick_link(link_elems):
        # A single <link> wins outright; with several, prefer rel="alternate".
        # Falls back to '' when nothing qualifies.
        for elem in link_elems:
            if len(link_elems) == 1 or elem.get("rel") == "alternate":
                return elem.get("href")
        return ''

    feed_title = _strip_if_not_none(elementTree.find("{%s}title" % ns).text)
    feed_link = pick_link(elementTree.findall("{%s}link" % ns))

    items = []
    for entry in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        entry_title = _strip_if_not_none(entry.find("{%s}title" % ns).text)
        entry_link = pick_link(entry.findall("{%s}link" % ns))
        items.append((entry_title, entry_link))

    return (feed_title, feed_link, items)
paulo@108 94
paulo@108 95
def _go_purl_rss(elementTree):
    """Extract (title, link, items) from an RSS 1.0 (purl.org RDF) tree.

    items is a list of (item_title, item_link) pairs, capped at MAX_ITEMS.
    """
    ns = "http://purl.org/rss/1.0/"

    channel_title = _strip_if_not_none(
        elementTree.find("{%s}channel/{%s}title" % (ns, ns)).text)
    channel_link = elementTree.find("{%s}channel/{%s}link" % (ns, ns)).text

    items = [
        (_strip_if_not_none(entry.find("{%s}title" % ns).text),
         entry.find("{%s}link" % ns).text)
        for entry in elementTree.findall("{%s}item" % ns)[:MAX_ITEMS]
    ]

    return (channel_title, channel_link, items)
paulo@108 111
paulo@108 112
paulo@108 113 _STRIP_HTML_RE = re.compile(r"<.*?>")
paulo@108 114
paulo@108 115 def _strip_html(txt):
paulo@108 116 return html.unescape(_STRIP_HTML_RE.sub('', txt))
paulo@108 117
paulo@108 118
def _to_html(dtnow, docstruct):
    """Render the aggregated feeds as a complete HTML page string.

    dtnow: datetime stamped into the page title and used for the
           render-duration footer.
    docstruct: list of (title, link, items) tuples, or None entries for
           feeds that failed to download/parse (those are skipped).
    """
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = HTML("html")

    header = root.head
    header.meta(name="viewport", content="width=device-width, initial-scale=1")
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="static/index.css")

    body = root.body
    body.h1(page_title)

    # link_z cycles through MAX_LINK_Z CSS classes (z0..z3) to alternate
    # link colors; it advances per feed heading and per item.
    link_z = 0

    for feed in docstruct:
        if feed is None:
            # Feed failed upstream; nothing to render.
            continue

        (title, link, items) = feed

        logging.debug("title: %s", title)
        body.h2.a(_strip_html(title), href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            if i > 0:
                p += " - "

            if not it_title:
                it_title = "(missing title)"
            if it_link is not None:
                p.a(_strip_html(it_title), href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            else:
                # No link available: render the title as plain text.
                p += _strip_html(it_title)

        link_z += 1

    # Debug footer: wall-clock seconds spent rendering this page.
    dtdelta = datetime.datetime.now() - dtnow
    root.div("%.3f" % (dtdelta.days*86400 + dtdelta.seconds + dtdelta.microseconds/1e6), klass="debug")

    return str(root)
paulo@108 163
paulo@108 164
def _fetch_url(url):
    """Download *url* and return its body decoded as UTF-8, or None on HTTP error.

    Sends a browser-like User-Agent (some servers reject Python's default)
    and transparently decompresses gzip-encoded responses.  Non-HTTP errors
    (timeouts, bad URLs, decode failures) propagate to the caller.
    """
    request = urllib.request.Request(
        url, headers={"User-Agent": "Mozilla/5.0 Browser"})

    try:
        logging.info("processing %s" % url)
        # Context manager guarantees the connection is closed; the original
        # leaked the response object.
        with urllib.request.urlopen(request, timeout=URLOPEN_TIMEOUT) as feed:
            # Case-insensitive header lookup (email.message.Message API) is
            # more robust than matching the literal "Content-Encoding: gzip"
            # line in the raw header dump.
            encoding = feed.info().get("Content-Encoding", "")
            if encoding.lower() == "gzip":
                body = gzip.decompress(feed.read())
            else:
                body = feed.read()

    except urllib.error.HTTPError as e:
        logging.info("(%s) %s" % (url, e))
        return None

    return str(body, encoding="utf-8")
paulo@108 180
paulo@108 181
paulo@108 182 def _filter_feed(feed):
paulo@110 183 ret = feed.strip()
paulo@108 184
paulo@108 185 filter_out = ["\x16"]
paulo@108 186 for i in filter_out:
paulo@108 187 ret = ret.replace(i, "")
paulo@108 188
paulo@108 189 return ret
paulo@108 190
paulo@108 191
def _process_feed(feed):
    """Parse a feed document string and dispatch on its root element type.

    Returns the (title, link, items) structure produced by the matching
    _go_* handler.  Raises NotImplementedError for unsupported formats and
    lets XML parse errors propagate.
    """
    element_tree = xml.etree.ElementTree.parse(io.StringIO(feed))
    root = element_tree.getroot()
    root_id = _parse_root_tag(root.tag)

    if root_id == (None, "rss"):
        # Only RSS >= 2.0 is handled here; RSS 1.0 arrives as RDF below.
        if float(root.get("version", 0.0)) >= 2.0:
            return _go_rss(element_tree)
        raise NotImplementedError("Unsupported rss version")
    if root_id == ("http://www.w3.org/2005/Atom", "feed"):
        return _go_atom(element_tree)
    if root_id == ("http://www.w3.org/1999/02/22-rdf-syntax-ns#", "RDF"):
        return _go_purl_rss(element_tree)

    raise NotImplementedError("Unknown root tag")
paulo@108 215
paulo@108 216
class WorkerThread(threading.Thread):
    """Daemon thread that turns (idx, url) work items into (idx, feed) results.

    Consumes from input_queue forever; for each URL it fetches, filters and
    parses the feed, then emits (idx, docstruct-or-None) on output_queue so
    the consumer can reassemble results in order.
    """

    def __init__(self, *args, **kwargs):
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        # Daemon threads never block interpreter shutdown.
        self.daemon = True

    def run(self):
        while True:
            (idx, url) = self._input_queue.get()

            result = None
            try:
                raw_feed = _fetch_url(url)
                if raw_feed is not None:
                    result = _process_feed(_filter_feed(raw_feed))
            except Exception as e:
                # Per-feed failures are logged and reported as None so the
                # consumer never blocks waiting for this index.
                logging.info("(%s) exception: (%s) %s" % (url, type(e), e))

            self._output_queue.put((idx, result))
paulo@108 235
paulo@108 236
def main(input_queue, output_queue, lock):
    """Produce the aggregated HTML page, serving from the on-disk cache
    when it is younger than CACHE_LIFE seconds.

    input_queue/output_queue connect to the WorkerThread pool; lock
    serializes whole-page generation across concurrent requests.
    Returns the page as a str.
    """
    ret = ''

    with lock:
        logging.debug("main() started")
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        cache_is_fresh = (
            os.path.exists(CACHE_HTML_FILE)
            and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE)
        )

        if cache_is_fresh:
            with open(CACHE_HTML_FILE) as cache_html_file:
                ret = cache_html_file.read()

        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            # docstruct is indexed by FEEDS line number so results can be
            # reassembled in file order regardless of completion order.
            docstruct = [None] * len(feedlines)
            num_input = 0
            for (i, line) in enumerate(feedlines):
                line = line.strip()
                # Skip comments AND blank lines: blank lines would otherwise
                # be enqueued as empty URLs and fail pointlessly in a worker.
                if not line or line.startswith('#'):
                    continue
                input_queue.put((i, line))
                num_input += 1

            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            ret = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(ret)

        logging.debug("main() ended")

    return ret
paulo@108 272
paulo@108 273
class MyRssApp:
    """WSGI application serving the aggregated feed page.

    Construction spawns a fixed pool of MAX_THREADS WorkerThreads wired to
    the shared input/output queues; every request funnels through main(),
    which serializes page generation via a lock.
    """

    def __init__(self):
        logging.debug("MyRssApp.__init__() called")
        self._iq = queue.Queue(MAX_THREADS)
        self._oq = queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        for i in range(MAX_THREADS):
            logging.debug("Starting thread: %d" % i)
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    # Raw WSGI
    def __call__(self, environ, start_response):
        response_code = "500 Internal Server Error"
        response_type = "text/plain; charset=UTF-8"

        try:
            response_body = main(self._iq, self._oq, self._main_lock)
            response_code = "200 OK"
            response_type = "text/html; charset=UTF-8"
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # still propagate; any app error becomes the 500 response body.
            response_body = traceback.format_exc()

        # Encode once up front: per WSGI, Content-Length is a byte count,
        # and len() of the str body under-counts multi-byte UTF-8 chars.
        response_bytes = bytes(response_body, encoding="utf-8")

        response_headers = [
            ("Content-Type", response_type),
            ("Content-Length", str(len(response_bytes))),
        ]
        start_response(response_code, response_headers)

        return [response_bytes]

    def call(self):
        # Non-WSGI entry point (e.g. for command-line/manual use).
        return main(self._iq, self._oq, self._main_lock)