Fix various flake8 issues, and a false report of unhandled texinfo command Jon Turney (5): makedocbook: Use raw strings for regexes makedocbook: Use sys.exit() makedocbook: Drop stray semicolons makedocbook: Adjust inline whitespace to fix flake8 warnings makedocbook: Fix false report of unhandled texinfo command newlib/doc/chapter-texi2docbook.py | 22 ++-- newlib/doc/makedocbook.py | 170 ++++++++++++++--------------- 2 files changed, 94 insertions(+), 98 deletions(-) -- 2.38.1
Use raw strings for regexes. This is best practice, and fixes a number of "W605 invalid escape sequence" flakes. --- newlib/doc/makedocbook.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/newlib/doc/makedocbook.py b/newlib/doc/makedocbook.py index 4e83ab63a..5e46082df 100755 --- a/newlib/doc/makedocbook.py +++ b/newlib/doc/makedocbook.py @@ -107,7 +107,7 @@ def remove_noncomments(src): # A command is a single word of at least 3 characters, all uppercase, and alone on a line def iscommand(l): - if re.match('^[A-Z_]{3,}\s*$', l): + if re.match(r'^[A-Z_]{3,}\s*$', l): return True return False @@ -198,7 +198,7 @@ def function(c, l): descr = line_markup_convert(', '.join(descrlist)) # fpclassify includes an 'and' we need to discard - namelist = map(lambda v: re.sub('^and ', '', v.strip(), 1), namelist) + namelist = map(lambda v: re.sub(r'^and ', r'', v.strip(), 1), namelist) # strip off << >> surrounding name namelist = map(lambda v: v.strip().lstrip('<').rstrip('>'), namelist) # instantiate list to make it subscriptable @@ -297,11 +297,11 @@ def synopsis(c, t): s = '' for l in t.splitlines(): - if re.match('\s*(#|\[|struct)', l): + if re.match(r'\s*(#|\[|struct)', l): # preprocessor # directives, structs, comments in square brackets funcsynopsisinfo = lxml.etree.SubElement(funcsynopsis, 'funcsynopsisinfo') funcsynopsisinfo.text = l.strip() + '\n' - elif re.match('[Ll]ink with', l): + elif re.match(r'[Ll]ink with', l): pass else: s = s + l @@ -348,7 +348,7 @@ def synopsis_for_prototype(funcsynopsis, s): void = lxml.etree.SubElement(funcprototype, 'void') else: # Split parameters on ',' except if it is inside () - for p in re.split(',(?![^()]*\))', match.group(3)): + for p in re.split(r',(?![^()]*\))', match.group(3)): p = p.strip() if verbose: @@ -361,7 +361,7 @@ def synopsis_for_prototype(funcsynopsis, s): parameter = lxml.etree.SubElement(paramdef, 'parameter') # <[ ]> enclose the parameter name - match2 = 
re.match('(.*)<\[(.*)\]>(.*)', p) + match2 = re.match(r'(.*)<\[(.*)\]>(.*)', p) if verbose: print(match2.groups(), file=sys.stderr) @@ -472,16 +472,16 @@ def line_markup_convert(p): # also convert some simple texinfo markup # convert @emph{foo} to <emphasis>foo</emphasis> - s = re.sub('@emph{(.*?)}', '<emphasis>\\1</emphasis>', s) + s = re.sub(r'@emph{(.*?)}', r'<emphasis>\1</emphasis>', s) # convert @strong{foo} to <emphasis role=strong>foo</emphasis> - s = re.sub('@strong{(.*?)}', '<emphasis role="strong">\\1</emphasis>', s) + s = re.sub(r'@strong{(.*?)}', r'<emphasis role="strong">\1</emphasis>', s) # convert @minus{} to U+2212 MINUS SIGN s = s.replace('@minus{}', '−') # convert @dots{} to U+2026 HORIZONTAL ELLIPSIS s = s.replace('@dots{}', '…') # convert xref and pxref - s = re.sub('@xref{(.*?)}', "See <xref linkend='\\1'/>", s) + s = re.sub(r'@xref{(.*?)}', r"See <xref linkend='\1'/>", s) # very hacky way of dealing with @* to force a newline s = s.replace('@*', '</para><para>') @@ -562,7 +562,7 @@ def t_TABLEEND(t): def t_ITEM(t): r'o\s.*\n' - t.value = re.sub('o\s', '', lexer.lexmatch.group(0), 1) + t.value = re.sub(r'o\s', r'', lexer.lexmatch.group(0), 1) t.value = line_markup_convert(t.value) return t @@ -828,7 +828,7 @@ def main(file): print(s) # warn about texinfo commands which didn't get processed - match = re.search('@[a-z*]+', s) + match = re.search(r'@[a-z*]+', s) if match: print('texinfo command %s remains in output' % match.group(), file=sys.stderr) -- 2.38.1
Use sys.exit() to write a message to stderr and terminate with a non-zero exit code. --- newlib/doc/makedocbook.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/newlib/doc/makedocbook.py b/newlib/doc/makedocbook.py index 5e46082df..57cd23bfd 100755 --- a/newlib/doc/makedocbook.py +++ b/newlib/doc/makedocbook.py @@ -214,8 +214,7 @@ def function(c, l): # FUNCTION implies starting a new refentry if refentry is not None: - print("multiple FUNCTIONs without NEWPAGE", file=sys.stderr) - exit(1) + sys.exit("multiple FUNCTIONs without NEWPAGE") # create the refentry refentry = lxml.etree.SubElement(rootelement, 'refentry') @@ -308,17 +307,15 @@ def synopsis(c, t): # a prototype without a terminating ';' is an error if s.endswith(')'): - print("'%s' missing terminating semicolon" % l, file=sys.stderr) + sys.exit("'%s' missing terminating semicolon" % l) s = s + ';' - exit(1) if ';' in s: synopsis_for_prototype(funcsynopsis, s) s = '' if s.strip(): - print("surplus synopsis '%s'" % s, file=sys.stderr) - exit(1) + sys.exit("surplus synopsis '%s'" % s) def synopsis_for_prototype(funcsynopsis, s): s = s.strip() @@ -591,8 +588,7 @@ def t_eof(t): # Error handling rule def t_error(t): - print("tokenization error, remaining text '%s'" % t.value, file=sys.stderr) - exit(1) + sys.exit("tokenization error, remaining text '%s'" % t.value) lexer = lex.lex() @@ -795,8 +791,8 @@ def p_multitable(p): parser_verbose(p) def p_error(t): - print('parse error at line %d, token %s, next token %s' % (t.lineno, t, parser.token()), file=sys.stderr) - exit(1) + sys.exit('parse error at line %d, token %s, next token %s' % (t.lineno, t, parser.token())) + # protect creating the parser with a lockfile, so that when multiple processes # are running this script simultaneously, we don't get one of them generating a -- 2.38.1
This isn't written in C :) --- newlib/doc/makedocbook.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/newlib/doc/makedocbook.py b/newlib/doc/makedocbook.py index 57cd23bfd..8a3c31ca6 100755 --- a/newlib/doc/makedocbook.py +++ b/newlib/doc/makedocbook.py @@ -449,7 +449,7 @@ command_dispatch_dict = { # apply transformations which are easy to do in-place def line_markup_convert(p): - s = p; + s = p # process the texinfo escape for an @ s = s.replace('@@', '@') @@ -582,7 +582,7 @@ def t_eof(t): return None t.type = 'EOF' - t.lexer.at_eof = True; + t.lexer.at_eof = True return t -- 2.38.1
--- newlib/doc/chapter-texi2docbook.py | 22 +++--- newlib/doc/makedocbook.py | 115 ++++++++++++++--------------- 2 files changed, 68 insertions(+), 69 deletions(-) diff --git a/newlib/doc/chapter-texi2docbook.py b/newlib/doc/chapter-texi2docbook.py index 70ab3c04f..e9904ad00 100755 --- a/newlib/doc/chapter-texi2docbook.py +++ b/newlib/doc/chapter-texi2docbook.py @@ -16,8 +16,8 @@ def main(): first_node = True prev_sect = False - print ('<?xml version="1.0" encoding="UTF-8"?>') - print ('<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">') + print('<?xml version="1.0" encoding="UTF-8"?>') + print('<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">') for l in sys.stdin.readlines(): l = l.rstrip() @@ -29,27 +29,27 @@ def main(): l = l.replace("@node", "", 1) l = l.strip() if first_node: - print ('<chapter id="%s_chapter" xmlns:xi="http://www.w3.org/2001/XInclude">' % l.lower().replace(' ', '_')) + print('<chapter id="%s_chapter" xmlns:xi="http://www.w3.org/2001/XInclude">' % l.lower().replace(' ', '_')) first_node = False else: if prev_sect: - print ('</section>') - print ('<section id="%s">' % l) + print('</section>') + print('<section id="%s">' % l) prev_sect = True elif l.startswith("@chapter "): l = l.replace("@chapter ", "", 1) - print ('<title>%s</title>' % l) + print('<title>%s</title>' % l) elif l.startswith("@section "): l = l.replace("@section ", "", 1) - print ('<title>%s</title>' % l) + print('<title>%s</title>' % l) elif l.startswith("@include "): l = l.replace("@include ", "", 1) l = l.replace(".def", ".xml", 1) - print ('<xi:include href="%s"/>' % l.strip()) + print('<xi:include href="%s"/>' % l.strip()) if prev_sect: - print ('</section>') - print ('</chapter>') + print('</section>') + print('</chapter>') -if __name__ == "__main__" : +if __name__ == "__main__": main() diff --git a/newlib/doc/makedocbook.py 
b/newlib/doc/makedocbook.py index 8a3c31ca6..4de20ef92 100755 --- a/newlib/doc/makedocbook.py +++ b/newlib/doc/makedocbook.py @@ -31,11 +31,11 @@ import lxml.etree import ply.lex as lex import ply.yacc as yacc -rootelement = None # root element of the XML tree -refentry = None # the current refentry +rootelement = None # root element of the XML tree +refentry = None # the current refentry verbose = 0 -def dump(s, stage, threshold = 1): +def dump(s, stage, threshold=1): if verbose > threshold: print('*' * 40, file=sys.stderr) print(stage, file=sys.stderr) @@ -49,7 +49,7 @@ def dump(s, stage, threshold = 1): def skip_whitespace_and_stars(i, src): - while i < len(src) and (src[i].isspace() or (src[i] == '*' and src[i+1] != '/')): + while i < len(src) and (src[i].isspace() or (src[i] == '*' and src[i + 1] != '/')): i += 1 return i @@ -62,7 +62,7 @@ def comment_contents_generator(src): i = 0 while i < len(src) - 2: - if src[i] == '\n' and src[i+1] == '/' and src[i+2] == '*': + if src[i] == '\n' and src[i + 1] == '/' and src[i + 2] == '*': i = i + 3 i = skip_whitespace_and_stars(i, src) @@ -82,7 +82,7 @@ def comment_contents_generator(src): i = skip_whitespace_and_stars(i, src) - elif src[i] == '*' and src[i+1] == '/': + elif src[i] == '*' and src[i + 1] == '/': i = i + 2 # If we have just output \n\n, this adds another blank line. # This is the only way a double blank line can occur. 
@@ -177,10 +177,10 @@ def function(c, l): spliton = ';' o = '' for i in l.splitlines(): - if separator in i: - o += i + ';' - else: - o += i + if separator in i: + o += i + ';' + else: + o += i l = o[:-1] else: spliton = '\n' @@ -265,7 +265,7 @@ def index(c, l): primary.text = l # to validate, it seems we need to maintain refentry elements in a certain order - refentry[:] = sorted(refentry, key = lambda x: x.tag if isinstance(x.tag, str) else '') + refentry[:] = sorted(refentry, key=lambda x: x.tag if isinstance(x.tag, str) else '') # adds another alternate refname refnamediv = refentry.find('refnamediv') @@ -281,7 +281,7 @@ def index(c, l): print('duplicate refname %s discarded' % l, file=sys.stderr) # to validate, it seems we need to maintain refnamediv elements in a certain order - refnamediv[:] = sorted(refnamediv, key = lambda x: x.tag) + refnamediv[:] = sorted(refnamediv, key=lambda x: x.tag) # SYNOPSIS aka ANSI_SYNOPSIS @@ -378,14 +378,13 @@ def synopsis_for_prototype(funcsynopsis, s): # sscanf, have very complex layout using nested tables and itemized lists, which # it is best to parse in order to transform correctly. 
# - def refsect(t, s): refsect = lxml.etree.SubElement(refentry, 'refsect1') title = lxml.etree.SubElement(refsect, 'title') title.text = t.title() if verbose: - print('%s has %d paragraphs' % (t, len(s.split('\n\n'))) , file=sys.stderr) + print('%s has %d paragraphs' % (t, len(s.split('\n\n'))), file=sys.stderr) if verbose > 1: dump(s, 'before lexing') @@ -422,25 +421,25 @@ def discarded(c, t): return command_dispatch_dict = { - 'FUNCTION' : function, - 'TYPEDEF' : function, # TYPEDEF is not currently used, but described in doc.str - 'INDEX' : index, - 'TRAD_SYNOPSIS' : discarded, # K&R-style synopsis, obsolete and discarded - 'ANSI_SYNOPSIS' : synopsis, - 'SYNOPSIS' : synopsis, - 'DESCRIPTION' : refsect, - 'RETURNS' : refsect, - 'ERRORS' : refsect, - 'PORTABILITY' : refsect, - 'BUGS' : refsect, - 'WARNINGS' : refsect, - 'SEEALSO' : seealso, - 'NOTES' : refsect, # NOTES is not described in doc.str, so is currently discarded by makedoc, but that doesn't seem right - 'QUICKREF' : discarded, # The intent of QUICKREF and MATHREF is not obvious, but they don't generate any output currently - 'MATHREF' : discarded, - 'START' : discarded, # a START command is inserted to contain the text before the first command - 'END' : discarded, # an END command is inserted merely to terminate the text for the last command in a comment block - 'NEWPAGE' : newpage, + 'FUNCTION': function, + 'TYPEDEF': function, # TYPEDEF is not currently used, but described in doc.str + 'INDEX': index, + 'TRAD_SYNOPSIS': discarded, # K&R-style synopsis, obsolete and discarded + 'ANSI_SYNOPSIS': synopsis, + 'SYNOPSIS': synopsis, + 'DESCRIPTION': refsect, + 'RETURNS': refsect, + 'ERRORS': refsect, + 'PORTABILITY': refsect, + 'BUGS': refsect, + 'WARNINGS': refsect, + 'SEEALSO': seealso, + 'NOTES': refsect, # NOTES is not described in doc.str, so is currently discarded by makedoc, but that doesn't seem right + 'QUICKREF': discarded, # The intent of QUICKREF and MATHREF is not obvious, but they don't 
generate any output currently + 'MATHREF': discarded, + 'START': discarded, # a START command is inserted to contain the text before the first command + 'END': discarded, # an END command is inserted merely to terminate the text for the last command in a comment block + 'NEWPAGE': newpage, } # @@ -455,17 +454,17 @@ def line_markup_convert(p): s = s.replace('@@', '@') # escape characters not allowed in XML - s = s.replace('&','&') - s = s.replace('<','<') - s = s.replace('>','>') + s = s.replace('&', '&') + s = s.replace('<', '<') + s = s.replace('>', '>') # convert <<somecode>> to <code>somecode</code> and <[var]> to # <varname>var</varname> # also handle nested << <[ ]> >> correctly - s = s.replace('<<','<code>') - s = s.replace('<[','<varname>') - s = s.replace(']>','</varname>') - s = s.replace('>>','</code>') + s = s.replace('<<', '<code>') + s = s.replace('<[', '<varname>') + s = s.replace(']>', '</varname>') + s = s.replace('>>', '</code>') # also convert some simple texinfo markup # convert @emph{foo} to <emphasis>foo</emphasis> @@ -493,18 +492,18 @@ def line_markup_convert(p): # texinfo_commands = { - 'ifnottex' : 'IFNOTTEX', - 'end ifnottex' : 'ENDIFNOTTEX', - 'tex' : 'IFTEX', - 'end tex' : 'ENDIFTEX', - 'comment' : 'COMMENT', - 'c ' : 'COMMENT', - 'multitable' : 'MULTICOLUMNTABLE', - 'end multitable' : 'ENDMULTICOLUMNTABLE', - 'headitem' : 'MCT_HEADITEM', - 'tab' : 'MCT_COLUMN_SEPARATOR', - 'item' : 'MCT_ITEM', - } + 'ifnottex': 'IFNOTTEX', + 'end ifnottex': 'ENDIFNOTTEX', + 'tex': 'IFTEX', + 'end tex': 'ENDIFTEX', + 'comment': 'COMMENT', + 'c ': 'COMMENT', + 'multitable': 'MULTICOLUMNTABLE', + 'end multitable': 'ENDMULTICOLUMNTABLE', + 'headitem': 'MCT_HEADITEM', + 'tab': 'MCT_COLUMN_SEPARATOR', + 'item': 'MCT_ITEM', +} # token names tokens = [ @@ -575,9 +574,9 @@ def t_BLANKLINE(t): return t def t_eof(t): - if hasattr(t.lexer,'at_eof'): + if hasattr(t.lexer, 'at_eof'): # remove eof flag ready for lexing next input - delattr(t.lexer,'at_eof') + 
delattr(t.lexer, 'at_eof') t.lexer.lineno = 0 return None @@ -787,9 +786,10 @@ def p_multitable(p): colspec = '\n'.join(['<colspec colwidth="%s*"/>' % (c) for c in colfrac]) header = '<thead>' + p[2] + '</thead>\n' body = '<tbody>' + p[3] + '</tbody>\n' - p[0] = '<informaltable><tgroup cols="' + str(len(colfrac)) +'">' + colspec + header + body + '</tgroup></informaltable>' + p[0] = '<informaltable><tgroup cols="' + str(len(colfrac)) + '">' + colspec + header + body + '</tgroup></informaltable>' parser_verbose(p) + def p_error(t): sys.exit('parse error at line %d, token %s, next token %s' % (t.lineno, t, parser.token())) @@ -831,10 +831,9 @@ def main(file): # # # - -if __name__ == '__main__' : +if __name__ == '__main__': options = OptionParser() - options.add_option('-v', '--verbose', action='count', dest = 'verbose', default = 0) + options.add_option('-v', '--verbose', action='count', dest='verbose', default=0) (opts, args) = options.parse_args() verbose = opts.verbose -- 2.38.1
During 'make man', makedocbook falsely reports "texinfo command '@modifier' remains in output" while processing the setlocale(3) manpage, which contains that literal string. Move the check for unrecognized texinfo commands to before processing '@@' (an escaped '@') in the texinfo source, and teach it to ignore them. Improve that check slightly, so it catches non-alphabetic texinfo commands, of which there are few. Now we don't have false positives, we can make unrecognized texinfo commands fatal to manpage generation, rather than leaving them verbatim in the generated manpage. --- newlib/doc/makedocbook.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/newlib/doc/makedocbook.py b/newlib/doc/makedocbook.py index 4de20ef92..9c5615f22 100755 --- a/newlib/doc/makedocbook.py +++ b/newlib/doc/makedocbook.py @@ -450,9 +450,6 @@ command_dispatch_dict = { def line_markup_convert(p): s = p - # process the texinfo escape for an @ - s = s.replace('@@', '@') - # escape characters not allowed in XML s = s.replace('&', '&amp;') s = s.replace('<', '&lt;') @@ -482,6 +479,14 @@ def line_markup_convert(p): # very hacky way of dealing with @* to force a newline s = s.replace('@*', '</para><para>') + # fail if there are unhandled texinfo commands + match = re.search(r'(?<!@)@[^@\s]+', s) + if match: + sys.exit("texinfo command '%s' remains in output" % match.group(0)) + + # process the texinfo escape for an @ + s = s.replace('@@', '@') + if (verbose > 3) and (s != p): print('%s-> line_markup_convert ->\n%s' % (p, s), file=sys.stderr) @@ -823,10 +828,6 @@ def main(file): print(s) - # warn about texinfo commands which didn't get processed - match = re.search(r'@[a-z*]+', s) - if match: - print('texinfo command %s remains in output' % match.group(), file=sys.stderr) # # -- 2.38.1
Hi, I'm no committer, so don't take my comment as such. On 2022-11-04 14:49, Jon Turney wrote: > Use sys.exit() to write a message to stderr and terminate with a > non-zero exit code. > --- > newlib/doc/makedocbook.py | 16 ++++++---------- > 1 file changed, 6 insertions(+), 10 deletions(-) > > diff --git a/newlib/doc/makedocbook.py b/newlib/doc/makedocbook.py > index 5e46082df..57cd23bfd 100755 > --- a/newlib/doc/makedocbook.py > +++ b/newlib/doc/makedocbook.py > @@ -214,8 +214,7 @@ def function(c, l): > > # FUNCTION implies starting a new refentry > if refentry is not None: > - print("multiple FUNCTIONs without NEWPAGE", file=sys.stderr) > - exit(1) > + sys.exit("multiple FUNCTIONs without NEWPAGE") > > # create the refentry > refentry = lxml.etree.SubElement(rootelement, 'refentry') > @@ -308,17 +307,15 @@ def synopsis(c, t): > > # a prototype without a terminating ';' is an error > if s.endswith(')'): > - print("'%s' missing terminating semicolon" % l, file=sys.stderr) > + sys.exit("'%s' missing terminating semicolon" % l) I'm not sure when it was introduced in python, but you can use this syntax: sys.exit(f"'{l}' missing terminating semicolon") If you don't want to use this fancy syntax, consider to at least do: sys.exit("'%s' missing terminating semicolon" % (l,)) Kind regards, Torbjörn > s = s + ';' > - exit(1) > > if ';' in s: > synopsis_for_prototype(funcsynopsis, s) > s = '' > > if s.strip(): > - print("surplus synopsis '%s'" % s, file=sys.stderr) > - exit(1) > + sys.exit("surplus synopsis '%s'" % s) > > def synopsis_for_prototype(funcsynopsis, s): > s = s.strip() > @@ -591,8 +588,7 @@ def t_eof(t): > > # Error handling rule > def t_error(t): > - print("tokenization error, remaining text '%s'" % t.value, file=sys.stderr) > - exit(1) > + sys.exit("tokenization error, remaining text '%s'" % t.value) > > lexer = lex.lex() > > @@ -795,8 +791,8 @@ def p_multitable(p): > parser_verbose(p) > > def p_error(t): > - print('parse error at line %d, token %s, next 
token %s' % (t.lineno, t, parser.token()), file=sys.stderr) > - exit(1) > + sys.exit('parse error at line %d, token %s, next token %s' % (t.lineno, t, parser.token())) > + > > # protect creating the parser with a lockfile, so that when multiple processes > # are running this script simultaneously, we don't get one of them generating a
[-- Attachment #1: Type: text/plain, Size: 1581 bytes --] On 04 Nov 2022 15:52, Torbjorn SVENSSON wrote: > On 2022-11-04 14:49, Jon Turney wrote: > > Use sys.exit() to write a message to stderr and terminate with a > > non-zero exit code. > > --- > > newlib/doc/makedocbook.py | 16 ++++++---------- > > 1 file changed, 6 insertions(+), 10 deletions(-) > > > > diff --git a/newlib/doc/makedocbook.py b/newlib/doc/makedocbook.py > > index 5e46082df..57cd23bfd 100755 > > --- a/newlib/doc/makedocbook.py > > +++ b/newlib/doc/makedocbook.py > > @@ -214,8 +214,7 @@ def function(c, l): > > > > # FUNCTION implies starting a new refentry > > if refentry is not None: > > - print("multiple FUNCTIONs without NEWPAGE", file=sys.stderr) > > - exit(1) > > + sys.exit("multiple FUNCTIONs without NEWPAGE") > > > > # create the refentry > > refentry = lxml.etree.SubElement(rootelement, 'refentry') > > @@ -308,17 +307,15 @@ def synopsis(c, t): > > > > # a prototype without a terminating ';' is an error > > if s.endswith(')'): > > - print("'%s' missing terminating semicolon" % l, file=sys.stderr) > > + sys.exit("'%s' missing terminating semicolon" % l) > > I'm not sure when it was introduced in python, but you can use this syntax: > sys.exit(f"'{l}' missing terminating semicolon") f-strings are new to Python 3.6. i don't know what version we want to require when building from git (non-releases). other GNU toolchain projects seem to be OK with requiring newer versions like 3.6. -mike [-- Attachment #2: signature.asc --] [-- Type: application/pgp-signature, Size: 833 bytes --]
On Nov 5 21:28, Mike Frysinger wrote:
> lgtm, thanks
> -mike
Thanks Mike!
Jon, please push.
Thanks,
Corinna