From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: by sourceware.org (Postfix, from userid 1499)
	id 14B1D3858D20; Wed, 9 Nov 2022 19:05:33 +0000 (GMT)
DKIM-Filter: OpenDKIM Filter v2.11.0 sourceware.org 14B1D3858D20
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gcc.gnu.org;
	s=default; t=1668020733;
	bh=LVlOQfbJ8XLUgy4lhcEEej4trSduL7kfaaOem9ZWL2M=;
	h=From:To:Subject:Date:From;
	b=uipJ+pa0y0XYJi1IM/KZNzFrPiEUkoixvsYSf+t2B/uhgMIhLqbqiHFGJ5a6eZYOe
	 lImHdKxjQJqaq1Ul+PCsFRXd3DKYvTnT00vGv/+1XVdCPXiOOgt5LhbFXIbdVFKth4
	 PEemIKKV5/cW+sTuQfO/XkptJ4uY6Qtg5Jm6QnLg=
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
From: Gaius Mulley
To: gcc-cvs@gcc.gnu.org
Subject: [gcc/devel/modula-2] gcc/m2/tools-src/*.py coding style changes.
X-Act-Checkin: gcc
X-Git-Author: Gaius Mulley
X-Git-Refname: refs/heads/devel/modula-2
X-Git-Oldrev: fe8cdf1cab19139649846f0c168690f99e35241e
X-Git-Newrev: d1eed6043121a0f546b60b89b8d508c3f1adb0ac
Message-Id: <20221109190533.14B1D3858D20@sourceware.org>
Date: Wed, 9 Nov 2022 19:05:33 +0000 (GMT)
List-Id:

https://gcc.gnu.org/g:d1eed6043121a0f546b60b89b8d508c3f1adb0ac

commit d1eed6043121a0f546b60b89b8d508c3f1adb0ac
Author: Gaius Mulley
Date:   Wed Nov 9 19:03:57 2022 +0000

    gcc/m2/tools-src/*.py coding style changes.

    Use pythonic naming scheme.
    Use python3 string method invocation.
    Replace double quotes by single quotes.

    gcc/m2/ChangeLog:

            * tools-src/boilerplate.py: Name style changes.
            Double quotes to single quotes.
            String method invocation changes.
            * tools-src/def2doc.py: Name style changes.
            Double quotes to single quotes.
            String method invocation changes.
            * tools-src/tidydates.py: Name style changes.
            Double quotes to single quotes.
            String method invocation changes.

    Signed-off-by: Gaius Mulley

Diff:
---
 gcc/m2/tools-src/boilerplate.py | 210 ++++++++---------
 gcc/m2/tools-src/def2doc.py     | 484 ++++++++++++++++++++--------------------
 gcc/m2/tools-src/tidydates.py   | 110 ++++-----
 3 files changed, 402 insertions(+), 402 deletions(-)

diff --git a/gcc/m2/tools-src/boilerplate.py b/gcc/m2/tools-src/boilerplate.py
index 697972f123b..0976aab2671 100644
--- a/gcc/m2/tools-src/boilerplate.py
+++ b/gcc/m2/tools-src/boilerplate.py
@@ -28,9 +28,9 @@
 import os
 import sys
 
-errorCount = 0
-seenFiles = []
-outputName = None
+error_count = 0
+seen_files = []
+output_name = None
 
 ISO_COPYRIGHT = "Copyright ISO/IEC"
 COPYRIGHT = "Copyright (C)"
@@ -51,14 +51,14 @@ def printf(fmt, *args):
 
 
 def error(fmt, *args):
     # error - issue an error message.
-    global errorCount
+    global error_count
     print(str(fmt) % args, end=' ')
-    errorCount += 1
+    error_count += 1
 
 
-def haltOnError():
-    if errorCount > 0:
+def halt_on_error():
+    if error_count > 0:
         os.sys.exit(1)
 
 
@@ -67,8 +67,8 @@ def basename(f):
     return b[-1]
 
 
-def analyseComment(text, f):
-    # analyseComment determine the license from the top comment.
+def analyse_comment(text, f):
+    # analyse_comment determine the license from the top comment.
     start_date, end_date = None, None
     contribution, summary, lic = None, None, None
     if text.find(ISO_COPYRIGHT) > 0:
@@ -112,7 +112,7 @@ def analyseComment(text, f):
     return start_date, end_date, contribution, summary, lic
 
 
-def analyseHeaderWithoutTerminator(f, start):
+def analyse_header_without_terminator(f, start):
     text = ""
     for count, l in enumerate(open(f).readlines()):
         parts = l.split(start)
@@ -122,11 +122,11 @@ def analyseHeaderWithoutTerminator(f, start):
                 text += " "
                 text += line
         elif (l.rstrip() != "") and (len(parts[0]) > 0):
-            return analyseComment(text, f), count
+            return analyse_comment(text, f), count
     return [None, None, None, None, None], 0
 
 
-def analyseHeaderWithTerminator(f, start, end):
+def analyse_header_with_terminator(f, start, end):
     inComment = False
     text = ""
     for count, line in enumerate(open(f).readlines()):
@@ -147,26 +147,26 @@ def analyseHeaderWithTerminator(f, start, end):
                 if (pos >= 0) and (len(line) > len(start)):
                     before = line[:pos].strip()
                     if before != "":
-                        return analyseComment(text, f), count
+                        return analyse_comment(text, f), count
                     line = line[pos + len(start):]
                     inComment = True
                 elif (line != "") and (line == end):
                     line = ""
                 else:
-                    return analyseComment(text, f), count
+                    return analyse_comment(text, f), count
     return [None, None, None, None, None], 0
 
 
-def analyseHeader(f, start, end):
-    # analyseHeader -
+def analyse_header(f, start, end):
+    # analyse_header -
     if end is None:
-        return analyseHeaderWithoutTerminator(f, start)
+        return analyse_header_without_terminator(f, start)
     else:
-        return analyseHeaderWithTerminator(f, start, end)
+        return analyse_header_with_terminator(f, start, end)
 
 
-def addStop(sentence):
-    # addStop - add a full stop to a sentance.
+def add_stop(sentence):
+    # add_stop - add a full stop to a sentance.
     if sentence is None:
         return None
     sentence = sentence.rstrip()
@@ -269,7 +269,7 @@ templates["LGPLv2.1"] = LGPLv3
 templates["BSISO"] = BSISO
 
 
-def writeTemplate(fo, magic, start, end, dates, contribution, summary, lic):
+def write_template(fo, magic, start, end, dates, contribution, summary, lic):
     if lic in templates:
         if lic == "BSISO":
             # non gpl but freely distributed for the implementation of a
@@ -279,8 +279,8 @@ def writeTemplate(fo, magic, start, end, dates, contribution, summary, lic):
         else:
             summary = summary.lstrip()
             contribution = contribution.lstrip()
-            summary = addStop(summary)
-            contribution = addStop(contribution)
+            summary = add_stop(summary)
+            contribution = add_stop(contribution)
         if magic is not None:
             fo.write(magic)
             fo.write("\n")
@@ -311,40 +311,40 @@ def writeTemplate(fo, magic, start, end, dates, contribution, summary, lic):
     return fo
 
 
-def writeBoilerPlate(fo, magic, start, end,
-                     start_date, end_date, contribution, summary, gpl):
+def write_boiler_plate(fo, magic, start, end,
+                       start_date, end_date, contribution, summary, gpl):
     if start_date == end_date:
         dates = start_date
     else:
         dates = "%s-%s" % (start_date, end_date)
-    return writeTemplate(fo, magic, start, end,
-                         dates, contribution, summary, gpl)
+    return write_template(fo, magic, start, end,
+                          dates, contribution, summary, gpl)
 
 
-def rewriteFile(f, magic, start, end, start_date, end_date,
-                contribution, summary, gpl, lines):
+def rewrite_file(f, magic, start, end, start_date, end_date,
+                 contribution, summary, gpl, lines):
     text = "".join(open(f).readlines()[lines:])
-    if outputName == "-":
+    if output_name == "-":
         fo = sys.stdout
     else:
         fo = open(f, "w")
-    fo = writeBoilerPlate(fo, magic, start, end,
-                          start_date, end_date, contribution, summary, gpl)
+    fo = write_boiler_plate(fo, magic, start, end,
+                            start_date, end_date, contribution, summary, gpl)
     fo.write(text)
     fo.flush()
-    if outputName != "-":
+    if output_name != "-":
         fo.close()
 
 
-def handleHeader(f, magic, start, end):
-    # handleHeader keep reading lines of file, f, looking for start, end
+def handle_header(f, magic, start, end):
+    # handle_header keep reading lines of file, f, looking for start, end
     # sequences and comments inside.  The comments are checked for:
     # date, contribution, summary
-    global errorCount
+    global error_count
 
-    errorCount = 0
+    error_count = 0
     [start_date, end_date,
-     contribution, summary, lic], lines = analyseHeader(f, start, end)
+     contribution, summary, lic], lines = analyse_header(f, start, end)
     if lic is None:
         error("%s:1:no GPL found at the top of the file\n", f)
     else:
@@ -373,7 +373,7 @@ def handleHeader(f, magic, start, end):
                   "GPL at the top of the file\n", f)
         else:
             summary = args.summary
-    if errorCount == 0:
+    if error_count == 0:
         now = datetime.datetime.now()
         if args.no:
             print(f, "suppressing change as requested: %s-%s %s"
@@ -386,68 +386,68 @@ def handleHeader(f, magic, start, end):
             lic = "GPLv3x"
         elif args.gpl3:
             lic = "GPLv3"
-        rewriteFile(f, magic, start, end, start_date,
-                    str(now.year), contribution, summary, lic, lines)
+        rewrite_file(f, magic, start, end, start_date,
+                     str(now.year), contribution, summary, lic, lines)
     else:
         printf("too many errors, no modifications will occur\n")
 
 
-def bashTidy(f):
-    # bashTidy tidy up dates using '#' comment
-    handleHeader(f, "#!/bin/bash", "#", None)
+def bash_tidy(f):
+    # bash_tidy tidy up dates using '#' comment
+    handle_header(f, "#!/bin/bash", "#", None)
 
 
-def pythonTidy(f):
-    # pythonTidy tidy up dates using '#' comment
-    handleHeader(f, "#!/usr/bin/env python3", '#', None)
+def python_tidy(f):
-    # python_tidy tidy up dates using '#' comment
+    # python_tidy tidy up dates using '#' comment
+    handle_header(f, "#!/usr/bin/env python3", '#', None)
 
 
-def bnfTidy(f):
-    # bnfTidy tidy up dates using '--' comment
-    handleHeader(f, None, '--', None)
+def bnf_tidy(f):
+    # bnf_tidy tidy up dates using '--' comment
+    handle_header(f, None, '--', None)
 
 
-def cTidy(f):
-    # cTidy tidy up dates using '/* */' comments
-    handleHeader(f, None, '/*', '*/')
+def c_tidy(f):
+    # c_tidy tidy up dates using '/* */' comments
+    handle_header(f, None, '/*', '*/')
 
 
-def m2Tidy(f):
-    # m2Tidy tidy up dates using '(* *)' comments
-    handleHeader(f, None, '(*', '*)')
+def m2_tidy(f):
+    # m2_tidy tidy up dates using '(* *)' comments
+    handle_header(f, None, '(*', '*)')
 
 
-def inTidy(f):
-    # inTidy tidy up dates using '#' as a comment and check
+def in_tidy(f):
+    # in_tidy tidy up dates using '#' as a comment and check
     # the first line for magic number.
     first = open(f).readlines()[0]
     if (len(first) > 0) and (first[:2] == "#!"):
         # magic number found, use this
-        handleHeader(f, first, "#", None)
+        handle_header(f, first, "#", None)
     else:
-        handleHeader(f, None, "#", None)
+        handle_header(f, None, "#", None)
 
 
-def doVisit(args, dirname, names):
-    # doVisit helper function to call func on every extension file.
-    global outputName
+def do_visit(args, dirname, names):
+    # do_visit helper function to call func on every extension file.
+    global output_name
     func, extension = args
     for f in names:
        if len(f) > len(extension) and f[-len(extension):] == extension:
-            outputName = f
+            output_name = f
             func(os.path.join(dirname, f))
 
 
-def visitDir(startDir, ext, func):
-    # visitDir call func for each file in startDir which has ext.
-    global outputName, seenFiles
+def visit_dir(startDir, ext, func):
+    # visit_dir call func for each file in startDir which has ext.
+    global output_name, seen_files
     for dirName, subdirList, fileList in os.walk(startDir):
         for fname in fileList:
             if (len(fname) > len(ext)) and (fname[-len(ext):] == ext):
                 fullpath = os.path.join(dirName, fname)
-                outputName = fullpath
-                if not (fullpath in seenFiles):
-                    seenFiles += [fullpath]
+                output_name = fullpath
+                if not (fullpath in seen_files):
+                    seen_files += [fullpath]
                     func(fullpath)
         # Remove the first entry in the list of sub-directories
         # if there are any sub-directories present
@@ -455,22 +455,22 @@ def visitDir(startDir, ext, func):
             del subdirList[0]
 
 
-def findFiles():
-    # findFiles for each file extension call the appropriate tidy routine.
-    visitDir(args.recursive, '.h.in', cTidy)
-    visitDir(args.recursive, '.in', inTidy)
-    visitDir(args.recursive, '.sh', inTidy)
-    visitDir(args.recursive, '.py', pythonTidy)
-    visitDir(args.recursive, '.c', cTidy)
-    visitDir(args.recursive, '.h', cTidy)
-    visitDir(args.recursive, '.cc', cTidy)
-    visitDir(args.recursive, '.def', m2Tidy)
-    visitDir(args.recursive, '.mod', m2Tidy)
-    visitDir(args.recursive, '.bnf', bnfTidy)
+def find_files():
+    # find_files for each file extension call the appropriate tidy routine.
+    visit_dir(args.recursive, '.h.in', c_tidy)
+    visit_dir(args.recursive, '.in', in_tidy)
+    visit_dir(args.recursive, '.sh', in_tidy)
+    visit_dir(args.recursive, '.py', python_tidy)
+    visit_dir(args.recursive, '.c', c_tidy)
+    visit_dir(args.recursive, '.h', c_tidy)
+    visit_dir(args.recursive, '.cc', c_tidy)
+    visit_dir(args.recursive, '.def', m2_tidy)
+    visit_dir(args.recursive, '.mod', m2_tidy)
+    visit_dir(args.recursive, '.bnf', bnf_tidy)
 
 
-def handleArguments():
-    # handleArguments create and return the args object.
+def handle_arguments():
+    # handle_arguments create and return the args object.
     parser = argparse.ArgumentParser()
     parser.add_argument("-c", "--contribution",
                         help="set the contribution string " +
@@ -509,40 +509,40 @@ def handleArguments():
     return args
 
 
-def hasExt(name, ext):
-    # hasExt return True if, name, ends with, ext.
+def has_ext(name, ext):
+    # has_ext return True if, name, ends with, ext.
     if len(name) > len(ext):
         return name[-len(ext):] == ext
     return False
 
 
-def singleFile(name):
-    # singleFile scan the single file for a GPL boilerplate which
+def single_file(name):
+    # single_file scan the single file for a GPL boilerplate which
     # has a GPL, contribution field and a summary heading.
-    if hasExt(name, ".def") or hasExt(name, ".mod"):
-        m2Tidy(name)
-    elif hasExt(name, ".h") or hasExt(name, ".c") or hasExt(name, ".cc"):
-        cTidy(name)
-    elif hasExt(name, ".in"):
-        inTidy(name)
-    elif hasExt(name, ".sh"):
-        inTidy(name)  # uses magic number for actual sh/bash
-    elif hasExt(name, ".py"):
-        pythonTidy(name)
+    if has_ext(name, ".def") or has_ext(name, ".mod"):
+        m2_tidy(name)
+    elif has_ext(name, ".h") or has_ext(name, ".c") or has_ext(name, ".cc"):
+        c_tidy(name)
+    elif has_ext(name, ".in"):
+        in_tidy(name)
+    elif has_ext(name, ".sh"):
+        in_tidy(name)  # uses magic number for actual sh/bash
+    elif has_ext(name, ".py"):
+        python_tidy(name)
 
 
 def main():
-    # main - handleArguments and then find source files.
-    global args, outputName
-    args = handleArguments()
-    outputName = args.outputfile
+    # main - handle_arguments and then find source files.
+    global args, output_name
+    args = handle_arguments()
+    output_name = args.outputfile
     if args.recursive:
-        findFiles()
+        find_files()
     elif args.inputfile is None:
         print("an input file must be specified on the command line")
     else:
-        singleFile(args.inputfile)
-        haltOnError()
+        single_file(args.inputfile)
+        halt_on_error()
 
 
 main()
diff --git a/gcc/m2/tools-src/def2doc.py b/gcc/m2/tools-src/def2doc.py
index 0412d518eb7..7afba96b668 100755
--- a/gcc/m2/tools-src/def2doc.py
+++ b/gcc/m2/tools-src/def2doc.py
@@ -26,281 +26,281 @@ import argparse
 import os
 import sys
 
-BaseLibs = ["gm2-libs", "Base libraries", "Basic M2F compatible libraries"]
+Base_Libs = ['gm2-libs', 'Base libraries', 'Basic M2F compatible libraries']
 
-PIMLogDesc = "PIM and Logitech 3.0 compatible libraries"
-PIMLog = ["gm2-libs-pim", "PIM and Logitech 3.0 Compatible", PIMLogDesc]
-PIMCorDesc = "PIM compatible process support"
-PIMCor = ["gm2-libs-coroutines", "PIM coroutine support", PIMCorDesc]
-ISOLibs = ["gm2-libs-iso", "M2 ISO Libraries", "ISO defined libraries"]
+PIM_Log_Desc = 'PIM and Logitech 3.0 compatible libraries'
+PIM_Log = ['gm2-libs-pim', 'PIM and Logitech 3.0 Compatible', PIM_Log_Desc]
+PIM_Cor_Desc = 'PIM compatible process support'
+PIM_Cor = ['gm2-libs-coroutines', 'PIM coroutine support', PIM_Cor_Desc]
+ISO_Libs = ['gm2-libs-iso', 'M2 ISO Libraries', 'ISO defined libraries']
 
-libraryClassifications = [BaseLibs, PIMLog, PIMCor, ISOLibs]
+library_classifications = [Base_Libs, PIM_Log, PIM_Cor, ISO_Libs]
 
 
-def initState():
-    global inVar, inType, inConst
-    inVar, inType, inConst = False, False, False
+def init_state():
+    global in_var, in_type, in_const
+    in_var, in_type, in_const = False, False, False
 
 
-def emitNode(name, nxt, previous, up):
+def emit_node(name, nxt, previous, up):
     if args.texinfo:
-        output.write("@node " + name + ", " + nxt + ", ")
-        output.write(previous + ", " + up + "\n")
+        output.write('@node ' + name + ', ' + nxt + ', ')
+        output.write(previous + ', ' + up + '\n')
     elif args.sphinx:
-        output.write("@c @node " + name + ", " + nxt + ", ")
-        output.write(previous + ", " + up + "\n")
+        output.write('@c @node ' + name + ', ' + nxt + ', ')
+        output.write(previous + ', ' + up + '\n')
 
 
-def emitSection(name):
+def emit_section(name):
     if args.texinfo:
-        output.write("@section " + name + "\n")
+        output.write('@section ' + name + '\n')
     elif args.sphinx:
-        output.write(name + "\n")
-        output.write("=" * len(name) + "\n")
+        output.write(name + '\n')
+        output.write('=' * len(name) + '\n')
 
 
-def emitSubSection(name):
+def emit_sub_section(name):
     if args.texinfo:
-        output.write("@subsection " + name + "\n")
+        output.write('@subsection ' + name + '\n')
     elif args.sphinx:
-        output.write(name + "\n")
-        output.write("-" * len(name) + "\n")
+        output.write(name + '\n')
+        output.write('-' * len(name) + '\n')
 
 
-def displayLibraryClass():
-    # displayLibraryClass displays a node for a library directory and invokes
+def display_library_class():
+    # display_library_class displays a node for a library directory and invokes
     # a routine to summarize each module.
     global args
-    previous = ""
-    nxt = libraryClassifications[1][1]
-    i = 0
-    lib = libraryClassifications[i]
-    while True:
-        emitNode(lib[1], nxt, previous, args.up)
-        emitSection(lib[1])
-        output.write("\n")
-        displayModules(lib[1], lib[0], args.builddir, args.sourcedir)
-        output.write("\n")
-        output.write("@c " + "-" * 60 + "\n")
+    previous = ''
+    nxt = library_classifications[1][1]
+    i = 0
+    lib = library_classifications[i]
+    while True:
+        emit_node(lib[1], nxt, previous, args.up)
+        emit_section(lib[1])
+        output.write('\n')
+        display_modules(lib[1], lib[0], args.builddir, args.sourcedir)
+        output.write('\n')
+        output.write('@c ' + '-' * 60 + '\n')
         previous = lib[1]
         i += 1
-        if i == len(libraryClassifications):
-            break
-        lib = libraryClassifications[i]
-        if i+1 == len(libraryClassifications):
-            nxt = ""
+        if i == len(library_classifications):
+            break
+        lib = library_classifications[i]
+        if i+1 == len(library_classifications):
+            nxt = ''
         else:
-            nxt = libraryClassifications[i+1][1]
+            nxt = library_classifications[i+1][1]
 
 
-def displayMenu():
-    # displayMenu displays the top level menu for library documentation.
-    output.write("@menu\n")
-    for lib in libraryClassifications:
-        output.write("* " + lib[1] + "::" + lib[2] + "\n")
-    output.write("@end menu\n")
-    output.write("\n")
-    output.write("@c " + "=" * 60 + "\n")
-    output.write("\n")
+def display_menu():
+    # display_menu displays the top level menu for library documentation.
+    output.write('@menu\n')
+    for lib in library_classifications:
+        output.write('* ' + lib[1] + '::' + lib[2] + '\n')
+    output.write('@end menu\n')
+    output.write('\n')
+    output.write('@c ' + '=' * 60 + '\n')
+    output.write('\n')
 
 
-def removeInitialComments(file, line):
-    # removeInitialComments removes any (* *) at the top
+def remote_initial_comments(file, line):
+    # remote_initial_comments removes any (* *) at the top
     # of the definition module.
-    while (str.find(line, "*)") == -1):
+    while (line.find('*)') == -1):
         line = file.readline()
 
 
-def removeableField(line):
-    # removeableField - returns True if a comment field should be removed
+def removeable_field(line):
+    # removeable_field - returns True if a comment field should be removed
     # from the definition module.
-    field_list = ["Author", "Last edit", "LastEdit", "Last update",
-                  "Date", "Title", "Revision"]
+    field_list = ['Author', 'Last edit', 'LastEdit', 'Last update',
+                  'Date', 'Title', 'Revision']
     for field in field_list:
-        if (str.find(line, field) != -1) and (str.find(line, ":") != -1):
+        if (line.find(field) != -1) and (line.find(':') != -1):
             return True
-    ignore_list = ["System", "SYSTEM"]
+    ignore_list = ['System', 'SYSTEM']
     for ignore_field in ignore_list:
-        if str.find(line, ignore_field) != -1:
-            if str.find(line, ":") != -1:
-                if str.find(line, "Description:") == -1:
+        if line.find(ignore_field) != -1:
+            if line.find(':') != -1:
+                if line.find('Description:') == -1:
                     return True
     return False
 
 
-def removeFields(file, line):
-    # removeFields removes Author/Date/Last edit/SYSTEM/Revision
+def remove_fields(file, line):
+    # remove_fields removes Author/Date/Last edit/SYSTEM/Revision
     # fields from a comment within the start of a definition module.
-    while (str.find(line, "*)") == -1):
-        if not removeableField(line):
+    while (line.find('*)') == -1):
+        if not removeable_field(line):
             output.write(str.replace(str.replace(str.rstrip(line),
-                                                 "{", "@{"), "}", "@}") + "\n")
+                                                 '{', '@{'), '}', '@}') + '\n')
         line = file.readline()
-    output.write(str.rstrip(line) + "\n")
+    output.write(line.rstrip() + '\n')
 
 
-def checkIndex(line):
-    # checkIndex - create an index entry for a PROCEDURE, TYPE, CONST or VAR.
-    global inVar, inType, inConst
+def check_index(line):
+    # check_index - create an index entry for a PROCEDURE, TYPE, CONST or VAR.
+    global in_var, in_type, in_const
 
-    words = str.split(line)
-    procedure = ""
-    if (len(words) > 1) and (words[0] == "PROCEDURE"):
-        inConst = False
-        inType = False
-        inVar = False
-        if (words[1] == "__BUILTIN__") and (len(words) > 2):
+    words = line.split()
+    procedure = ''
+    if (len(words) > 1) and (words[0] == 'PROCEDURE'):
+        in_const = False
+        in_type = False
+        in_var = False
+        if (words[1] == '__BUILTIN__') and (len(words) > 2):
             procedure = words[2]
         else:
             procedure = words[1]
-    if (len(line) > 1) and (line[0:2] == "(*"):
-        inConst = False
-        inType = False
-        inVar = False
-    elif line == "VAR":
-        inConst = False
-        inVar = True
-        inType = False
+    if (len(line) > 1) and (line[0:2] == '(*'):
+        in_const = False
+        in_type = False
+        in_var = False
+    elif line == 'VAR':
+        in_const = False
+        in_var = True
+        in_type = False
         return
-    elif line == "TYPE":
-        inConst = False
-        inType = True
-        inVar = False
+    elif line == 'TYPE':
+        in_const = False
+        in_type = True
+        in_var = False
        return
-    elif line == "CONST":
-        inConst = True
-        inType = False
-        inVar = False
-    if inVar:
-        words = str.split(line, ",")
+    elif line == 'CONST':
+        in_const = True
+        in_type = False
+        in_var = False
+    if in_var:
+        words = line.split(',')
         for word in words:
-            word = str.lstrip(word)
-            if word != "":
-                if str.find(word, ":") == -1:
-                    output.write("@findex " + word + " (var)\n")
+            word = word.lstrip()
+            if word != '':
+                if word.find(':') == -1:
+                    output.write('@findex ' + word + ' (var)\n')
                 elif len(word) > 0:
-                    var = str.split(word, ":")
+                    var = word.split(':')
                     if len(var) > 0:
-                        output.write("@findex " + var[0] + " (var)\n")
-
-    if inType:
-        words = str.lstrip(line)
-        if str.find(words, "=") != -1:
-            word = str.split(words, "=")
-            if (len(word[0]) > 0) and (word[0][0] != "_"):
-                output.write("@findex " + str.rstrip(word[0]) + " (type)\n")
+                        output.write('@findex ' + var[0] + ' (var)\n')
+
+    if in_type:
+        words = line.lstrip()
+        if words.find('=') != -1:
+            word = words.split('=')
+            if (len(word[0]) > 0) and (word[0][0] != '_'):
+                output.write('@findex ' + word[0].rstrip() + ' (type)\n')
         else:
-            word = str.split(words)
-            if (len(word) > 1) and (word[1] == ";"):
+            word = words.split()
+            if (len(word) > 1) and (word[1] == ';'):
                 # hidden type
-                if (len(word[0]) > 0) and (word[0][0] != "_"):
-                    output.write("@findex " + str.rstrip(word[0]))
-                    output.write(" (type)\n")
-    if inConst:
-        words = str.split(line, ";")
+                if (len(word[0]) > 0) and (word[0][0] != '_'):
+                    output.write('@findex ' + word[0].rstrip())
+                    output.write(' (type)\n')
+    if in_const:
+        words = line.split(';')
         for word in words:
-            word = str.lstrip(word)
-            if word != "":
-                if str.find(word, "=") != -1:
-                    var = str.split(word, "=")
+            word = word.lstrip()
+            if word != '':
+                if word.find('=') != -1:
+                    var = word.split('=')
                     if len(var) > 0:
-                        output.write("@findex " + var[0] + " (const)\n")
-    if procedure != "":
-        name = str.split(procedure, "(")
-        if name[0] != "":
+                        output.write('@findex ' + var[0] + ' (const)\n')
+    if procedure != '':
+        name = procedure.split('(')
+        if name[0] != '':
             proc = name[0]
-            if proc[-1] == ";":
+            if proc[-1] == ';':
                 proc = proc[:-1]
-            if proc != "":
-                output.write("@findex " + proc + "\n")
+            if proc != '':
+                output.write('@findex ' + proc + '\n')
 
 
-def parseDefinition(dir, source, build, file, needPage):
-    # parseDefinition reads a definition module and creates
+def parse_definition(dir, source, build, file, needPage):
+    # parse_definition reads a definition module and creates
     # indices for procedures, constants, variables and types.
-    output.write("\n")
-    with open(findFile(dir, build, source, file), "r") as f:
-        initState()
+    output.write('\n')
+    with open(find_file(dir, build, source, file), 'r') as f:
+        init_state()
         line = f.readline()
-        while (str.find(line, "(*") != -1):
-            removeInitialComments(f, line)
+        while (line.find('(*') != -1):
+            remote_initial_comments(f, line)
             line = f.readline()
-        while (str.find(line, "DEFINITION") == -1):
+        while (line.find('DEFINITION') == -1):
             line = f.readline()
-        output.write("@example\n")
-        output.write(str.rstrip(line) + "\n")
+        output.write('@example\n')
+        output.write(line.rstrip() + '\n')
         line = f.readline()
-        if len(str.rstrip(line)) == 0:
-            output.write("\n")
+        if len(line.rstrip()) == 0:
+            output.write('\n')
             line = f.readline()
-            if (str.find(line, "(*") != -1):
-                removeFields(f, line)
+            if (line.find('(*') != -1):
+                remove_fields(f, line)
             else:
-                output.write(str.rstrip(line) + "\n")
+                output.write(line.rstrip() + '\n')
         else:
-            output.write(str.rstrip(line) + "\n")
+            output.write(line.rstrip() + '\n')
         line = f.readline()
         while line:
-            line = str.rstrip(line)
-            checkIndex(line)
-            output.write(str.replace(str.replace(line, "{", "@{"), "}", "@}"))
-            output.write("\n")
+            line = line.rstrip()
+            check_index(line)
+            output.write(str.replace(str.replace(line, '{', '@{'), '}', '@}'))
+            output.write('\n')
             line = f.readline()
-    output.write("@end example\n")
+    output.write('@end example\n')
     if needPage:
-        output.write("@page\n")
+        output.write('@page\n')
 
 
-def parseModules(up, dir, build, source, listOfModules):
-    previous = ""
+def parse_modules(up, dir, build, source, list_of_modules):
+    previous = ''
     i = 0
-    if len(listOfModules) > 1:
-        nxt = dir + "/" + listOfModules[1][:-4]
+    if len(list_of_modules) > 1:
+        nxt = dir + '/' + list_of_modules[1][:-4]
     else:
-        nxt = ""
-    while i < len(listOfModules):
-        emitNode(dir + "/" + listOfModules[i][:-4], nxt, previous, up)
-        emitSubSection(dir + "/" + listOfModules[i][:-4])
-        parseDefinition(dir, source, build, listOfModules[i], True)
-        output.write("\n")
-        previous = dir + "/" + listOfModules[i][:-4]
+        nxt = ''
+    while i < len(list_of_modules):
+        emit_node(dir + '/' + list_of_modules[i][:-4], nxt, previous, up)
+        emit_sub_section(dir + '/' + list_of_modules[i][:-4])
+        parse_definition(dir, source, build, list_of_modules[i], True)
+        output.write('\n')
+        previous = dir + '/' + list_of_modules[i][:-4]
         i = i + 1
-        if i+1 < len(listOfModules):
-            nxt = dir + "/" + listOfModules[i+1][:-4]
+        if i+1 < len(list_of_modules):
+            nxt = dir + '/' + list_of_modules[i+1][:-4]
         else:
-            nxt = ""
+            nxt = ''
 
 
-def doCat(name):
-    # doCat displays the contents of file, name, to stdout
-    with open(name, "r") as file:
+def do_cat(name):
+    # do_cat displays the contents of file, name, to stdout
+    with open(name, 'r') as file:
         line = file.readline()
         while line:
-            output.write(str.rstrip(line) + "\n")
+            output.write(line.rstrip() + '\n')
             line = file.readline()
 
 
-def moduleMenu(dir, build, source):
-    # moduleMenu generates a simple menu for all definition modules
+def module_menu(dir, build, source):
+    # module_menu generates a simple menu for all definition modules
     # in dir
-    output.write("@menu\n")
-    listOfFiles = []
+    output.write('@menu\n')
+    list_of_files = []
     if os.path.exists(os.path.join(source, dir)):
-        listOfFiles += os.listdir(os.path.join(source, dir))
+        list_of_files += os.listdir(os.path.join(source, dir))
     if os.path.exists(os.path.join(source, dir)):
-        listOfFiles += os.listdir(os.path.join(build, dir))
-    listOfFiles = list(dict.fromkeys(listOfFiles).keys())
-    listOfFiles.sort()
-    for file in listOfFiles:
-        if foundFile(dir, build, source, file):
-            if (len(file) > 4) and (file[-4:] == ".def"):
-                output.write("* " + dir + "/" + file[:-4] + "::" + file + "\n")
-    output.write("@end menu\n")
-    output.write("\n")
+        list_of_files += os.listdir(os.path.join(build, dir))
+    list_of_files = list(dict.fromkeys(list_of_files).keys())
+    list_of_files.sort()
+    for file in list_of_files:
+        if found_file(dir, build, source, file):
+            if (len(file) > 4) and (file[-4:] == '.def'):
+                output.write('* ' + dir + '/' + file[:-4] + '::' + file + '\n')
+    output.write('@end menu\n')
+    output.write('\n')
 
 
-def checkDirectory(dir, build, source):
-    # checkDirectory - returns True if dir exists in either build or source.
+def check_directory(dir, build, source):
+    # check_directory - returns True if dir exists in either build or source.
     if os.path.isdir(build) and os.path.exists(os.path.join(build, dir)):
         return True
     elif os.path.isdir(source) and os.path.exists(os.path.join(source, dir)):
@@ -309,8 +309,8 @@ def checkDirectory(dir, build, source):
         return False
 
 
-def foundFile(dir, build, source, file):
-    # foundFile return True if file is found in build/dir/file or
+def found_file(dir, build, source, file):
+    # found_file return True if file is found in build/dir/file or
     # source/dir/file.
     name = os.path.join(os.path.join(build, dir), file)
     if os.path.exists(name):
@@ -321,8 +321,8 @@ def foundFile(dir, build, source, file):
     return False
 
 
-def findFile(dir, build, source, file):
-    # findFile return the path to file searching in build/dir/file
+def find_file(dir, build, source, file):
+    # find_file return the path to file searching in build/dir/file
     # first then source/dir/file.
     name1 = os.path.join(os.path.join(build, dir), file)
     if os.path.exists(name1):
@@ -330,90 +330,90 @@ def findFile(dir, build, source, file):
     name2 = os.path.join(os.path.join(source, dir), file)
     if os.path.exists(name2):
         return name2
-    sys.stderr.write("file cannot be found in either " + name1)
-    sys.stderr.write(" or " + name2 + "\n")
+    sys.stderr.write('file cannot be found in either ' + name1)
+    sys.stderr.write(' or ' + name2 + '\n')
     os.sys.exit(1)
 
 
-def displayModules(up, dir, build, source):
-    # displayModules walks though the files in dir and parses
+def display_modules(up, dir, build, source):
+    # display_modules walks though the files in dir and parses
     # definition modules and includes README.texi
-    if checkDirectory(dir, build, source):
-        if foundFile(dir, build, source, "README.texi"):
-            doCat(findFile(dir, build, source, "README.texi"))
-        moduleMenu(dir, build, source)
-        listOfFiles = []
+    if check_directory(dir, build, source):
+        if found_file(dir, build, source, 'README.texi'):
+            do_cat(find_file(dir, build, source, 'README.texi'))
+        module_menu(dir, build, source)
+        list_of_files = []
         if os.path.exists(os.path.join(source, dir)):
-            listOfFiles += os.listdir(os.path.join(source, dir))
+            list_of_files += os.listdir(os.path.join(source, dir))
        if os.path.exists(os.path.join(source, dir)):
-            listOfFiles += os.listdir(os.path.join(build, dir))
-        listOfFiles = list(dict.fromkeys(listOfFiles).keys())
-        listOfFiles.sort()
-        listOfModules = []
-        for file in listOfFiles:
-            if foundFile(dir, build, source, file):
-                if (len(file) > 4) and (file[-4:] == ".def"):
-                    listOfModules += [file]
-        listOfModules.sort()
-        parseModules(up, dir, build, source, listOfModules)
+            list_of_files += os.listdir(os.path.join(build, dir))
+        list_of_files = list(dict.fromkeys(list_of_files).keys())
+        list_of_files.sort()
+        list_of_modules = []
+        for file in list_of_files:
+            if found_file(dir, build, source, file):
+                if (len(file) > 4) and (file[-4:] == '.def'):
+                    list_of_modules += [file]
+        list_of_modules.sort()
+        parse_modules(up, dir, build, source, list_of_modules)
     else:
-        line = "directory " + dir + " not found in either "
-        line += build + " or " + source
-        sys.stderr.write(line + "\n")
+        line = 'directory ' + dir + ' not found in either '
+        line += build + ' or ' + source
+        sys.stderr.write(line + '\n')
 
 
-def displayCopyright():
-    output.write("@c Copyright (C) 2000-2022 Free Software Foundation, Inc.\n")
-    output.write("@c This file is part of GNU Modula-2.\n")
-    output.write("""
+def display_copyright():
+    output.write('@c Copyright (C) 2000-2022 Free Software Foundation, Inc.\n')
+    output.write('@c This file is part of GNU Modula-2.\n')
+    output.write('''
 @c Permission is granted to copy, distribute and/or modify this document
 @c under the terms of the GNU Free Documentation License, Version 1.2 or
 @c any later version published by the Free Software Foundation.
-""") +''') -def collectArgs(): +def collect_args(): parser = argparse.ArgumentParser() - parser.add_argument("-v", "--verbose", help="generate progress messages", - action="store_true") - parser.add_argument("-b", "--builddir", help="set the build directory", - default=".", action="store") - parser.add_argument("-f", "--inputfile", help="set the input file", - default=None, action="store") - parser.add_argument("-o", "--outputfile", help="set the output file", - default=None, action="store") - parser.add_argument("-s", "--sourcedir", help="set the source directory", - default=".", action="store") - parser.add_argument("-t", "--texinfo", - help="generate texinfo documentation", - default=False, action="store_true") - parser.add_argument("-u", "--up", help="set the up node", - default="", action="store") - parser.add_argument("-x", "--sphinx", help="generate sphinx documentation", - default=False, action="store_true") + parser.add_argument('-v', '--verbose', help='generate progress messages', + action='store_true') + parser.add_argument('-b', '--builddir', help='set the build directory', + default='.', action='store') + parser.add_argument('-f', '--inputfile', help='set the input file', + default=None, action='store') + parser.add_argument('-o', '--outputfile', help='set the output file', + default=None, action='store') + parser.add_argument('-s', '--sourcedir', help='set the source directory', + default='.', action='store') + parser.add_argument('-t', '--texinfo', + help='generate texinfo documentation', + default=False, action='store_true') + parser.add_argument('-u', '--up', help='set the up node', + default='', action='store') + parser.add_argument('-x', '--sphinx', help='generate sphinx documentation', + default=False, action='store_true') args = parser.parse_args() return args -def handleFile(): +def handle_file(): if args.inputfile is None: - displayCopyright() - displayMenu() - displayLibraryClass() + display_copyright() + display_menu() + display_library_class() else: - parseDefinition(".", args.sourcedir, args.builddir, + parse_definition('.', args.sourcedir, args.builddir, args.inputfile, False) def main(): global args, output - args = collectArgs() + args = collect_args() if args.outputfile is None: output = sys.stdout - handleFile() + handle_file() else: - with open(args.outputfile, "w") as output: - handleFile() + with open(args.outputfile, 'w') as output: + handle_file() main() diff --git a/gcc/m2/tools-src/tidydates.py b/gcc/m2/tools-src/tidydates.py index 11fb4b90c49..a28c0f1669a 100644 --- a/gcc/m2/tools-src/tidydates.py +++ b/gcc/m2/tools-src/tidydates.py @@ -26,27 +26,27 @@ import sys import pathlib import shutil -maxLineLength = 60 +max_line_length = 60 -COPYRIGHT = "Copyright (C)" +COPYRIGHT = 'Copyright (C)' -def visitDir(directory, ext, func): - # visitDir - call func for each file below, dir, matching extension, ext. - listOfFiles = os.listdir(directory) - listOfFiles.sort() - for filename in listOfFiles: +def visit_dir(directory, ext, func): + # visit_dir - call func for each file below, dir, matching extension, ext. + list_of_files = os.listdir(directory) + list_of_files.sort() + for filename in list_of_files: path = pathlib.PurePath(filename) full = os.path.join(directory, filename) if path.is_file(full): if path.suffix == ext: func(full) elif path.is_dir(full): - visitDir(full, ext, func) + visit_dir(full, ext, func) -def isYear(year): - # isYear - returns True if, year, is legal. +def is_year(year): + # is_year - returns True if, year, is legal. 
     if len(year) == 5:
         year = year[:-1]
     for c in year:
@@ -55,112 +55,112 @@ def isYear(year):
     return True
 
 
-def handleCopyright(outfile, lines, n, leader1, leader2):
-    # handleCopyright look for Copyright in the comment.
-    global maxLineLength
+def handle_copyright(outfile, lines, n, leader1, leader2):
+    # handle_copyright look for Copyright in the comment.
+    global max_line_length
 
     i = lines[n]
     c = i.find(COPYRIGHT)+len(COPYRIGHT)
     outfile.write(i[:c])
     d = i[c:].split()
     start = c
-    seenDate = True
+    seen_date = True
     years = []
-    while seenDate:
+    while seen_date:
        if d == []:
            n += 1
            i = lines[n]
            d = i[2:].split()
        else:
            e = d[0]
-            punctuation = ""
+            punctuation = ''
            if len(d) == 1:
                d = []
            else:
                d = d[1:]
-            if c > maxLineLength:
-                outfile.write("\n")
+            if c > max_line_length:
+                outfile.write('\n')
                outfile.write(leader1)
                outfile.write(leader2)
-                outfile.write(" "*(start-2))
+                outfile.write(' '*(start-2))
                c = start
-            if isYear(e):
-                if (e[-1] == ".") or (e[-1] == ","):
+            if is_year(e):
+                if (e[-1] == '.') or (e[-1] == ','):
                    punctuation = e[-1]
                    e = e[:-1]
                else:
-                    punctuation = ""
+                    punctuation = ''
            else:
-                seenDate = False
-            if seenDate:
+                seen_date = False
+            if seen_date:
                if not (e in years):
                    c += len(e) + len(punctuation)
-                    outfile.write(" ")
+                    outfile.write(' ')
                    outfile.write(e)
                    outfile.write(punctuation)
                    years += [e]
            else:
                if start < c:
-                    outfile.write("\n")
+                    outfile.write('\n')
                    outfile.write(leader1)
                    outfile.write(leader2)
-                    outfile.write(" "*(start-2))
+                    outfile.write(' '*(start-2))
 
-                outfile.write(" ")
+                outfile.write(' ')
                outfile.write(e)
                outfile.write(punctuation)
                for w in d:
-                    outfile.write(" ")
+                    outfile.write(' ')
                    outfile.write(w)
-    outfile.write("\n")
+    outfile.write('\n')
     return outfile, n+1
 
 
-def handleHeader(filename, leader1, leader2):
-    # handleHeader reads in the header of a file and inserts
+def handle_header(filename, leader1, leader2):
+    # handle_header reads in the header of a file and inserts
     # a line break around the Copyright dates.
-    print("------------------------------")
-    lines = open(filename, "r").readlines()
+    print('------------------------------')
+    lines = open(filename).readlines()
     if len(lines) > 20:
-        with open("tmptidy", "w") as outfile:
+        with open('tmptidy', 'w') as outfile:
            n = 0
            for i in lines:
-                if i.find("Copyright (C)") >= 0:
-                    outfile, n = handleCopyright(outfile, lines,
+                if i.find('Copyright (C)') >= 0:
+                    outfile, n = handle_copyright(outfile, lines,
                                                  n, leader1, leader2)
                    outfile.writelines(lines[n:])
                    outfile.close()
-                    print("-> mv tmptidy", filename)
-                    shutil.move("tmptidy", filename)
+                    print('-> mv tmptidy', filename)
+                    shutil.move('tmptidy', filename)
                    return
                else:
                    outfile.write(lines[n])
                    n += 1
-    sys.stdout.write("%s:1:1 needs a Copyright notice..\n" % filename)
+    sys.stdout.write('%s:1:1 needs a Copyright notice..\n' % filename)
 
 
-def bashTidy(filename):
-    # bashTidy - tidy up dates using "#" comment
-    handleHeader(filename, "#", " ")
+def bash_tidy(filename):
+    # bash_tidy - tidy up dates using '#' comment
+    handle_header(filename, '#', ' ')
 
 
-def cTidy(filename):
-    # cTidy - tidy up dates using "/* */" comments
-    handleHeader(filename, " ", "*")
+def c_tidy(filename):
+    # c_tidy - tidy up dates using '/* */' comments
+    handle_header(filename, ' ', '*')
 
 
-def m2Tidy(filename):
-    # m2Tidy - tidy up dates using "(* *)" comments
-    handleHeader(filename, " ", " ")
+def m2_tidy(filename):
+    # m2_tidy - tidy up dates using '(* *)' comments
+    handle_header(filename, ' ', ' ')
 
 
 def main():
     # main - for each file extension call the appropriate tidy routine.
- visitDir(".", ".in", bashTidy) - visitDir(".", ".py", bashTidy) - visitDir(".", ".c", cTidy) - visitDir(".", ".h", cTidy) - visitDir(".", ".def", m2Tidy) - visitDir(".", ".mod", m2Tidy) + visit_dir('.', '.in', bash_tidy) + visit_dir('.', '.py', bash_tidy) + visit_dir('.', '.c', c_tidy) + visit_dir('.', '.h', c_tidy) + visit_dir('.', '.def', m2_tidy) + visit_dir('.', '.mod', m2_tidy) main()