Commit e8fbdb37 authored by Riccardo Magliocchetti, committed by Laurent Godard

Cleanup parse-perfcheck.py

Use more idiomatic Python for file, path, CSV handling and option
parsing.
There's still quite a bit of stuff to clean up; at least it would be
nice to kill the remaining globals.

Fixed the indentation on the parts I've touched; the rest of the file is
still a bit off.

Change-Id: I4214078c38a1e26cca17d09ebb9c0f53ba429ea9
Reviewed-on: https://gerrit.libreoffice.org/12405
Tested-by: Laurent Godard <lgodard.libre@laposte.net>
Reviewed-by: Laurent Godard <lgodard.libre@laposte.net>
parent 710c6640
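As a quick illustration of the idioms the commit message refers to (this sketch is not part of the patch; the defaults and sample values are made up, only the option names mirror the ones in the patch), option parsing via getopt, path building via os.path.join, and tab-separated output via csv inside a with-block look roughly like this:

import csv
import getopt
import os
import sys

def parse_args(argv):
    # getopt raises GetoptError on unknown options instead of silently ignoring them
    opts, _ = getopt.getopt(argv, '', ['csv-file=', 'source-directory='])
    target, source = "result.csv", "."
    for opt, val in opts:
        if opt == '--csv-file':
            target = val
        elif opt == '--source-directory':
            source = val
    return target, source

def append_rows(target, rows):
    # the with-block closes the file even if writing fails;
    # csv.writer handles quoting and separators instead of manual "\t".join()
    with open(target, 'a', newline='') as out:
        csv.writer(out, delimiter='\t').writerows(rows)

if __name__ == '__main__':
    target, source = parse_args(sys.argv[1:])
    append_rows(target, [[os.path.join(source, "callgrind.out.example"), "42"]])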
parse-perfcheck.py

@@ -7,63 +7,39 @@
 import sys
 import os
+import getopt
+import csv

-parseTrigger = "desc: Trigger: Client Request: "
-parseTotal = "totals: "
-separator = os.path.sep
-lastCommitId = ""
-lastCommitDate = ""
-needsCsvHeader = True # needs header in csv file ? yes if new
 colsResult = {}
 allTests = []

-def processDirectory(rootDir):
-    if needsCsvHeader:
-        intermediateResult = "lastCommit\tlastCommitDate\ttest filename\tdump comment\tcount\n"
-    else:
-        intermediateResult = ""
-    for dirName, subdirList, fileList in os.walk(rootDir):
-        files = [ fi for fi in fileList if fi.startswith("callgrind.out.") ]
-        for fname in files:
-            found = parseFile(dirName, fname)
-            if found != "":
-                intermediateResult += found
-    return intermediateResult
-
-def parseFile(dirname, filename):
-    path = dirname + separator + filename
-    callgrindFile = open(path,'r')
-    lines = callgrindFile.readlines()
-    curTestComment = ""
-    total = "0"
-    for line in lines:
-        if line.startswith(parseTrigger):
-            curTestComment = line[len(parseTrigger):].replace("\n","")
-        elif line.startswith(parseTotal):
-            total = line[len(parseTotal):].replace("\n","")
-    callgrindFile.close()
-    if curTestComment == "":
-        return ""
-    if total == "0": # should not occur, btw
-        return ""
-    dirs = dirname.split(separator)
-    currentTest = dirs[-1:]
-    testName = currentTest[0].replace(".test.core","")
+def parseFile(dirname, filename, lastCommit):
+    curTestComment, total = None, None
+
+    path = os.path.join(dirname, filename)
+    trigger = "desc: Trigger: Client Request: "
+    trigger_len = len(trigger)
+    totals = "totals: "
+    totals_len = len(totals)
+
+    with open(path,'r') as callgrindFile:
+        lines = callgrindFile.readlines()
+
+    for line in lines:
+        if line.startswith(trigger):
+            curTestComment = line[trigger_len:].replace("\n","")
+        elif line.startswith(totals):
+            total = line[totals_len:].replace("\n","")
+
+    if curTestComment is None or total is None:
+        return None
+
+    testName = os.path.basename(dirname).replace(".test.core","")
+
+    lastCommitId, lastCommitDate = lastCommit

     if lastCommitId not in colsResult:
         colsResult[lastCommitId] = {}
         colsResult[lastCommitId]['date'] = lastCommitDate
@@ -71,9 +47,22 @@ def parseFile(dirname, filename):
         colsResult[lastCommitId]['values'][curTestComment] = total

-    result = lastCommitId + "\t" + lastCommitDate + "\t" + testName + "\t" + curTestComment + "\t" + total + "\n"
+    return [lastCommitId, lastCommitDate, testName, curTestComment, total]
+
+def processDirectory(rootDir, needsCsvHeader, lastCommit):
+    results = []
+    if needsCsvHeader:
+        results.append(["lastCommit", "lastCommitDate", "test filename", "dump comment", "count"])

-    return result
+    for dirName, subdirList, fileList in os.walk(rootDir):
+        files = [f for f in fileList if f.startswith("callgrind.out.")]
+        for fname in files:
+            found = parseFile(dirName, fname, lastCommit)
+            if found is not None:
+                results.append(found)
+
+    return results

 def getLastCommitInfo():
@@ -90,7 +79,7 @@ def displayUsage():
     usage = """
-Parses the callgrind results of make percheck
+Parses the callgrind results of make perfcheck

 Arguments :
@@ -108,91 +97,71 @@ Alerts, if any, are displayed in standard output
 """
     print(usage)

+class WrongArguments(Exception):
+    pass
+
 def analyzeArgs(args):
-    isValid = True
+    try:
+        opts, args = getopt.getopt(args, 'x', [
+            'csv-file=', 'source-directory=', 'alert-type=', 'alert-value=', 'help'])
+    except getopt.GetoptError:
+        raise WrongArguments

     targetFileName = "perfcheckResult.csv"
     sourceDirectory = "./workdir/CppunitTest"
     alertType = ""
     alertValue = 10

-    if "--help" in args:
-        isValid = False
-
-    if isValid:
-        for arg in args[1:]:
-            found = False
-            if arg.startswith("--csv-file"):
-                spliter = arg.split("=")
-                if spliter[1] != "":
-                    targetFileName = spliter[1]
-                    found = True
-            elif arg.startswith("--source-directory"):
-                spliter = arg.split("=")
-                if spliter[1] != "":
-                    sourceDirectory = spliter[1]
-                    found = True
-            elif arg.startswith("--alert-type"):
-                spliter = arg.split("=")
-                if spliter[1] in ['previous','first']:
-                    alertType = spliter[1]
-                    found = True
-                else:
-                    isValid = False
-            elif arg.startswith("--alert-value"):
-                spliter = arg.split("=")
-                if spliter[1] != "":
-                    alertValue = float(spliter[1])
-                    found = True
-            isValid = isValid and found
-
-    return isValid, targetFileName, sourceDirectory, alertType, alertValue
+    for o, a in opts:
+        if o == '--help':
+            displayUsage()
+            sys.exit()
+        elif o == "--csv-file":
+            targetFileName = a
+        elif o == "--source-directory":
+            sourceDirectory = a
+        elif o == "--alert-type":
+            alertType = a
+        elif o == "--alert-value":
+            alertValue = float(a)
+        else:
+            raise WrongArguments
+
+    return targetFileName, sourceDirectory, alertType, alertValue

-def readCsvFile():
-    fileResult = open(targetFileName,'r')
-    lines = fileResult.readlines()
-    fileResult.close
-
-    lines = lines[1:] #skip header
-
-    for line in lines:
-        if line.strip() != "": # do not process empty lines
-            spliter = line.replace('\n','').split('\t')
-            curId = spliter[0]
-            curDate = spliter[1]
-            curTestName = spliter[2]
-            curTestComment = spliter[3]
-            curValue = spliter[4]
-
-            if curTestComment not in allTests:
-                allTests.append(curTestComment)
-
-            if curId not in colsResult:
-                colsResult[curId] = {}
-                colsResult[curId]['date'] = curDate
-                colsResult[curId]['values'] = {}
-
-            colsResult[curId]['values'][curTestComment] = curValue
+def readCsvFile(targetFilename):
+    with open(targetFilename, 'r') as csvfile:
+        reader = csv.reader(csvfile, delimiter="\t")
+        # skip header
+        next(reader)
+        for line in reader:
+            # do not process empty lines
+            if not line:
+                continue
+
+            curId, curDate, curTestName, curTestComment, curValue = line
+
+            if curTestComment not in allTests:
+                allTests.append(curTestComment)
+
+            if curId not in colsResult:
+                colsResult[curId] = {}
+                colsResult[curId]['date'] = curDate
+                colsResult[curId]['values'] = {}
+
+            colsResult[curId]['values'][curTestComment] = curValue

 if __name__ == '__main__':

     #check args
-    isOk, targetFileName, sourceDirectory, alertType, alertValue = analyzeArgs(sys.argv)
-
-    if not isOk:
+    try:
+        targetFileName, sourceDirectory, alertType, alertValue = analyzeArgs(sys.argv[1:])
+    except WrongArguments:
         displayUsage()
         sys.exit(1)

     # check if sourceDirectorty exists
     if not os.path.isdir(sourceDirectory):
@@ -201,27 +170,31 @@ if __name__ == '__main__':
     # read the complete CSV file
     if os.path.isfile(targetFileName):
-        readCsvFile()
+        readCsvFile(targetFileName)
         needsCsvHeader = False
+    else:
+        needsCsvHeader = True

     # last commit Id
     lastCommitId, lastCommitDate = getLastCommitInfo()

     # walker through directory
-    if not lastCommitId in colsResult:
-        newResult = processDirectory(sourceDirectory)
+    if lastCommitId not in colsResult:
+        lastCommit = (lastCommitId, lastCommitDate)
+        results = processDirectory(sourceDirectory, needsCsvHeader, lastCommit)
+        ppResults = "\n".join(["\t".join(row) for row in results])

-        print('\nNew results\n' + newResult)
+        print('\nNew results\n' + ppResults)

         # append raw result
-        with open(targetFileName,'a') as fileResult:
-            fileResult.write(newResult)
+        with open(targetFileName,'a') as csvfile:
+            writer = csv.writer(csvfile, delimiter='\t')
+            writer.writerows(results)

         print("\nCSV file written at " + targetFileName + '\n')
     else:
         print("\nCSV file up to date " + targetFileName + '\n')

     # build columned output
@@ -231,28 +204,21 @@ if __name__ == '__main__':
     alertTest = {}

-    for k in colsResult:
-        mLine += k + "\t" + colsResult[k]['date'] + "\t"
-
-        for t in allTests:
-            if t in colsResult[k]['values']:
-                mValue= colsResult[k]['values'][t]
-
-                if not t in alertTest:
-                    alertTest[t] = {}
-                alertTest[t][colsResult[k]['date']] = mValue
-            else:
-                mValue = ""
-            mLine += mValue + "\t"
-        mLine += "\n"
-
-    # write columned result
-    with open(targetFileName + '.col','w') as fileResult:
+    with open(targetFileName + '.col','w') as fileResult:
+        for k in colsResult:
+            mLine += k + "\t" + colsResult[k]['date'] + "\t"
+            for t in allTests:
+                if t in colsResult[k]['values']:
+                    mValue= colsResult[k]['values'][t]
+                    if t not in alertTest:
+                        alertTest[t] = {}
+                    alertTest[t][colsResult[k]['date']] = mValue
+                else:
+                    mValue = ""
+                mLine += mValue + "\t"
+            mLine += "\n"
+
+        # write columned result
         fileResult.write(mLine)

     print("Columned file written at " + targetFileName + '.col\n')
@@ -276,7 +242,6 @@ if __name__ == '__main__':
         if alertType == "previous":
             if len(keylist) > 1:
                 minVal = float(testDict[keylist[-2]])
             else:
                 minVal = float(testDict[keylist[0]])
...
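With the reworked analyzeArgs() above, the script is driven entirely by long options. A hypothetical way to invoke it from Python (the script path and the values are examples; only the option names and defaults come from the code above):

import subprocess
import sys

# Run the cleaned-up parser; adjust the path to wherever parse-perfcheck.py lives.
subprocess.run([
    sys.executable, "parse-perfcheck.py",
    "--csv-file=perfcheckResult.csv",
    "--source-directory=./workdir/CppunitTest",
    "--alert-type=previous",
    "--alert-value=10",
], check=True)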