Remove old tools which are unused or no longer work [skip ci]

This commit is contained in:
halx99 2023-08-26 01:50:30 +08:00
parent fe21a11e55
commit 94442321e7
9 changed files with 2 additions and 1237 deletions

View File

@ -1,175 +0,0 @@
#!/usr/bin/env python
#coding=utf-8
import os, re, argparse, sys
import posixpath
class Path:
@staticmethod
def _forward_slash(p):
return p.replace(os.path.sep, '/')
@staticmethod
def join(p, *paths):
return Path._forward_slash(posixpath.join(p, *paths))
@staticmethod
def abspath(p):
return Path._forward_slash(posixpath.abspath(p))
@staticmethod
def normpath(p):
return Path._forward_slash(posixpath.normpath(p))
@staticmethod
def relpath(p, s):
return Path._forward_slash(posixpath.relpath(p, s))
@staticmethod
def exists(p):
return os.path.exists(p)
@staticmethod
def basename(p):
return posixpath.basename(p)
@staticmethod
def extname(p):
return posixpath.splitext(p)[1]
@staticmethod
def dirname(p):
return posixpath.dirname(p)
class LintContext:
def __init__(self, root, fix):
self.exclude = [
# exclude some platform specific files.
'extensions/spine/Json.c',
'extensions/spine/PathConstraint.h',
'extensions/spine/SkeletonJson.c',
'extensions/spine/SkeletonBinary.c',
'extensions/spine/kvec.h'
]
self.source_exts = ['.h','.hpp','.inl','.c','.cpp', '.m', '.mm']
self.header_exts = ['.h','.hpp','.inl']
self.root = root
self.fix = fix
self.errors = 0
self.error_files = 0
self._scan_source(root)
self._scan_unique_headers(self.headers)
def _scan_source(self, top):
# find all sources and headers relative to self.root
self.sources = []
self.headers = []
for root, dirnames, filenames in os.walk(top):
for f in filenames:
p = Path.relpath(Path.join(root, f), top)
if self._source_to_lint(p):
self.sources.append(p)
if self._is_header(p):
self.headers.append(p)
def _source_to_lint(self, p):
if p in self.exclude:
return False
ext = Path.extname(p)
return ext in self.source_exts
def _is_header(self, name):
return Path.extname(name) in self.header_exts
    # find headers that have unique base filenames
# this is used to get included headers in other search paths
def _scan_unique_headers(self, headers):
known = {}
for f in headers:
name = Path.basename(f)
            if name in known:
known[name].append(f)
else:
known[name] = [f]
uniq = {}
        for k, v in known.items():
if len(v) == 1:
uniq[k] = v[0]
self.uniq = uniq
def in_search_path(self, filename):
return Path.exists(Path.join(self.root, filename))
def find_uniq(self, basename):
        return self.uniq[basename] if basename in self.uniq else None
def get_include_path(self, original, directory):
# 1. try search in uniq cocos header names
p = self.find_uniq(Path.basename(original))
if not p:
# 2. try search in current header directory
p = Path.normpath(Path.join(directory, original))
if not self.in_search_path(p):
return None
return p
def fix(match, cwd, ctx, fixed):
h = match.group(2)
# return original if already in search path (cocos directory)
if ctx.in_search_path(h):
return match.group(0)
p = ctx.get_include_path(h, cwd)
if not p:
return match.group(0)
ctx.errors += 1
fix = '#%s "%s"' % (match.group(1), p)
fixed[match.group(0)] = fix
return fix
def lint_one(header, ctx):
cwd = Path.dirname(header)
if not cwd:
return
filename = Path.join(ctx.root, header)
    with open(filename, 'r') as f:
        content = f.read()
fixed = {}
# check all #include "header.*"
    linted = re.sub(r'#\s*(include|import)\s*"(.*)"', lambda m: fix(m, cwd, ctx, fixed), content)
if content != linted:
ctx.error_files += 1
if ctx.fix:
with open (filename, 'w') as f: f.write(linted)
print('%s: %d error(s) fixed' % (header, len(fixed)))
else:
print('%s:' % (header))
            for k, v in fixed.items():
print('\t%s should be %s' % (k, v))
def lint(ctx):
print('Checking headers in: %s' % ctx.root)
for f in ctx.sources:
lint_one(f, ctx)
print('Total: %d errors in %d files' % (ctx.errors, ctx.error_files))
if ctx.errors > 0:
if ctx.fix:
print('All fixed')
else:
            print('Rerun this script with -f to fix these errors')
sys.exit(1)
def main():
default_root = Path.abspath(Path.join(Path.dirname(__file__), '..', '..'))
parser = argparse.ArgumentParser(description='The axmol headers lint script.')
    parser.add_argument('-f', '--fix', action='store_true', help='fix the headers while linting')
    parser.add_argument('root', nargs='?', default=default_root, help='path to axmol source root directory')
args = parser.parse_args()
lint(LintContext(Path.join(args.root, 'axmol'), args.fix))
if __name__ == '__main__':
    main()

View File

@ -1,61 +0,0 @@
#!/usr/bin/env python
#coding=utf-8
'''
Remove trailing whitespace and ensure one and only one empty ending line.
'''
import os, re
def scan(*dirs, **kwargs):
files = []
    extensions = kwargs.get('extensions')
    excludes = kwargs.get('excludes', [])
for top in dirs:
for root, dirnames, filenames in os.walk(top):
            dirnames[:] = [i for i in dirnames if i not in excludes]  # prune excluded dirs in place so os.walk skips them
for f in filenames:
if f in excludes:
continue
ext = os.path.splitext(f)[1].lower()
if extensions is None or ext in extensions:
files.append(os.path.join(root, f))
return files
def fixone(src):
    with open(src, 'r') as f:
        lines = f.readlines()
    trimmed = []
    for line in lines:
        trimmed.append(re.sub(r'\s+$', '', line))
    # drop redundant blank lines at EOF, then add back exactly one
    while len(trimmed) > 1 and not trimmed[-1]:
        trimmed.pop()
    trimmed.append('')
    with open(src, 'w') as f:
        for line in trimmed:
            f.write('%s\n' % line)
def lint(root):
    print('Checking trailing whitespace in: %s' % root)
dirs = [
os.path.join(root, 'axmol'),
os.path.join(root, 'extensions'),
os.path.join(root, 'templates'),
os.path.join(root, 'tests'),
os.path.join(root, 'tools', 'simulator')
]
files = scan(*dirs, extensions=['.c', '.cpp', '.h', '.hpp', '.m', '.mm', '.java'])
for f in files:
print(f)
fixone(f)
def main():
default_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
lint(default_root)
if __name__ == '__main__':
    main()

View File

@ -1,21 +0,0 @@
Creates an archive from the current repository state using `git ls-files --cached --full-name --no-empty-directory`. Supports any depth of nested submodules; files from submodules are listed with the same command.
*License:* MIT
*Usage:* `git-archive-all [-v] [--prefix PREFIX] [--no-exclude] [--force-submodules] [--dry-run] OUTPUT_FILE`
*Options:*
**--version** Show program's version number and exit.
**-h, --help** Show this help message and exit.
**--prefix=PREFIX** Prepend PREFIX to each filename in the archive. By default the OUTPUT_FILE name is used, to avoid a tarbomb.
**--force-submodules** Force a `git submodule init && git submodule update` at each level before iterating submodules.
**-v, --verbose** Enable verbose mode.
**--no-exclude** Don't read .gitattributes files for patterns containing export-ignore attributes.
**--dry-run** Don't actually archive anything, just show what would be done.
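The options above suggest a typical invocation like the following (illustrative only; the script's actual file name is not shown in this diff view):

```
git-archive-all --prefix axmol/ -v axmol.zip
```

Passing `--prefix axmol/` keeps every entry under one top-level directory; when omitted, the OUTPUT_FILE name is used for the same purpose.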

View File

@ -1,15 +0,0 @@
{
"extra_zip_files":
[
{
"zip_config_path": "../../external/config.json",
"zip_file_path": "../../",
"extract_to_zip_path": "external"
},
{
"zip_config_path": "../console/config.json",
"zip_file_path": "../console",
"extract_to_zip_path": "tools/console"
}
]
}

View File

@ -1,635 +0,0 @@
#! /usr/bin/env python
# coding=utf-8
from __future__ import print_function
from __future__ import unicode_literals
__version__ = "1.7"
import sys
import os
import shutil
import traceback
from os import path, extsep
from subprocess import Popen, PIPE, CalledProcessError
def os_is_win32():
return sys.platform == 'win32'
class UnrecognizedFormat(Exception):  # derive from Exception so instances can be raised
def __init__(self, prompt):
self._prompt = prompt
def __str__(self):
return self._prompt
class GitArchiver(object):
"""
GitArchiver
Scan a git repository and export all tracked files, and submodules.
Checks for .gitattributes files in each directory and uses 'export-ignore'
pattern entries for ignore files in the archive.
Automatically detects output format extension: zip, tar, bz2, or gz.
"""
def __init__(self, prefix='', verbose=False, exclude=True, force_sub=False, extra=None, main_repo_abspath=None):
"""
@type prefix: string
@param prefix: Prefix used to prepend all paths in the resulting archive.
@type verbose: bool
@param verbose: Determines verbosity of the output (stdout).
@type exclude: bool
@param exclude: Determines whether archiver should follow rules specified in .gitattributes files.
Defaults to True.
@type force_sub: bool
@param force_sub: Determines whether submodules are initialized and updated before archiving.
Defaults to False
@type extra: list
@param extra: List of extra paths to include in the resulting archive.
@type main_repo_abspath: string
@param main_repo_abspath: Absolute path to the main repository (or one of subdirectories).
If None, current cwd is used.
If given path is path to a subdirectory (but not a submodule directory!)
it will be replaced with abspath to toplevel directory of the repository.
"""
if extra is None:
extra = []
if main_repo_abspath is None:
main_repo_abspath = path.abspath('')
elif not path.isabs(main_repo_abspath):
raise ValueError("You MUST pass absolute path to the main git repository.")
# Raises an exception if there is no repo under main_repo_abspath.
try:
self.run_shell("git rev-parse", main_repo_abspath)
        except Exception:
            raise ValueError("Not a git repository (or any of the parent directories): {path}".format(path=main_repo_abspath))
# Detect toplevel directory of the repo.
main_repo_abspath = path.abspath(self.read_git_shell('git rev-parse --show-toplevel', main_repo_abspath).rstrip())
self.prefix = prefix
self.verbose = verbose
self.exclude = exclude
self.extra = extra
self.force_sub = force_sub
self.main_repo_abspath = main_repo_abspath
def load_json_file(self, file_path):
import json
if not path.isfile(file_path):
raise Exception("Could not find (%s)" % (file_path))
with open(file_path) as data_file:
data = json.load(data_file)
return data
def ensure_directory(self, target):
if not os.path.exists(target):
os.mkdir(target)
def unpack_zipfile(self, zip_file_path, extract_dir):
"""Unpack zip `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
by ``zipfile.is_zipfile()``).
"""
import zipfile
if not zipfile.is_zipfile(zip_file_path):
raise UnrecognizedFormat("%s is not a zip file" % zip_file_path)
file_paths = []
print("==> Extracting files, please wait ...")
z = zipfile.ZipFile(zip_file_path)
try:
for info in z.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
if name.endswith('/'):
# directory
self.ensure_directory(target)
else:
# file
data = z.read(info.filename)
file_paths.append(target)
                    with open(target, 'wb') as f:
                        f.write(data)
                    del data
unix_attributes = info.external_attr >> 16
if unix_attributes:
os.chmod(target, unix_attributes)
finally:
z.close()
print("==> Extraction done!")
return file_paths
def create(self, output_path, dry_run=False, output_format=None):
"""
Creates the archive, written to the given output_file_path
Type of the archive is determined either by extension of output_file_path or by the format argument.
Supported formats are: gz, zip, bz2, tar, tgz
@type output_path: string
@param output_path: Output file path.
@type dry_run: bool
@param dry_run: Determines whether create should do nothing but print what it would archive.
@type output_format: string
@param output_format: Determines format of the output archive.
If None, format is determined from extension of output_file_path.
"""
if output_format is None:
file_name, file_ext = path.splitext(output_path)
output_format = file_ext[len(extsep):].lower()
if output_format == 'zip':
from zipfile import ZipFile, ZIP_DEFLATED
if not dry_run:
archive = ZipFile(path.abspath(output_path), 'w')
add = lambda file_path, file_name: archive.write(file_path, path.join(self.prefix, file_name), ZIP_DEFLATED)
elif output_format in ['tar', 'bz2', 'gz', 'tgz']:
import tarfile
if output_format == 'tar':
t_mode = 'w'
elif output_format == 'tgz':
t_mode = 'w:gz'
else:
t_mode = 'w:{f}'.format(f=output_format)
if not dry_run:
archive = tarfile.open(path.abspath(output_path), t_mode)
add = lambda file_path, file_name: archive.add(file_path, path.join(self.prefix, file_name))
else:
raise RuntimeError("Unknown format: {f}".format(f=output_format))
print("Making archive: (%s) , please wait ..." % output_path)
for file_path in self.extra:
if not dry_run:
if self.verbose:
print("Compressing {f} => {a}...".format(f=file_path,
a=path.join(self.prefix, file_path)))
try:
add(file_path, file_path)
                except Exception:
                    print('add %s failed.' % file_path)
else:
print("{f} => {a}".format(f=file_path,
a=path.join(self.prefix, file_path)))
for file_path in self.list_files():
if not dry_run:
if self.verbose:
print("Compressing {f} => {a}...".format(f=path.join(self.main_repo_abspath, file_path),
a=path.join(self.prefix, file_path)))
try:
add(path.join(self.main_repo_abspath, file_path), file_path)
                except Exception:
                    print('add %s failed.' % file_path)
else:
print("{f} => {a}".format(f=path.join(self.main_repo_abspath, file_path),
a=path.join(self.prefix, file_path)))
# Execute download-deps.py in root folder
import subprocess
subprocess.call("python %s -d -f" % (path.join(self.main_repo_abspath, "download-deps.py")), shell=True, cwd=self.main_repo_abspath)
# Execute download-bin.py in console folder
console_path = path.join(self.main_repo_abspath, "tools", "console")
subprocess.call("python %s -d -f" % (path.join(console_path, "download-bin.py")), shell=True, cwd=console_path)
# Check config.json to insert a zip file content to the final generated zip file
extra_folders = []
config_data = self.load_json_file("config.json")
for zip_file in config_data["extra_zip_files"]:
zip_config = self.load_json_file(zip_file["zip_config_path"])
zip_file_name = zip_config["version"] + '.zip'
extra_to_zip_file = zip_file["extract_to_zip_path"]
zip_file_path = os.path.join(zip_file["zip_file_path"], zip_file_name)
            # GitHub strips the leading 'v' from the folder name inside release zips, so drop it here
if zip_config["version"][0] == 'v':
extra_folder_name = zip_config["repo_name"] + '-' + zip_config["version"][1:]
else:
extra_folder_name = zip_config["repo_name"] + '-' + zip_config["version"]
extra_folder_path = os.path.join(self.main_repo_abspath, extra_folder_name)
extra_folders.append(extra_folder_path)
extra_file_paths = self.unpack_zipfile(zip_file_path, self.main_repo_abspath)
key_move_dirs = "move_dirs"
for file_path in extra_file_paths:
if file_path.find(extra_folder_path) == -1:
raise Exception("Couldn't find extra folder path (%s) in (%s)!" % (extra_folder_path, file_path))
path_in_zip = extra_to_zip_file + file_path[(len(extra_folder_path)):]
if key_move_dirs in zip_config:
move_dirs = zip_config[key_move_dirs]
related_path = os.path.relpath(file_path, extra_folder_path)
temp_rel_path = related_path.replace('\\', '/')
for move_dir in move_dirs:
if temp_rel_path.startswith(move_dir):
move_to_dir = move_dirs[move_dir]
path_in_zip = os.path.join(move_to_dir, related_path)
break
try:
add(file_path, path_in_zip)
                except Exception:
                    print('add %s failed.' % file_path)
key_extra_dirs = "extra_dirs"
if key_extra_dirs in config_data:
for extra_dir in config_data[key_extra_dirs]:
dir_path = path.join(self.main_repo_abspath, extra_dir)
list_dirs = os.walk(dir_path)
for root,dirs,files in list_dirs:
for f in files:
file_path = path.join(root,f)
path_in_zip = file_path[(len(self.main_repo_abspath)+1):]
try:
add(file_path, path_in_zip)
                        except Exception:
                            print('add %s failed.' % file_path)
if not dry_run:
archive.close()
for extra_folder in extra_folders:
if os.path.exists(extra_folder):
shutil.rmtree(extra_folder)
def get_path_components(self, repo_abspath, abspath):
"""
Splits given abspath into components until repo_abspath is reached.
E.g. if repo_abspath is '/Documents/Hobby/ParaView/' and abspath is
'/Documents/Hobby/ParaView/Catalyst/Editions/Base/', function will return:
['.', 'Catalyst', 'Editions', 'Base']
First element is always '.' (concrete symbol depends on OS).
@type repo_abspath: string
@param repo_abspath: Absolute path to the git repository.
@type abspath: string
@param abspath: Absolute path to within repo_abspath.
@rtype: list
@return: List of path components.
"""
components = []
if os_is_win32():
abspath = os.path.normpath(abspath)
repo_abspath = os.path.normpath(repo_abspath)
while abspath != repo_abspath:
abspath, tail = path.split(abspath)
if len(tail):
components.insert(0, tail)
else:
while not path.samefile(abspath, repo_abspath):
abspath, tail = path.split(abspath)
if len(tail):
components.insert(0, tail)
components.insert(0, path.relpath(repo_abspath, repo_abspath))
return components
def get_exclude_patterns(self, repo_abspath, repo_file_paths):
"""
Returns exclude patterns for a given repo. It looks for .gitattributes files in repo_file_paths.
Resulting dictionary will contain exclude patterns per path (relative to the repo_abspath).
        E.g. {('.', 'Catalyst', 'Editions', 'Base'): ['Foo*', '*Bar']}
@type repo_abspath: string
@param repo_abspath: Absolute path to the git repository.
@type repo_file_paths: list
@param repo_file_paths: List of paths relative to the repo_abspath that are under git control.
@rtype: dict
@return: Dictionary representing exclude patterns.
Keys are tuples of strings. Values are lists of strings.
Returns None if self.exclude is not set.
"""
if not self.exclude:
return None
def read_attributes(attributes_abspath):
patterns = []
if path.isfile(attributes_abspath):
attributes = open(attributes_abspath, 'r').readlines()
patterns = []
for line in attributes:
tokens = line.strip().split()
if "export-ignore" in tokens[1:]:
patterns.append(tokens[0])
return patterns
exclude_patterns = {(): []}
# There may be no gitattributes.
try:
global_attributes_abspath = self.read_shell("git config --get core.attributesfile", repo_abspath).rstrip()
exclude_patterns[()] = read_attributes(global_attributes_abspath)
        except Exception:
            # And it is valid not to have them.
            pass
for attributes_abspath in [path.join(repo_abspath, f) for f in repo_file_paths if f.endswith(".gitattributes")]:
# Each .gitattributes affects only files within its directory.
key = tuple(self.get_path_components(repo_abspath, path.dirname(attributes_abspath)))
exclude_patterns[key] = read_attributes(attributes_abspath)
local_attributes_abspath = path.join(repo_abspath, ".git", "info", "attributes")
key = tuple(self.get_path_components(repo_abspath, repo_abspath))
if key in exclude_patterns:
exclude_patterns[key].extend(read_attributes(local_attributes_abspath))
else:
exclude_patterns[key] = read_attributes(local_attributes_abspath)
return exclude_patterns
def is_file_excluded(self, repo_abspath, repo_file_path, exclude_patterns):
"""
Checks whether file at a given path is excluded.
@type repo_abspath: string
@param repo_abspath: Absolute path to the git repository.
@type repo_file_path: string
@param repo_file_path: Path to a file within repo_abspath.
@type exclude_patterns: dict
@param exclude_patterns: Exclude patterns with format specified for get_exclude_patterns.
@rtype: bool
@return: True if file should be excluded. Otherwise False.
"""
if exclude_patterns is None or not len(exclude_patterns):
return False
from fnmatch import fnmatch
file_name = path.basename(repo_file_path)
components = self.get_path_components(repo_abspath, path.join(repo_abspath, path.dirname(repo_file_path)))
is_excluded = False
# We should check all patterns specified in intermediate directories to the given file.
# At the end we should also check for the global patterns (key '()' or empty tuple).
while not is_excluded:
key = tuple(components)
if key in exclude_patterns:
patterns = exclude_patterns[key]
for p in patterns:
if fnmatch(file_name, p) or fnmatch(repo_file_path, p):
if self.verbose:
print("Exclude pattern matched {pattern}: {path}".format(pattern=p, path=repo_file_path))
is_excluded = True
if not len(components):
break
components.pop()
return is_excluded
def list_files(self, repo_path=''):
"""
An iterator method that yields a file path relative to main_repo_abspath
for each file that should be included in the archive.
Skips those that match the exclusion patterns found in
any discovered .gitattributes files along the way.
        Recurses into submodules as well.
@type repo_path: string
@param repo_path: Path to the git submodule repository within the main git repository.
@rtype: iterator
@return: Iterator to traverse files under git control relative to main_repo_abspath.
"""
repo_abspath = path.join(self.main_repo_abspath, repo_path)
repo_file_paths = self.read_git_shell("git ls-files --cached --full-name --no-empty-directory", repo_abspath).splitlines()
exclude_patterns = self.get_exclude_patterns(repo_abspath, repo_file_paths)
for repo_file_path in repo_file_paths:
# Git puts path in quotes if file path has unicode characters.
repo_file_path = repo_file_path.strip('"') # file path relative to current repo
file_name = path.basename(repo_file_path)
            # The symbolic link property is lost after extracting the zip file.
# Loop over files in the link dir and add them to file list
absfilepath = path.join(repo_abspath, repo_file_path)
if path.islink(absfilepath):
if path.isdir(absfilepath):
list_dirs = os.walk(absfilepath)
for root,dirs,files in list_dirs:
for file in files:
main_repo_file_path = os.path.relpath(path.join(root, file), self.main_repo_abspath)
yield main_repo_file_path
continue
            # Skip directories; only files and symlinks are archived.
elif path.isdir(repo_file_path):
continue
main_repo_file_path = path.join(repo_path, repo_file_path) # file path relative to the main repo
if self.is_file_excluded(repo_abspath, repo_file_path, exclude_patterns):
continue
# Yield both repo_file_path and main_repo_file_path to preserve structure of the repo.
yield main_repo_file_path
if self.force_sub:
self.run_shell("git submodule init", repo_abspath)
self.run_shell("git submodule update", repo_abspath)
# List files of every submodule.
for submodule_path in self.read_shell("git submodule --quiet foreach 'pwd'", repo_abspath).splitlines():
# In order to get output path we need to exclude repository path from submodule_path.
submodule_path = path.relpath(submodule_path, self.main_repo_abspath)
for file_path in self.list_files(submodule_path):
yield file_path
@staticmethod
def run_shell(cmd, cwd=None):
"""
Runs shell command.
@type cmd: string
@param cmd: Command to be executed.
@type cwd: string
@param cwd: Working directory.
@rtype: int
@return: Return code of the command.
@raise CalledProcessError: Raises exception if return code of the command is non-zero.
"""
p = Popen(cmd, shell=True, cwd=cwd)
p.wait()
if p.returncode:
raise CalledProcessError(returncode=p.returncode, cmd=cmd)
return p.returncode
@staticmethod
def read_shell(cmd, cwd=None, encoding='utf-8'):
"""
Runs shell command and reads output.
@type cmd: string
@param cmd: Command to be executed.
@type cwd: string
@param cwd: Working directory.
@type encoding: string
@param encoding: Encoding used to decode bytes returned by Popen into string.
@rtype: string
@return: Output of the command.
@raise CalledProcessError: Raises exception if return code of the command is non-zero.
"""
p = Popen(cmd, shell=True, stdout=PIPE, cwd=cwd)
output, _ = p.communicate()
output = output.decode(encoding)
if p.returncode:
raise CalledProcessError(returncode=p.returncode, cmd=cmd, output=output)
return output
@staticmethod
def read_git_shell(cmd, cwd=None):
"""
Runs git shell command, reads output and decodes it into unicode string
@type cmd: string
@param cmd: Command to be executed.
@type cwd: string
@param cwd: Working directory.
@rtype: string
@return: Output of the command.
@raise CalledProcessError: Raises exception if return code of the command is non-zero.
"""
p = Popen(cmd, shell=True, stdout=PIPE, cwd=cwd)
output, _ = p.communicate()
output = output.decode('unicode_escape').encode('raw_unicode_escape').decode('utf-8')
if p.returncode:
raise CalledProcessError(returncode=p.returncode, cmd=cmd, output=output)
return output
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [-v] [--prefix PREFIX] [--no-exclude] [--force-submodules] [--dry-run] OUTPUT_FILE",
version="%prog {version}".format(version=__version__))
parser.add_option('--prefix',
type='string',
dest='prefix',
default='',
help="Prepend PREFIX to each filename in the archive. OUTPUT_FILE name is used by default to avoid tarbomb.")
parser.add_option('-v', '--verbose',
action='store_true',
dest='verbose',
help='Enable verbose mode.')
parser.add_option('--no-exclude',
action='store_false',
dest='exclude',
default=True,
help="Don't read .gitattributes files for patterns containing export-ignore attrib.")
parser.add_option('--force-submodules',
action='store_true',
dest='force_sub',
help="Force a git submodule init && git submodule update at each level before iterating submodules.")
parser.add_option('--extra',
action='append',
dest='extra',
default=[],
help="Any additional files to include in the archive.")
parser.add_option('--dry-run',
action='store_true',
dest='dry_run',
help="Don't actually archive anything, just show what would be done.")
options, args = parser.parse_args()
if len(args) != 1:
parser.error("You must specify exactly one output file")
output_file_path = args[0]
if path.isdir(output_file_path):
parser.error("You cannot use directory as output")
# avoid tarbomb
if options.prefix:
options.prefix = path.join(options.prefix, '')
else:
import re
output_name = path.basename(output_file_path)
        output_name = re.sub(r'(\.zip|\.tar|\.tgz|\.gz|\.bz2|\.tar\.gz|\.tar\.bz2)$', '', output_name) or "Archive"
options.prefix = path.join(output_name, '')
try:
archiver = GitArchiver(options.prefix,
options.verbose,
options.exclude,
options.force_sub,
options.extra)
archiver.create(output_file_path, options.dry_run)
except Exception as e:
traceback.print_exc()
parser.exit(2, "{exception}\n".format(exception=e))
sys.exit(0)

View File

@ -1,3 +1,4 @@
## axmol-migrate
please visit: https://github.com/axmolengine/axmol-migrate
visit: https://github.com/axmolengine/axmol-migrate
refer: https://github.com/axmolengine/axmol/discussions/1231

View File

@ -1,67 +0,0 @@
#!/usr/bin/python
#ConvertYCoordFlipped.py
import plistlib
import os.path
import argparse
import glob
import shutil
#keys in dictionary
metaDataKey = 'metaData'
yCoordFlippedConvertedKey = 'yCoordFlippedConverted'
yCoordFlippedKey = 'yCoordFlipped'
#check if the particle file has been converted
def checkFlippedConvertFlag(plistDict):
    if metaDataKey not in plistDict:
        return False
    metaDict = plistDict.get(metaDataKey)
    if yCoordFlippedConvertedKey not in metaDict:
        return False
    return metaDict.get(yCoordFlippedConvertedKey) == 1
#write flag to indicate the file has been converted
def writeFlippedConvertFlag(plistDict):
metaDict = dict()
metaDict.update(yCoordFlippedConverted = 1)
plistDict.update(metaData = metaDict)
#process file
def processConvertFile(filename):
#print a line to separate files
print ('')
if(not os.path.isfile(filename)):
        print(filename + ' does not exist!')
return
print('Begin process particle file: ' + filename)
fp = open(filename, 'r')
pl = plistlib.readPlist(fp)
    if yCoordFlippedKey not in pl:
        print('Skip plist file: ' + filename + ' for there is no key for yCoordFlipped.')
else:
if(not checkFlippedConvertFlag(pl)):
backupFileName = filename+'.backup'
print('Write backup file to ' + backupFileName)
shutil.copyfile(filename,backupFileName)
print('converting...')
pl[yCoordFlippedKey] = -pl[yCoordFlippedKey]
writeFlippedConvertFlag(pl)
print('converted...')
print('Write new plist file to ' + filename)
plistlib.writePlist(pl,filename)
else:
print('Skip a converted file ' + filename)
# -------------- entrance --------------
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("file", nargs = "+",help = "specify a file or a patten")
#argparser.add_argument("-r", "--recursive",action = "store_true", help = "recursive folder or not")
args = argparser.parse_args()
for file in args.file:
processConvertFile(file)

View File

@ -1,107 +0,0 @@
# Performance Test Data Convertor
## Overview
Performance Test Data Convertor converts the json result files generated by the `tests/performance-tests` project into excel files.
## Requirement
* Python 2.7 is required (Python 2.7.5 is well tested).
* [xlwt](https://pypi.python.org/pypi/xlwt/) for python to write excel files.
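If xlwt is missing, it can typically be installed with pip:
```
pip install xlwt
```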
## Generate test data
1. Change the device name in the tests source code.
Now you should modify the device name in `tests/performance-tests/Classes/Profile.cpp` before running the tests. Sample code:
```
// For different device & os, change these values
// TODO : get device info automatically
#define DEVICE_NAME "TheDeviceName"
#define OS_VERSION "SystemVersion"
```
2. Run the performance-tests project on devices.
Once the project starts, you will see a scene like this:
![test-scene][test-scene.jpg]
3. Click the `Start AutoTest` menu item. The performance tests will then run automatically; when the app stops at the start scene again, the auto-testing has finished.
## Get the generated json files
After the test data has been generated, you can get the json files from the device.
Each json file is named with the format `PerformanceLog-[DEVICE_NAME]-[TIMESTAMP].json`.
For different OS, you can get the json files by different steps:
### From iOS devices
The json files are generated in the app's `Document` folder, so you can retrieve them through iTunes:
![itunes][itunes.jpg]
### From Android devices
The json files are generated in `/mnt/sdcard/PerfTest`, so you can pull them with commands like these:
* Windows System
Run the command in cmd:
```
%ANDROID_HOME%\platform-tools\adb pull /mnt/sdcard/PerfTest C:\Users\USER_NAME\MY_LOG_FOLDER
```
* Mac System
Run the command in terminal:
```
$ANDROID_HOME/platform-tools/adb pull /mnt/sdcard/PerfTest ~/MY_LOG_FOLDER
```
### From Mac devices
The json files are generated in `~/Document`.
### From Win32 devices
The json files are generated in the AppData path, which differs between Windows versions.
The path will be `C:\Users\USER_NAME\AppData\Local\performance-tests` on Win8.1.
## Convert json files to excel files
When the json files are ready, you can convert them to excel files with `tools/performance-analyze/convertor.py`.
The usage of the tool:
```
usage: convertor.py [-h] -s SRC_PATH [-o OUTPUT_PATH]
Performance test data convertor.
optional arguments:
-h, --help show this help message and exit
-s SRC_PATH Specify the json file path or the folder path of json files.
-o OUTPUT_PATH Specify the output path of excel files.
```
Tips:
* The value of `-s` can be a json file or a folder which contains many json files. If it's a folder, then the json files in the folder will be converted to excel files one by one.
* You can specify the output path by `-o`. If it's not specified, the excel files will be placed in the same folder of json files.
* Each json file generates one excel file, named the same as the json file. For example:
`python convertor.py -s PerformanceLog-iphone4s-2015-09-11-1745.json` will generate a `PerformanceLog-iphone4s-2015-09-11-1745.xls` file.
* The generated excel file looks like this:
![excel format][excel.jpg]
1. Each sheet shows the result of a test case.
2. The cells with GREEN background color are the conditions of the test case.
3. The cells with YELLOW background color are the result data of the test case.
[test-scene.jpg]: http://i60.tinypic.com/ou86bs.jpg
[itunes.jpg]: http://i60.tinypic.com/33z4r53.jpg
[excel.jpg]: http://i57.tinypic.com/wvx500.jpg

View File

@ -1,155 +0,0 @@
#!/usr/bin/python
#-*- coding: UTF-8 -*-
# ----------------------------------------------------------------------------
# Convert the performance test result from json files to excel.
#
# Author: Bill Zhang
#
# License: MIT
# ----------------------------------------------------------------------------
'''
Convert the performance test result from json files to excel.
'''
import xlwt
import os
import json
from argparse import ArgumentParser
DEFAULT_STYLE = 'borders: left thin, right thin, top thin, bottom thin;'
CONDITION_STYLE = 'pattern: pattern solid, fore_color light_green;'
RESULT_STYLE = 'pattern: pattern solid, fore_color light_yellow;'
BASE_KEYS = [
'osVersion',
'fileVersion',
'timeStamp',
'engineVersion',
'device'
]
KEY_CONDITION_HEADERS = "conditionHeaders"
KEY_RESULT_HEADERS = "resultHeaders"
KEY_RESULTS = "results"
START_COL_INDEX = 0
START_ROW_INDEX = 0
class KnownException(Exception):
pass
class Convertor:
def __init__(self, src_path, output_path=None):
self.src_path = self.change_to_abspath(src_path)
if not os.path.exists(self.src_path):
            raise KnownException('%s does not exist!' % self.src_path)
if output_path is None:
# not specified output path, default use source path
if os.path.isfile(self.src_path):
self.output_path = os.path.dirname(self.src_path)
else:
self.output_path = self.src_path
else:
self.output_path = self.change_to_abspath(output_path)
def change_to_abspath(self, path):
ret = os.path.expanduser(path)
if not os.path.isabs(ret):
ret = os.path.abspath(ret)
ret = os.path.normpath(ret)
return ret
    def get_col_width(self, col_str):
        # column width in 1/256-character units; str() guards against non-string cell values
        return 256 * (len(str(col_str)) + 1)
def convert_file(self, file_path):
        with open(file_path) as f:
            testData = json.load(f)
basename, ext = os.path.splitext(os.path.basename(file_path))
dst_file_path = os.path.join(self.output_path, "%s.xls" % basename)
if os.path.isfile(dst_file_path):
os.remove(dst_file_path)
workbook = xlwt.Workbook(encoding = 'ascii')
default_style = xlwt.Style.easyxf(DEFAULT_STYLE)
con_style = xlwt.Style.easyxf("%s%s" % (DEFAULT_STYLE, CONDITION_STYLE))
ret_style = xlwt.Style.easyxf("%s%s" % (DEFAULT_STYLE, RESULT_STYLE))
for key in testData.keys():
if key in BASE_KEYS:
continue
# create a sheet for the test case
sheetObj = workbook.add_sheet(key)
# get test case data
caseInfo = testData[key]
# Add headers for the test case
condHeaders = caseInfo[KEY_CONDITION_HEADERS]
retHeaders = caseInfo[KEY_RESULT_HEADERS]
curRow = START_ROW_INDEX
curCol = START_COL_INDEX
col_widths = {}
for header in (condHeaders + retHeaders):
sheetObj.write(curRow, curCol, header, default_style)
col_width = self.get_col_width(header)
col_widths[curCol] = col_width
sheetObj.col(curCol).width = col_width
curCol += 1
rets = caseInfo[KEY_RESULTS]
for retInfo in rets:
curRow += 1
curCol = START_COL_INDEX
for ret in retInfo:
if (curCol - START_COL_INDEX) < len(condHeaders):
use_style = con_style
else:
use_style = ret_style
sheetObj.write(curRow, curCol, ret, use_style)
new_width = self.get_col_width(ret)
old_width = col_widths[curCol]
if new_width > old_width:
sheetObj.col(curCol).width = new_width
col_widths[curCol] = new_width
curCol += 1
workbook.save(dst_file_path)
print("%s is generated." % dst_file_path)
def do_convert(self):
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
if os.path.isfile(self.src_path):
self.convert_file(self.src_path)
else:
for f in os.listdir(self.src_path):
full_path = os.path.join(self.src_path, f)
ignore, ext = os.path.splitext(f)
if os.path.isfile(full_path) and ext == '.json':
self.convert_file(full_path)
if __name__ == '__main__':
parser = ArgumentParser(description="Performance test data convertor.")
parser.add_argument('-s', dest='src_path', required=True, help='Specify the json file path or the folder path of json files.')
parser.add_argument('-o', dest='output_path', help='Specify the output path of excel files.')
(args, unknown) = parser.parse_known_args()
try:
convertor = Convertor(args.src_path, args.output_path)
convertor.do_convert()
    except KnownException as e:
        print(' '.join(e.args))