3 # Generate the output tree into a specified directory.
import argparse
import errno
import gzip
import os
import re
import shutil
import subprocess
import sys
import tarfile
# The directory this script lives in; put it on sys.path so the
# bundled lib/ helper modules imported below can be found.
source_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(source_dir)
12 # and import libraries we have
13 from lib
import kconfig
, patch
, make
14 from lib
import bpgit
as git
15 from lib
import bpgpg
as gpg
16 from lib
import bpkup
as kup
17 from lib
.tempdir
import tempdir
18 from lib
import bpreqs
as reqs
class Bp_Identity(object):
    """
    Identity of the generated backport tree.

    Folks considering multiple integrations may want to
    consider stuffing versioning info here as well, but
    that will need thought/design on sharing compat.

    Use the *_resafe variants when combining into regexps; although we
    currently don't support regexps there, perhaps later we will, and
    this just makes things safer for the output regardless. Once and if
    those are added, how we actually use the others for regular printing
    will need to be considered.
    """

    def __init__(self, integrate=False, kconfig_prefix='CPTCFG_',
                 project_prefix='', project_dir='',
                 target_dir='', target_dir_name=''):
        # True when targeting kernel integration, False for a package.
        self.integrate = integrate
        self.kconfig_prefix = kconfig_prefix
        self.kconfig_prefix_resafe = re.escape(kconfig_prefix)
        self.project_prefix = project_prefix
        self.project_prefix_resafe = re.escape(project_prefix)
        # The combined prefix applied to every generated config symbol.
        self.full_prefix = kconfig_prefix + project_prefix
        self.full_prefix_resafe = re.escape(self.full_prefix)
        self.project_dir = project_dir
        self.target_dir = target_dir
        self.target_dir_name = target_dir_name
def read_copy_list(copyfile):
    """
    Read a copy-list file and return a list of (source, target)
    tuples. The source and target are usually the same, but in
    the copy-list file there may be a rename included.
    """
    result = []
    for raw in copyfile:
        # remove leading/trailing whitespace
        entry = raw.strip()
        # skip blank lines and comments
        if not entry or entry.startswith('#'):
            continue
        if entry.startswith('/'):
            raise Exception("Input path '%s' is absolute path, this isn't allowed" % (entry, ))
        # an entry may carry an explicit rename: "src -> dst"
        if ' -> ' in entry:
            srcitem, dstitem = entry.split(' -> ')
            # both sides must be files, or both must be directories
            if (srcitem[-1] == '/') != (dstitem[-1] == '/'):
                raise Exception("Cannot copy file/dir to dir/file")
        else:
            srcitem = dstitem = entry
        result.append((srcitem, dstitem))
    return result
def read_dependencies(depfilename):
    """
    Read a (the) dependency file and return the list of
    dependencies as a dictionary, mapping a Kconfig symbol
    to a list of kernel version dependencies.

    If a backported feature that an upstream backported driver
    depends on had kconfig limitations (ie, debugging feature not
    available) a build constraint restriction can be expressed
    by using a kconfig expression. The kconfig expressions can
    be specified by using the "kconfig: " prefix.

    While reading ignore blank or commented lines.
    """
    ret = {}
    # use a context manager so the file is closed even if a
    # malformed line makes split() raise (the handle leaked before)
    with open(depfilename, 'r') as depfile:
        for item in depfile:
            item = item.strip()
            if not item or item[0] == '#':
                continue
            if "kconfig:" in item:
                # "SYM kconfig: <expr>" -- keep the whole
                # "kconfig: ..." tail as the dependency entry
                sym, kconfig_exp = item.split(" ", 1)
                if sym not in ret:
                    ret[sym] = [kconfig_exp, ]
                else:
                    ret[sym].append(kconfig_exp)
            else:
                # "SYM <kernel-version>"
                sym, dep = item.split()
                if sym not in ret:
                    ret[sym] = [dep, ]
                else:
                    ret[sym].append(dep)
    return ret
def check_output_dir(d, clean):
    """
    Check that the output directory doesn't exist or is empty,
    unless clean is True in which case it's nuked. This helps
    sanity check the output when generating a tree, so usually
    running with --clean isn't suggested.
    """
    if clean:
        shutil.rmtree(d, ignore_errors=True)
    try:
        os.rmdir(d)
    except OSError as e:
        # a missing directory is fine; anything else (e.g. the
        # directory is not empty) is a real problem
        if e.errno != errno.ENOENT:
            raise
def copytree(src, dst, symlinks=False, ignore=None):
    """
    Copy a directory tree. This differs from shutil.copytree()
    in that it allows destination directories to already exist.

    Errors are collected while copying and raised at the end as a
    single shutil.Error whose argument is a list of
    (srcname, dstname, message) tuples.
    """
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()

    if not os.path.isdir(dst):
        os.makedirs(dst)

    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore)
            else:
                shutil.copy2(srcname, dstname)
        except (IOError, os.error) as why:
            errors.append((srcname, dstname, str(why)))
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except shutil.Error as err:
            errors.extend(err.args[0])
    try:
        shutil.copystat(src, dst)
    except OSError as why:
        # can't copy file access times on Windows; the original caught
        # WindowsError here, which is a NameError on POSIX Python 3 --
        # use the winerror attribute check (as modern shutil does)
        if getattr(why, 'winerror', None) is None:
            # bugfix: append the tuple; extend() flattened it into
            # three separate (non-tuple) error entries
            errors.append((src, dst, str(why)))
    if errors:
        raise shutil.Error(errors)
def copy_files(srcpath, copy_list, outdir):
    """
    Copy the copy_list files and directories from the srcpath
    to the outdir. The copy_list contains (source, target) pairs.

    For now, it also ignores any *~ editor backup files, though
    this should probably be generalized (maybe using .gitignore?)
    Similarly the code that only copies some files (*.c, *.h,
    *.awk, Kconfig, Makefile) to avoid any build remnants in the
    kernel if they should exist.
    """
    for srcitem, tgtitem in copy_list:
        if srcitem == '':
            # empty source means: copy the whole tree
            copytree(srcpath, outdir, ignore=shutil.ignore_patterns('*~'))
        elif tgtitem[-1] == '/':
            # directory target: copy recursively, skipping object
            # files and editor backups
            def copy_ignore(dir, entries):
                return [i for i in entries
                        if i[-2:] == '.o' or i[-1] == '~']
            copytree(os.path.join(srcpath, srcitem),
                     os.path.join(outdir, tgtitem),
                     ignore=copy_ignore)
        else:
            # single file: make sure the target directory exists first
            try:
                os.makedirs(os.path.join(outdir, os.path.dirname(tgtitem)))
            except OSError as e:
                # ignore dirs we might have created just now
                if e.errno != errno.EEXIST:
                    raise
            shutil.copy(os.path.join(srcpath, srcitem),
                        os.path.join(outdir, tgtitem))
def copy_git_files(srcpath, copy_list, rev, outdir):
    """
    "Copy" files from a git repository. This really means listing them with
    ls-tree and then using git show to obtain all the blobs.

    srcpath is used as the git tree, rev selects the revision, and
    copy_list holds (source, target) pairs as produced by
    read_copy_list().
    """
    for srcitem, tgtitem in copy_list:
        # m = mode string (octal), t = object type, h = hash, f = path
        # NOTE(review): assumes ls_tree yields blobs only -- confirm
        # against lib/bpgit.py
        for m, t, h, f in git.ls_tree(rev=rev, files=(srcitem,), tree=srcpath):
            f = os.path.join(outdir, f.replace(srcitem, tgtitem))
            d = os.path.dirname(f)
            if not os.path.exists(d):
                os.makedirs(d)
            # use a context manager so the output file is closed (and
            # flushed) even if get_blob() raises
            with open(f, 'w') as outf:
                git.get_blob(h, outf, tree=srcpath)
            # preserve the mode bits recorded in the git tree
            os.chmod(f, int(m, 8))
def automatic_backport_mangle_c_file(name):
    """Flatten a C file path into a single file name usable in compat/."""
    return '-'.join(name.split('/'))
225 def add_automatic_backports(args
):
227 export
= re
.compile(r
'^EXPORT_SYMBOL(_GPL)?\((?P<sym>[^\)]*)\)')
228 bpi
= kconfig
.get_backport_info(os
.path
.join(args
.bpid
.target_dir
, 'compat', 'Kconfig'))
229 configtree
= kconfig
.ConfigTree(os
.path
.join(args
.bpid
.target_dir
, 'Kconfig'))
230 all_selects
= configtree
.all_selects()
231 for sym
, vals
in bpi
.items():
232 if sym
.startswith('BACKPORT_BUILD_'):
233 if not sym
[15:] in all_selects
:
234 disable_list
.append(sym
)
236 symtype
, module_name
, c_files
, h_files
= vals
241 files
.append((f
, os
.path
.join('compat', automatic_backport_mangle_c_file(f
))))
243 files
.append((os
.path
.join('include', f
),
244 os
.path
.join('include', os
.path
.dirname(f
), 'backport-' + os
.path
.basename(f
))))
245 if args
.git_revision
:
246 copy_git_files(args
.kerneldir
, files
, args
.git_revision
, args
.bpid
.target_dir
)
248 copy_files(args
.kerneldir
, files
, args
.bpid
.target_dir
)
250 # now add the Makefile line
251 mf
= open(os
.path
.join(args
.bpid
.target_dir
, 'compat', 'Makefile'), 'a+')
252 o_files
= [automatic_backport_mangle_c_file(f
)[:-1] + 'o' for f
in c_files
]
253 if symtype
== 'tristate':
255 raise Exception('backporting a module requires a #module-name')
257 mf
.write('%s-objs += %s\n' % (module_name
, of
))
258 mf
.write('obj-$(%s%s) += %s.o\n' % (args
.bpid
.full_prefix
, sym
, module_name
))
259 elif symtype
== 'bool':
260 mf
.write('compat-$(%s%s) += %s\n' % (args
.bpid
.full_prefix
, sym
, ' '.join(o_files
)))
262 # finally create the include file
265 for l
in open(os
.path
.join(args
.bpid
.target_dir
, 'compat',
266 automatic_backport_mangle_c_file(f
)), 'r'):
269 syms
.append(m
.group('sym'))
271 outf
= open(os
.path
.join(args
.bpid
.target_dir
, 'include', f
), 'w')
272 outf
.write('/* Automatically created during backport process */\n')
273 outf
.write('#ifndef %s%s\n' % (args
.bpid
.full_prefix
, sym
))
274 outf
.write('#include_next <%s>\n' % f
)
275 outf
.write('#else\n');
277 outf
.write('#undef %s\n' % s
)
278 outf
.write('#define %s LINUX_BACKPORT(%s)\n' % (s
, s
))
279 outf
.write('#include <%s>\n' % (os
.path
.dirname(f
) + '/backport-' + os
.path
.basename(f
), ))
280 outf
.write('#endif /* %s%s */\n' % (args
.bpid
.full_prefix
, sym
))
def git_debug_init(args):
    """
    Initialize a git repository in the output directory and commit the current
    code in it. This is only used for debugging the transformations this code
    will do to the output later.
    """
    if not args.gitdebug:
        return
    # Git supports re-initialization; although not well documented, it can
    # reset config stuff -- avoid that if the tree already exists.
    if not os.path.exists(os.path.join(args.bpid.project_dir, '.git')):
        git.init(tree=args.bpid.project_dir)
    git.commit_all("Copied backport", tree=args.bpid.project_dir)
def git_debug_snapshot(args, name):
    """Take a git snapshot for debugging, when --gitdebug is enabled."""
    if args.gitdebug:
        git.commit_all(name, tree=args.bpid.project_dir)
def get_rel_spec_stable(rel):
    """
    Returns release specs for a linux-stable backports based release.

    Parses names like "3.13-1", "3.13.4-1-u" or "3.13-rc5-1" into a
    dict of components (VERSION, PATCHLEVEL, SUBLEVEL, RC_VERSION for
    rc releases, RELMOD_UPDATE, RELMOD_TYPE); returns None when the
    name does not parse.
    """
    if "rc" in rel:
        m = re.match(r"(?P<VERSION>\d+)\.+"
                     r"(?P<PATCHLEVEL>\d+)[.]*"
                     r"(?P<SUBLEVEL>\d*)"
                     r"[-rc]+(?P<RC_VERSION>\d+)\-+"
                     r"(?P<RELMOD_UPDATE>\d+)[-]*"
                     r"(?P<RELMOD_TYPE>[usnpc]*)",
                     rel)
    else:
        m = re.match(r"(?P<VERSION>\d+)\.+"
                     r"(?P<PATCHLEVEL>\d+)[.]*"
                     r"(?P<SUBLEVEL>\d*)\-+"
                     r"(?P<RELMOD_UPDATE>\d+)[-]*"
                     r"(?P<RELMOD_TYPE>[usnpc]*)",
                     rel)
    if not m:
        return None
    return m.groupdict()
def get_rel_spec_next(rel):
    """
    Returns release specs for a linux-next backports based release.

    Parses names like "20150129", "20150129-1" or "20150129-1-u" into
    a dict with DATE_VERSION, RELMOD_UPDATE and RELMOD_TYPE; returns
    None when the name does not parse.
    """
    m = re.match(r"(?P<DATE_VERSION>\d+)[-]*"
                 r"(?P<RELMOD_UPDATE>\d*)[-]*"
                 r"(?P<RELMOD_TYPE>[usnpc]*)",
                 rel)
    if not m:
        return None
    return m.groupdict()
341 def get_rel_prep(rel
):
343 Returns a dict with prep work details we need prior to
344 uploading a backports release to kernel.org
346 rel_specs
= get_rel_spec_stable(rel
)
351 rel_specs
= get_rel_spec_next(rel
)
353 sys
.stdout
.write("rel: %s\n" % rel
)
355 if (rel_specs
['RELMOD_UPDATE'] == '0' or
356 rel_specs
['RELMOD_UPDATE'] == '1'):
359 date
= rel_specs
['DATE_VERSION']
364 if (len(month
) != 2):
372 rel_tag
= "backports-" + rel
.replace(rel_specs
['RELMOD_TYPE'], "")
375 if (not rel_specs
['RELMOD_UPDATE']):
377 if (rel_specs
['RELMOD_UPDATE'] == '0'):
379 ignore
+= rel_specs
['RELMOD_UPDATE']
380 if (rel_specs
['RELMOD_TYPE'] != ''):
381 ignore
+= rel_specs
['RELMOD_TYPE']
382 base_rel
= rel
.replace(ignore
, "")
383 paths
.append("v" + base_rel
)
384 rel_tag
= "v" + rel
.replace(rel_specs
['RELMOD_TYPE'], "")
386 rel_prep
= dict(stable
= is_stable
,
387 expected_tag
= rel_tag
,
388 paths_to_create
= paths
)
def create_tar_and_gz(tar_name, dir_to_tar):
    """
    We need both a tar file and gzip for kernel.org, the tar file
    gets signed, then we upload the compressed version, kup-server
    in the backend decompresses and verifies the tarball against
    the signature.

    dir_to_tar is archived under its own basename into tar_name, and
    a compressed copy is written next to it as tar_name + ".gz".
    """
    basename = os.path.basename(dir_to_tar)
    # context managers close (and flush) the archives even on error;
    # the original never closed any of the three handles
    with tarfile.open(tar_name, "w") as tar:
        tar.add(dir_to_tar, basename)
    # bugfix: the tarball must be read in *binary* mode -- text mode
    # ('r') breaks on Python 3 and can corrupt the bytes handed to gzip
    with open(tar_name, "rb") as tar_file:
        with gzip.GzipFile(tar_name + ".gz", "wb") as gz_file:
            gz_file.write(tar_file.read())
409 def upload_release(args
, rel_prep
, logwrite
=lambda x
:None):
411 Given a path of a relase make tarball out of it, PGP sign it, and
412 then upload it to kernel.org using kup.
414 The linux-next based release do not require a RELMOD_UPDATE
415 given that typically only one release is made per day. Using
416 RELMOD_UPDATE for these releases is allowed though and if
417 present it must be > 1.
419 The linux-stable based releases require a RELMOD_UPDATE.
421 RELMOD_UPDATE must be numeric and > 0 just as the RC releases
424 The tree must also be tagged with the respective release, without
425 the RELMOD_TYPE. For linux-next based releases this consists of
426 backports- followed by DATE_VERSION and if RELMOD_TYPE is present.
427 For linux-stable releases this consists of v followed by the
428 full release version except the RELMOD_TYPE.
430 Uploads will not be allowed if these rules are not followed.
432 korg_path
= "/pub/linux/kernel/projects/backports"
434 if (rel_prep
['stable']):
435 korg_path
+= "/stable"
437 parent
= os
.path
.dirname(args
.bpid
.project_dir
)
438 release
= os
.path
.basename(args
.bpid
.project_dir
)
439 tar_name
= parent
+ '/' + release
+ ".tar"
440 gzip_name
= tar_name
+ ".gz"
442 create_tar_and_gz(tar_name
, args
.bpid
.project_dir
)
444 logwrite(gpg
.sign(tar_name
, extra_args
=['--armor', '--detach-sign']))
446 logwrite("------------------------------------------------------")
448 if (not args
.kup_test
):
449 logwrite("About to upload, current target path contents:")
451 logwrite("kup-test: current target path contents:")
453 logwrite(kup
.ls(path
=korg_path
))
455 for path
in rel_prep
['paths_to_create']:
456 korg_path
+= '/' + path
457 if (not args
.kup_test
):
458 logwrite("create directory: %s" % korg_path
)
459 logwrite(kup
.mkdir(korg_path
))
461 if (not args
.kup_test
):
462 logwrite("upload file %s to %s" % (gzip_name
, korg_path
))
463 logwrite(kup
.put(gzip_name
, tar_name
+ '.asc', korg_path
))
464 logwrite("\nFinished upload!\n")
465 logwrite("Target path contents:")
466 logwrite(kup
.ls(path
=korg_path
))
468 kup_cmd
= "kup put /\n\t\t%s /\n\t\t%s /\n\t\t%s" % (gzip_name
, tar_name
+ '.asc', korg_path
)
469 logwrite("kup-test: skipping cmd: %s" % kup_cmd
)
471 def apply_patches(args
, desc
, source_dir
, patch_src
, target_dir
, logwrite
=lambda x
:None):
473 Given a path of a directories of patches and SmPL patches apply
474 them on the target directory. If requested refresh patches, or test
475 a specific SmPL patch.
477 logwrite('Applying patches from %s to %s ...' % (patch_src
, target_dir
))
478 test_cocci
= args
.test_cocci
or args
.profile_cocci
479 test_cocci_found
= False
482 for root
, dirs
, files
in os
.walk(os
.path
.join(source_dir
, patch_src
)):
484 if not test_cocci
and f
.endswith('.patch'):
485 patches
.append(os
.path
.join(root
, f
))
486 if f
.endswith('.cocci'):
488 if f
not in test_cocci
:
490 test_cocci_found
= True
492 logwrite("Testing Coccinelle SmPL patch: %s" % test_cocci
)
493 elif args
.profile_cocci
:
494 logwrite("Profiling Coccinelle SmPL patch: %s" % test_cocci
)
495 sempatches
.append(os
.path
.join(root
, f
))
497 prefix_len
= len(os
.path
.join(source_dir
, patch_src
)) + 1
498 for pfile
in patches
:
499 print_name
= pfile
[prefix_len
:]
500 # read the patch file
501 p
= patch
.fromfile(pfile
)
502 # complain if it's not a patch
504 raise Exception('No patch content found in %s' % print_name
)
505 # leading / seems to be stripped?
506 if 'dev/null' in p
.items
[0].source
:
507 raise Exception('Patches creating files are not supported (in %s)' % print_name
)
508 # check if the first file the patch touches exists, if so
509 # assume the patch needs to be applied -- otherwise continue
510 patched_file
= '/'.join(p
.items
[0].source
.split('/')[1:])
511 fullfn
= os
.path
.join(target_dir
, patched_file
)
512 if not os
.path
.exists(fullfn
):
514 logwrite("Not applying %s, not needed" % print_name
)
517 logwrite("Applying patch %s" % print_name
)
520 # but for refresh, of course look at all files the patch touches
521 for patchitem
in p
.items
:
522 patched_file
= '/'.join(patchitem
.source
.split('/')[1:])
523 fullfn
= os
.path
.join(target_dir
, patched_file
)
524 shutil
.copyfile(fullfn
, fullfn
+ '.orig_file')
526 process
= subprocess
.Popen(['patch', '-p1'], stdout
=subprocess
.PIPE
,
527 stderr
=subprocess
.STDOUT
, stdin
=subprocess
.PIPE
,
528 close_fds
=True, universal_newlines
=True,
530 output
= process
.communicate(input=open(pfile
, 'r').read())[0]
531 output
= output
.split('\n')
536 logwrite('> %s' % line
)
537 if process
.returncode
!= 0:
539 logwrite("Failed to apply changes from %s" % print_name
)
541 logwrite('> %s' % line
)
542 raise Exception('Patch failed')
545 pfilef
= open(pfile
+ '.tmp', 'a')
546 pfilef
.write(p
.top_header
)
548 for patchitem
in p
.items
:
549 patched_file
= '/'.join(patchitem
.source
.split('/')[1:])
550 fullfn
= os
.path
.join(target_dir
, patched_file
)
551 process
= subprocess
.Popen(['diff', '-p', '-u', patched_file
+ '.orig_file', patched_file
,
552 '--label', 'a/' + patched_file
,
553 '--label', 'b/' + patched_file
],
554 stdout
=pfilef
, close_fds
=True,
555 universal_newlines
=True, cwd
=target_dir
)
557 os
.unlink(fullfn
+ '.orig_file')
558 if not process
.returncode
in (0, 1):
559 logwrite("Failed to diff to refresh %s" % print_name
)
561 os
.unlink(pfile
+ '.tmp')
562 raise Exception('Refresh failed')
564 os
.rename(pfile
+ '.tmp', pfile
)
566 # remove orig/rej files that patch sometimes creates
567 for root
, dirs
, files
in os
.walk(target_dir
):
569 if f
[-5:] == '.orig' or f
[-4:] == '.rej':
570 os
.unlink(os
.path
.join(root
, f
))
571 git_debug_snapshot(args
, "apply %s patch %s" % (desc
, print_name
))
574 prefix_len
= len(os
.path
.join(source_dir
, patch_src
)) + 1
576 for cocci_file
in sempatches
:
577 # Until Coccinelle picks this up
578 pycocci
= os
.path
.join(source_dir
, 'devel/pycocci')
579 cmd
= [pycocci
, cocci_file
]
580 extra_spatch_args
= []
581 if args
.profile_cocci
:
582 cmd
.append('--profile-cocci')
583 cmd
.append(os
.path
.abspath(target_dir
))
584 print_name
= cocci_file
[prefix_len
:]
586 logwrite("Applying SmPL patch %s" % print_name
)
587 sprocess
= subprocess
.Popen(cmd
,
588 stdout
=subprocess
.PIPE
, stderr
=subprocess
.STDOUT
,
589 close_fds
=True, universal_newlines
=True,
591 output
= sprocess
.communicate()[0]
593 if sprocess
.returncode
!= 0:
594 logwrite("Failed to process SmPL patch %s" % print_name
)
595 raise Exception('SmPL patch failed')
596 output
= output
.split('\n')
601 logwrite('> %s' % line
)
603 # remove cocci_backup files
604 for root
, dirs
, files
in os
.walk(target_dir
):
606 if f
.endswith('.cocci_backup'):
607 os
.unlink(os
.path
.join(root
, f
))
608 git_debug_snapshot(args
, "apply %s SmPL patch %s" % (desc
, print_name
))
610 if test_cocci
and test_cocci_found
:
615 # Our binary requirements go here
618 req
.coccinelle('1.0.0-rc21')
619 if not req
.reqs_match():
622 # set up and parse arguments
623 parser
= argparse
.ArgumentParser(description
='generate backport tree')
624 parser
.add_argument('kerneldir', metavar
='<kernel tree>', type=str,
625 help='Kernel tree to copy drivers from')
626 parser
.add_argument('outdir', metavar
='<output directory>', type=str,
627 help='Directory to write the generated tree to')
628 parser
.add_argument('--copy-list', metavar
='<listfile>', type=argparse
.FileType('r'),
630 help='File containing list of files/directories to copy, default "copy-list"')
631 parser
.add_argument('--git-revision', metavar
='<revision>', type=str,
632 help='git commit revision (see gitrevisions(7)) to take objects from.' +
633 'If this is specified, the kernel tree is used as git object storage ' +
634 'and we use git ls-tree to get the files.')
635 parser
.add_argument('--clean', const
=True, default
=False, action
="store_const",
636 help='Clean output directory instead of erroring if it isn\'t empty')
637 parser
.add_argument('--refresh', const
=True, default
=False, action
="store_const",
638 help='Refresh patches as they are applied, the source dir will be modified!')
639 parser
.add_argument('--base-name', metavar
='<name>', type=str, default
='Linux',
640 help='name of base tree, default just "Linux"')
641 parser
.add_argument('--gitdebug', const
=True, default
=False, action
="store_const",
642 help='Use git, in the output tree, to debug the various transformation steps ' +
643 'that the tree generation makes (apply patches, ...)')
644 parser
.add_argument('--verbose', const
=True, default
=False, action
="store_const",
645 help='Print more verbose information')
646 parser
.add_argument('--extra-driver', nargs
=2, metavar
=('<source dir>', '<copy-list>'), type=str,
647 action
='append', default
=[], help='Extra driver directory/copy-list.')
648 parser
.add_argument('--kup', const
=True, default
=False, action
="store_const",
649 help='For maintainers: upload a release to kernel.org')
650 parser
.add_argument('--kup-test', const
=True, default
=False, action
="store_const",
651 help='For maintainers: do all the work as if you were about to ' +
652 'upload to kernel.org but do not do the final `kup put` ' +
653 'and also do not run any `kup mkdir` commands. This will ' +
654 'however run `kup ls` on the target paths so ' +
655 'at the very least we test your kup configuration. ' +
656 'If this is your first time uploading use this first!')
657 parser
.add_argument('--test-cocci', metavar
='<sp_file>', type=str, default
=None,
658 help='Only use the cocci file passed for Coccinelle, don\'t do anything else, ' +
659 'also creates a git repo on the target directory for easy inspection ' +
660 'of changes done by Coccinelle.')
661 parser
.add_argument('--profile-cocci', metavar
='<sp_file>', type=str, default
=None,
662 help='Only use the cocci file passed and pass --profile to Coccinelle, ' +
663 'also creates a git repo on the target directory for easy inspection ' +
664 'of changes done by Coccinelle.')
665 args
= parser
.parse_args()
667 # When building a package we use CPTCFG as we can rely on the
668 # fact that kconfig treats CONFIG_ as an environment variable
669 # requring less changes on code. For kernel integration we use
670 # the longer CONFIG_BACKPORT given that we'll be sticking to
671 # the kernel symbol namespace, to address that we do a final
672 # search / replace. Technically its possible to rely on the
673 # same prefix for packaging as with kernel integration but
674 # there are already some users of the CPTCFG prefix.
678 bpid
= Bp_Identity(integrate
= integrate
,
679 kconfig_prefix
= 'CONFIG_',
680 project_prefix
= 'BACKPORT_',
681 project_dir
= args
.outdir
,
682 target_dir
= os
.path
.join(args
.outdir
, 'backports/'),
683 target_dir_name
= 'backports/',
686 bpid
= Bp_Identity(integrate
= integrate
,
687 kconfig_prefix
= 'CPTCFG_',
689 project_dir
= args
.outdir
,
690 target_dir
= args
.outdir
,
691 target_dir_name
= '',
695 sys
.stdout
.write(msg
)
696 sys
.stdout
.write('\n')
699 return process(args
.kerneldir
, args
.copy_list
,
700 git_revision
=args
.git_revision
,
703 refresh
=args
.refresh
, base_name
=args
.base_name
,
704 gitdebug
=args
.gitdebug
, verbose
=args
.verbose
,
705 extra_driver
=args
.extra_driver
,
707 kup_test
=args
.kup_test
,
708 test_cocci
=args
.test_cocci
,
709 profile_cocci
=args
.profile_cocci
,
712 def process(kerneldir
, copy_list_file
, git_revision
=None,
714 clean
=False, refresh
=False, base_name
="Linux", gitdebug
=False,
715 verbose
=False, extra_driver
=[], kup
=False,
719 logwrite
=lambda x
:None,
720 git_tracked_version
=False):
722 def __init__(self
, kerneldir
, copy_list_file
,
723 git_revision
, bpid
, clean
, refresh
, base_name
,
724 gitdebug
, verbose
, extra_driver
, kup
,
728 self
.kerneldir
= kerneldir
729 self
.copy_list
= copy_list_file
730 self
.git_revision
= git_revision
733 self
.refresh
= refresh
734 self
.base_name
= base_name
735 self
.gitdebug
= gitdebug
736 self
.verbose
= verbose
737 self
.extra_driver
= extra_driver
739 self
.kup_test
= kup_test
740 self
.test_cocci
= test_cocci
741 self
.profile_cocci
= profile_cocci
742 if self
.test_cocci
or self
.profile_cocci
:
744 def git_paranoia(tree
=None, logwrite
=lambda x
:None):
745 data
= git
.paranoia(tree
)
747 logwrite('Cannot use %s' % tree
)
748 logwrite('%s' % data
['output'])
751 logwrite('Validated tree: %s' % tree
)
753 args
= Args(kerneldir
, copy_list_file
,
754 git_revision
, bpid
, clean
, refresh
, base_name
,
755 gitdebug
, verbose
, extra_driver
, kup
, kup_test
,
756 test_cocci
, profile_cocci
)
759 # start processing ...
760 if (args
.kup
or args
.kup_test
):
761 git_paranoia(source_dir
, logwrite
)
762 git_paranoia(kerneldir
, logwrite
)
764 rel_describe
= git
.describe(rev
=None, tree
=source_dir
, extra_args
=['--dirty'])
765 release
= os
.path
.basename(bpid
.target_dir
)
766 version
= release
.replace("backports-", "")
768 rel_prep
= get_rel_prep(version
)
770 logwrite('Invalid backports release name: %s' % release
)
771 logwrite('For rules on the release name see upload_release()')
773 rel_type
= "linux-stable"
774 if (not rel_prep
['stable']):
775 rel_type
= "linux-next"
776 if (rel_prep
['expected_tag'] != rel_describe
):
777 logwrite('Unexpected %s based backports release tag on' % rel_type
)
778 logwrite('the backports tree tree: %s\n' % rel_describe
)
779 logwrite('You asked to make a release with this ')
780 logwrite('directory name: %s' % release
)
781 logwrite('The actual expected tag we should find on')
782 logwrite('the backports tree then is: %s\n' % rel_prep
['expected_tag'])
783 logwrite('For rules on the release name see upload_release()')
786 copy_list
= read_copy_list(args
.copy_list
)
787 deplist
= read_dependencies(os
.path
.join(source_dir
, 'dependencies'))
789 # validate output directory
790 check_output_dir(bpid
.target_dir
, args
.clean
)
793 backport_files
= [(x
, x
) for x
in [
794 'Kconfig', 'Kconfig.package.hacks',
795 'Makefile', 'Makefile.build', 'Makefile.kernel', '.gitignore',
796 'Makefile.real', 'compat/', 'backport-include/', 'kconf/',
797 'scripts/', '.blacklist.map',
799 if not args
.git_revision
:
800 logwrite('Copy original source files ...')
802 logwrite('Get original source files from git ...')
804 copy_files(os
.path
.join(source_dir
, 'backport'), backport_files
, bpid
.target_dir
)
808 if not args
.git_revision
:
809 copy_files(args
.kerneldir
, copy_list
, bpid
.target_dir
)
811 copy_git_files(args
.kerneldir
, copy_list
, args
.git_revision
, bpid
.target_dir
)
813 # FIXME: should we add a git version of this (e.g. --git-extra-driver)?
814 for src
, copy_list
in args
.extra_driver
:
815 if (args
.kup
or args
.kup_test
):
817 copy_files(src
, read_copy_list(open(copy_list
, 'r')), bpid
.target_dir
)
819 git_debug_snapshot(args
, 'Add driver sources')
821 disable_list
= add_automatic_backports(args
)
823 bpcfg
= kconfig
.ConfigTree(os
.path
.join(bpid
.target_dir
, 'compat', 'Kconfig'))
824 bpcfg
.disable_symbols(disable_list
)
825 git_debug_snapshot(args
, 'Add automatic backports')
827 apply_patches(args
, "backport", source_dir
, 'patches', bpid
.target_dir
, logwrite
)
829 # some post-processing is required
830 configtree
= kconfig
.ConfigTree(os
.path
.join(bpid
.target_dir
, 'Kconfig'))
831 orig_symbols
= configtree
.symbols()
833 logwrite('Modify Kconfig tree ...')
834 configtree
.prune_sources(ignore
=['Kconfig.kernel', 'Kconfig.versions'])
835 git_debug_snapshot(args
, "prune Kconfig tree")
837 if not bpid
.integrate
:
838 configtree
.force_tristate_modular()
839 git_debug_snapshot(args
, "force tristate options modular")
841 configtree
.modify_selects()
842 git_debug_snapshot(args
, "convert select to depends on")
844 # write the versioning file
845 if git_tracked_version
:
846 backports_version
= "(see git)"
847 kernel_version
= "(see git)"
849 backports_version
= git
.describe(tree
=source_dir
, extra_args
=['--long'])
850 kernel_version
= git
.describe(rev
=args
.git_revision
or 'HEAD',
852 extra_args
=['--long'])
853 f
= open(os
.path
.join(bpid
.target_dir
, 'versions'), 'w')
854 f
.write('BACKPORTS_VERSION="%s"\n' % backports_version
)
855 f
.write('BACKPORTED_KERNEL_VERSION="%s"\n' % kernel_version
)
856 f
.write('BACKPORTED_KERNEL_NAME="%s"\n' % args
.base_name
)
857 if git_tracked_version
:
858 f
.write('BACKPORTS_GIT_TRACKED="backport tracker ID: $(shell git rev-parse HEAD 2>/dev/null || echo \'not built in git tree\')"\n')
860 git_debug_snapshot(args
, "add versions files")
862 symbols
= configtree
.symbols()
864 # write local symbol list -- needed during packaging build
865 if not bpid
.integrate
:
866 f
= open(os
.path
.join(bpid
.project_dir
, '.local-symbols'), 'w')
868 f
.write('%s=\n' % sym
)
870 git_debug_snapshot(args
, "add symbols files")
872 # add defconfigs that we want
873 defconfigs_dir
= os
.path
.join(source_dir
, 'backport', 'defconfigs')
874 os
.mkdir(os
.path
.join(bpid
.target_dir
, 'defconfigs'))
875 for dfbase
in os
.listdir(defconfigs_dir
):
876 copy_defconfig
= True
877 dfsrc
= os
.path
.join(defconfigs_dir
, dfbase
)
878 for line
in open(dfsrc
, 'r'):
883 if sym
+ '=' in line
:
887 copy_defconfig
= False
890 shutil
.copy(dfsrc
, os
.path
.join(bpid
.target_dir
, 'defconfigs', dfbase
))
892 git_debug_snapshot(args
, "add (useful) defconfig files")
894 logwrite('Rewrite Makefiles and Kconfig files ...')
896 # rewrite Makefile and source symbols
899 for some_symbols
in [orig_symbols
[i
:i
+ 50] for i
in range(0, len(orig_symbols
), 50)]:
900 r
= 'CONFIG_((' + '|'.join([s
+ '(_MODULE)?' for s
in some_symbols
]) + ')([^A-Za-z0-9_]|$))'
901 regexes
.append(re
.compile(r
, re
.MULTILINE
))
902 for root
, dirs
, files
in os
.walk(bpid
.target_dir
):
903 # don't go into .git dir (possible debug thing)
907 data
= open(os
.path
.join(root
, f
), 'r').read()
909 data
= r
.sub(r
'' + bpid
.full_prefix
+ '\\1', data
)
910 data
= re
.sub(r
'\$\(srctree\)', '$(backport_srctree)', data
)
911 data
= re
.sub(r
'-Idrivers', '-I$(backport_srctree)/drivers', data
)
913 data
= re
.sub(r
'CPTCFG_', bpid
.full_prefix
, data
)
914 fo
= open(os
.path
.join(root
, f
), 'w')
918 git_debug_snapshot(args
, "rename config symbol / srctree usage")
920 # disable unbuildable Kconfig symbols and stuff Makefiles that doesn't exist
921 maketree
= make
.MakeTree(os
.path
.join(bpid
.target_dir
, 'Makefile.kernel'))
923 disable_makefile
= []
924 for sym
in maketree
.get_impossible_symbols():
925 disable_kconfig
.append(sym
[7:])
926 disable_makefile
.append(sym
[7:])
928 configtree
.disable_symbols(disable_kconfig
)
929 git_debug_snapshot(args
, "disable impossible kconfig symbols")
931 # add kernel version dependencies to Kconfig, from the dependency list
933 for sym
in tuple(deplist
.keys()):
935 for dep
in deplist
[sym
]:
936 if "kconfig:" in dep
:
937 kconfig_expr
= dep
.replace('kconfig: ', '')
938 new
.append(kconfig_expr
)
939 elif (dep
== "DISABLE"):
940 new
.append('BACKPORT_DISABLED_KCONFIG_OPTION')
942 new
.append('!BACKPORT_KERNEL_%s' % dep
.replace('.', '_'))
944 deplist
[sym
] = ["BACKPORT_" + x
for x
in new
]
947 configtree
.add_dependencies(deplist
)
948 git_debug_snapshot(args
, "add kernel version dependencies")
950 # disable things in makefiles that can't be selected and that the
951 # build shouldn't recurse into because they don't exist -- if we
952 # don't do that then a symbol from the kernel could cause the build
953 # to attempt to recurse and fail
955 # Note that we split the regex after 50 symbols, this is because of a
956 # limitation in the regex implementation (it only supports 100 nested
957 # groups -- 50 seemed safer and is still fast)
959 for some_symbols
in [disable_makefile
[i
:i
+ 50] for i
in range(0, len(disable_makefile
), 50)]:
960 r
= '^([^#].*((' + bpid
.full_prefix_resafe
+ '|CONFIG_)(' + '|'.join([s
for s
in some_symbols
]) + ')))'
961 regexes
.append(re
.compile(r
, re
.MULTILINE
))
962 for f
in maketree
.get_makefiles():
963 data
= open(f
, 'r').read()
965 data
= r
.sub(r
'#\1', data
)
969 git_debug_snapshot(args
, "disable unsatisfied Makefile parts")
971 if (args
.kup
or args
.kup_test
):
974 if not req
.reqs_match():
976 upload_release(args
, rel_prep
, logwrite
=logwrite
)
981 if __name__
== '__main__':