#!/usr/bin/env perl
# vim:ts=4:sw=4:expandtab
# © 2010-2011 Michael Stapelberg and contributors
package complete_run;
use strict;
use warnings;
use v5.10;
# the following are modules which ship with Perl (>= 5.10):
use Pod::Usage;
use Cwd qw(abs_path);
use File::Temp qw(tempfile tempdir);
use Getopt::Long;
use POSIX ();
use TAP::Harness;
use TAP::Parser;
use TAP::Parser::Aggregator;
use Time::HiRes qw(time);
# these are shipped with the testsuite
use lib qw(lib);
use StartXDummy;
use StatusLine;
use TestWorker;
# the following modules are not shipped with Perl
use AnyEvent;
use AnyEvent::Util;
use AnyEvent::Handle;
use AnyEvent::I3 qw(:all);
use X11::XCB::Connection;
use JSON::XS; # AnyEvent::I3 depends on it, too.

# Close superfluous file descriptors which were passed on to us, for example by
# running in a VIM subshell or similar situations.
AnyEvent::Util::close_all_fds_except(0, 1, 2);

# convenience wrapper to write to the log file
my $log;
sub Log { say $log "@_" }

my %timings;
my $help = 0;
# Number of tests to run in parallel. Important to know how many Xdummy
# instances we need to start (unless @displays are given). Defaults to
# num_cores * 2.
my $parallel = undef;
my @displays = ();
my %options = (
    valgrind => 0,
    strace => 0,
    coverage => 0,
    restart => 0,
);

my $result = GetOptions(
"coverage-testing" => \$options{coverage},
|
|
|
|
|
"valgrind" => \$options{valgrind},
|
|
|
|
|
"strace" => \$options{strace},
|
2011-07-25 15:37:13 +02:00
|
|
|
|
"display=s" => \@displays,
|
2011-11-08 00:04:45 +01:00
|
|
|
|
"parallel=i" => \$parallel,
|
2011-11-07 22:21:51 +01:00
|
|
|
|
"help|?" => \$help,
|
2011-05-08 20:08:35 +02:00
|
|
|
|
);
|
|
|
|
|
|
2011-11-08 00:04:45 +01:00
|
|
|
|
pod2usage(-verbose => 2, -exitcode => 0) if $help;
|
2011-11-07 22:21:51 +01:00
|
|
|
|
|
2011-07-25 15:37:13 +02:00
|
|
|
|
@displays = split(/,/, join(',', @displays));
|
|
|
|
|
@displays = map { s/ //g; $_ } @displays;
|
|
|
|
|
|
2011-12-17 12:19:31 +01:00
|
|
|
|
# 2: get a list of all testcases
|
|
|
|
|
my @testfiles = @ARGV;

# if no files were passed on the command line, run all tests from t/
@testfiles = <t/*.t> if @testfiles == 0;

my $numtests = scalar @testfiles;

# When the user specifies displays, we don’t run multi-monitor tests at all
# (because we don’t know which display number belongs to the X server with
# multiple monitors).
my $multidpy = undef;

# No displays specified, let’s start some Xdummy instances.
if (@displays == 0) {
    my $dpyref;
    ($dpyref, $multidpy) = start_xdummy($parallel, $numtests);
    @displays = @$dpyref;
}

# 2: create an output directory for this test-run
my $outdir = "testsuite-";
$outdir .= POSIX::strftime("%Y-%m-%d-%H-%M-%S-", localtime());
$outdir .= `git describe --tags`;
chomp($outdir);
mkdir($outdir) or die "Could not create $outdir";
unlink("latest") if -e "latest";
symlink("$outdir", "latest") or die "Could not symlink latest to $outdir";

# connect to all displays for two reasons:
# 1: check if the display actually works
# 2: keep the connection open so that i3 is not the only client. this prevents
#    the X server from exiting (Xdummy will restart it, but sometimes not
#    quickly enough)
my @single_worker;
for my $display (@displays) {
    my $screen;
    my $x = X11::XCB::Connection->new(display => $display);
    if ($x->has_error) {
        die "Could not connect to display $display\n";
    } else {
        # start a TestWorker for each display
        push @single_worker, worker($display, $x, $outdir, \%options);
    }
}

my @multi_worker;
if (defined($multidpy)) {
    my $x = X11::XCB::Connection->new(display => $multidpy);
    if ($x->has_error) {
        die "Could not connect to multi-monitor display $multidpy\n";
    } else {
        push @multi_worker, worker($multidpy, $x, $outdir, \%options);
    }
}

# Read previous timing information, if available. We will be able to roughly
# predict the test duration and schedule a good order for the tests.
my $timingsjson = StartXDummy::slurp('.last_run_timings.json');
%timings = %{decode_json($timingsjson)} if length($timingsjson) > 0;

# Re-order the files so that those which took the longest time in the previous
# run will be started at the beginning to not delay the whole run longer than
# necessary.
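# Tests without a recorded timing get a large default value (999) so that they
# are scheduled first.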
@testfiles = map { $_->[0] }
    sort { $b->[1] <=> $a->[1] }
    map { [$_, $timings{$_} // 999] } @testfiles;

printf("\nRough time estimate for this run: %.2f seconds\n\n", $timings{GLOBAL})
    if exists($timings{GLOBAL});

# Forget the old timings, we don’t necessarily run the same set of tests as
# before. Otherwise we would end up with left-overs.
%timings = (GLOBAL => time());

my $logfile = "$outdir/complete-run.log";
open $log, '>', $logfile or die "Could not create '$logfile': $!";
say "Writing logfile to '$logfile'...";

# 3: run all tests
my @done;
my $num = @testfiles;
my $harness = TAP::Harness->new({ });
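
# Test files with a number below 500 run on the single-monitor displays; files
# numbered 500 and above run on the multi-monitor display.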
my @single_monitor_tests = grep { m,^t/([0-9]+)-, && $1 < 500 } @testfiles;
my @multi_monitor_tests = grep { m,^t/([0-9]+)-, && $1 >= 500 } @testfiles;

my $aggregator = TAP::Parser::Aggregator->new();
$aggregator->start();

status_init(displays => [ @displays, $multidpy ], tests => $num);

my $single_cv = AE::cv;
my $multi_cv = AE::cv;

# We start tests concurrently: For each display, one test gets started. Every
# test starts another test after completing.
for (@single_worker) {
    $single_cv->begin;
    take_job($_, $single_cv, \@single_monitor_tests);
}
for (@multi_worker) {
    $multi_cv->begin;
    take_job($_, $multi_cv, \@multi_monitor_tests);
}

$single_cv->recv;
$multi_cv->recv;

$aggregator->stop();

# print empty lines to separate failed tests from the status lines
print "\n\n";

for (@done) {
    my ($test, $output) = @$_;
    say "no output for $test" unless $output;
    Log "output for $test:";
    Log $output;
    # print error messages of failed tests
    say for $output =~ /^not ok.+\n+((?:^#.+\n)+)/mg;
}

# 4: print summary
$harness->summary($aggregator);

close $log;

# 5: Save the timings for better scheduling/prediction next run.
$timings{GLOBAL} = time() - $timings{GLOBAL};
open(my $fh, '>', '.last_run_timings.json');
print $fh encode_json(\%timings);
close($fh);

# 6: Print the slowest test files.
my @slowest = map { $_->[0] }
    sort { $b->[1] <=> $a->[1] }
    map { [$_, $timings{$_}] }
    grep { !/^GLOBAL$/ } keys %timings;
say '';
say 'The slowest tests are:';
printf("\t%s with %.2f seconds\n", $_, $timings{$_})
    for @slowest[0..($#slowest > 4 ? 4 : $#slowest)];

# When we are running precisely one test, print the output. Makes developing
# with a single testcase easier.
if ($numtests == 1) {
    say '';
    say 'Test output:';
    say StartXDummy::slurp($logfile);
}

END { cleanup() }

exit 0;

#
# Takes a test from the beginning of the given test queue and runs it.
#
# The TAP::Parser (which reads the test output) will get called as soon as
# there is some activity on the stdout file descriptor of the test process
# (using an AnyEvent->io watcher).
#
# When a test completes and no tests are left in the queue, $cv->end gets
# called, so that testing finishes once every worker is done.
#
sub take_job {
    my ($worker, $cv, $tests) = @_;

    my $test = shift @$tests
        or return $cv->end;

    my $display = $worker->{display};

    Log status($display, "$test: starting");
    $timings{$test} = time();
    worker_next($worker, $test);

    # create a TAP::Parser with an in-memory fh
    my $output;
    my $parser = TAP::Parser->new({
        source => do { open(my $fh, '<', \$output); $fh },
    });

    my $ipc = $worker->{ipc};

    my $w;
    $w = AnyEvent->io(
        fh => $ipc,
        poll => 'r',
        cb => sub {
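            # Read the test output from the worker in 4 KiB chunks and feed
            # only complete lines to the TAP parser.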
            state $tests_completed = 0;
            state $partial = '';

            sysread($ipc, my $buf, 4096) or die "sysread: $!";

            if ($partial) {
                $buf = $partial . $buf;
                $partial = '';
            }

            # make sure we feed TAP::Parser complete lines so it doesn't blow up
            if (substr($buf, -1, 1) ne "\n") {
                my $nl = rindex($buf, "\n");
                if ($nl == -1) {
                    $partial = $buf;
                    return;
                }

                # strip the partial line from the buffer; it gets prepended to
                # the next chunk we read
                $partial = substr($buf, $nl + 1);
                $buf = substr($buf, 0, $nl + 1);
            }

            # count lines before stripping the EOF marker, otherwise we might
            # end up with for (1 .. 0) { }, which would effectively skip the loop
            my $lines = $buf =~ tr/\n//;
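            # the per-test EOF marker signals that this test's output is complete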
            my $t_eof = $buf =~ s/^$TestWorker::EOF$//m;

            $output .= $buf;

            for (1 .. $lines) {
                my $result = $parser->next;
                if (defined($result) and $result->is_test) {
                    $tests_completed++;
                    status($display, "$test: [$tests_completed/??] ");
                }
            }

            return unless $t_eof;

            Log status($display, "$test: finished");
            $timings{$test} = time() - $timings{$test};
            status_completed(scalar @done);

            $aggregator->add($test, $parser);
            push @done, [ $test, $output ];

            undef $w;
            take_job($worker, $cv, $tests);
        }
    );
}

sub cleanup {
    $_->() for our @CLEANUP;
    exit;
}

# must be in a BEGIN block because we C<exit 0> above
BEGIN {
    $SIG{$_} = sub {
        require Carp; Carp::cluck("Caught SIG$_[0]\n");
        cleanup();
    } for qw(INT TERM QUIT KILL PIPE)
}

__END__

=head1 NAME

complete-run.pl - Run the i3 testsuite

=head1 SYNOPSIS

complete-run.pl [files...]

=head1 EXAMPLE

To run the whole testsuite on a reasonable number of Xdummy instances (your
running X11 will not be touched), run:

    ./complete-run.pl

To run only a specific test (useful when developing a new feature), run:

    ./complete-run.pl t/100-fullscreen.t

=head1 OPTIONS

=over 8

=item B<--display>

Specifies which X11 display should be used. Can be specified multiple times and
will parallelize the tests:

    # Run tests on the second X server
    ./complete-run.pl -d :1

    # Run four tests in parallel on some Xdummy servers
    ./complete-run.pl -d :1,:2,:3,:4

Note that it is not necessary to specify this anymore. If omitted,
complete-run.pl will start (num_cores * 2) Xdummy instances.

=item B<--valgrind>

Runs i3 under valgrind to find memory problems. The output will be available in
C<latest/valgrind-for-$test.log>.
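
For example, to check a single test for memory errors:

    ./complete-run.pl --valgrind t/100-fullscreen.t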

=item B<--strace>

Runs i3 under strace to trace system calls. The output will be available in
C<latest/strace-for-$test.log>.
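
For example, to trace the system calls made while running a single test:

    ./complete-run.pl --strace t/100-fullscreen.t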

=item B<--coverage-testing>

Exits i3 cleanly (instead of kill -9) to make coverage testing work properly.

=item B<--parallel>

Number of Xdummy instances to start (if you don’t want to start num_cores * 2
instances for some reason).

    # Run all tests on a single Xdummy instance
    ./complete-run.pl -p 1