#!/usr/bin/env perl
# vim:ts=4:sw=4:expandtab
# © 2010-2012 Michael Stapelberg and contributors
package complete_run;
use strict;
use warnings;
use v5.10;
use utf8;
# the following are modules which ship with Perl (>= 5.10):
use Pod::Usage;
use Cwd qw(abs_path);
use File::Temp qw(tempfile tempdir);
use Getopt::Long;
use POSIX ();
use TAP::Harness;
use TAP::Parser;
use TAP::Parser::Aggregator;
use Time::HiRes qw(time);
use IO::Handle;
# these are shipped with the testsuite
use lib qw(lib);
use StartXDummy;
use StatusLine;
use TestWorker;
# the following modules are not shipped with Perl
use AnyEvent;
use AnyEvent::Util;
use AnyEvent::Handle;
use AnyEvent::I3 qw(:all);
use X11::XCB::Connection;
use JSON::XS; # AnyEvent::I3 depends on it, too.
binmode STDOUT, ':utf8';
binmode STDERR, ':utf8';
# Close superfluous file descriptors which were passed by running in a VIM
# subshell or situations like that.
AnyEvent::Util::close_all_fds_except(0, 1, 2);
# convenience wrapper to write to the log file
my $log;
sub Log { say $log "@_" }
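# ($log is opened further down, once the output directory for this run exists)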
my %timings;
my $help = 0;
# Number of tests to run in parallel. Important to know how many Xdummy
# instances we need to start (unless displays are given via --display).
# Defaults to num_cores * 2.
my $parallel = undef;
my @displays = ();
my %options = (
    valgrind => 0,
    strace => 0,
    xtrace => 0,
    coverage => 0,
    restart => 0,
);
my $keep_xdummy_output = 0;
my $result = GetOptions(
    "coverage-testing" => \$options{coverage},
    "keep-xdummy-output" => \$keep_xdummy_output,
    "valgrind" => \$options{valgrind},
    "strace" => \$options{strace},
    "xtrace" => \$options{xtrace},
    "display=s" => \@displays,
    "parallel=i" => \$parallel,
    "help|?" => \$help,
);
pod2usage(-verbose => 2, -exitcode => 0) if $help;
# Check for missing executables
my @binaries = qw(
../i3
../i3bar/i3bar
../i3-config-wizard/i3-config-wizard
../i3-dump-log/i3-dump-log
../i3-input/i3-input
../i3-msg/i3-msg
../i3-nagbar/i3-nagbar
);
foreach my $binary (@binaries) {
    die "$binary executable not found, did you run “make”?" unless -e $binary;
    die "$binary is not an executable" unless -x $binary;
}
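# Each --display option may itself contain a comma-separated list (e.g.
# "-d :1,:2"), so flatten everything into a single list and strip stray spaces.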
@displays = split(/,/, join(',', @displays));
@displays = map { s/ //g; $_ } @displays;
# 1: get a list of all testcases
my @testfiles = @ARGV;
# if no files were passed on command line, run all tests from t/
@testfiles = <t/*.t> if @testfiles == 0;
my $numtests = scalar @testfiles;
# No displays specified, let's start some Xdummy instances.
if (@displays == 0) {
    @displays = start_xdummy($parallel, $numtests, $keep_xdummy_output);
}
# 2: create an output directory for this test-run
my $outdir = "testsuite-";
$outdir .= POSIX::strftime("%Y-%m-%d-%H-%M-%S-", localtime());
$outdir .= `git describe --tags`;
chomp($outdir);
mkdir($outdir) or die "Could not create $outdir";
unlink("latest") if -e "latest";
symlink("$outdir", "latest") or die "Could not symlink latest to $outdir";
# connect to all displays for two reasons:
# 1: check if the display actually works
# 2: keep the connection open so that i3 is not the only client. this prevents
#    the X server from exiting (Xdummy will restart it, but sometimes not
#    quickly enough)
my @single_worker;
for my $display (@displays) {
    my $x = X11::XCB::Connection->new(display => $display);
    if ($x->has_error) {
        die "Could not connect to display $display\n";
    } else {
        # start a TestWorker for each display
        push @single_worker, worker($display, $x, $outdir, \%options);
    }
}
# Read previous timing information, if available. We will be able to roughly
# predict the test duration and schedule a good order for the tests.
my $timingsjson = StartXDummy::slurp('.last_run_timings.json');
%timings = %{decode_json($timingsjson)} if length($timingsjson) > 0;
# Re-order the files so that those which took the longest time in the previous
# run will be started at the beginning to not delay the whole run longer than
# necessary.
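# (A Schwartzian transform: pair each file with its previous duration, sort
# descending by duration, then unwrap. Files without timing data get 999 so
# that they are scheduled first.)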
@testfiles = map  { $_->[0] }
             sort { $b->[1] <=> $a->[1] }
             map  { [ $_, $timings{$_} // 999 ] } @testfiles;
# Run 000-load-deps.t first to bail out early when dependencies are missing.
my $loadtest = "t/000-load-deps.t";
if (grep { $_ eq $loadtest } @testfiles) {
    @testfiles = ($loadtest, grep { $_ ne $loadtest } @testfiles);
}
printf("\nRough time estimate for this run: %.2f seconds\n\n", $timings{GLOBAL})
    if exists($timings{GLOBAL});
# Forget the old timings; we don't necessarily run the same set of tests as
# before. Otherwise we would end up with leftovers.
%timings = (GLOBAL => time());
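# GLOBAL temporarily holds the start timestamp; it is turned into the total
# duration right before the timings are saved in step 5.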
my $logfile = "$outdir/complete-run.log";
open $log, '>', $logfile or die "Could not create '$logfile': $!";
$log->autoflush(1);
say "Writing logfile to '$logfile'...";
# 3: run all tests
my @done;
my $num = @testfiles;
my $harness = TAP::Harness->new({ });
my $aggregator = TAP::Parser::Aggregator->new();
$aggregator->start();
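# start()/stop() mark the wall-clock boundaries of the run so that the summary
# in step 4 can report the total elapsed time.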
status_init(displays => \@displays, tests => $num);
my $single_cv = AE::cv;
# We start tests concurrently: For each display, one test gets started. Every
# test starts another test after completing.
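# $single_cv is a counting condvar: each worker calls ->begin here, and
# take_job() calls ->end once the test queue is empty, so the ->recv below
# returns only after every worker has run out of work.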
for (@single_worker) {
    $single_cv->begin;
    take_job($_, $single_cv, \@testfiles);
}
$single_cv->recv;
$aggregator->stop();
# print empty lines to separate failed tests from the status lines
print "\n\n";
for (@done) {
    my ($test, $output) = @$_;
    say "no output for $test" unless $output;
    Log "output for $test:";
    Log $output;
    # print the diagnostics of failed tests: capture the block of comment
    # lines (#) that follows each "not ok" line
    say for $output =~ /^not ok.+\n+((?:^#.+\n)+)/mg;
}
# 4: print summary
$harness->summary($aggregator);
close $log;
# 5: Save the timings for better scheduling/prediction next run.
$timings{GLOBAL} = time() - $timings{GLOBAL};
open(my $fh, '>', '.last_run_timings.json') or die "Could not save timings: $!";
print $fh encode_json(\%timings);
close($fh);
# 6: Print the slowest test files.
my @slowest = map  { $_->[0] }
              sort { $b->[1] <=> $a->[1] }
              map  { [ $_, $timings{$_} ] }
              grep { !/^GLOBAL$/ } keys %timings;
say '';
say 'The slowest tests are:';
printf("\t%s with %.2f seconds\n", $_, $timings{$_})
for @slowest[0..($#slowest > 4 ? 4 : $#slowest)];
# When we are running precisely one test, print the output. Makes developing
# with a single testcase easier.
if ($numtests == 1) {
    say '';
    say 'Test output:';
    say StartXDummy::slurp($logfile);
}
END { cleanup() }
exit 0;
#
# Takes a test from the beginning of @testfiles and runs it.
#
# The TAP::Parser (which reads the test output) will get called as soon as
# there is some activity on the stdout file descriptor of the test process
# (using an AnyEvent->io watcher).
#
# When a test completes and @done contains $num entries, the $cv condvar gets
# triggered to finish testing.
#
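# Note that $output doubles as the backing store for the parser's in-memory
# filehandle: everything appended to it by the watcher becomes readable by
# TAP::Parser.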
sub take_job {
    my ($worker, $cv, $tests) = @_;

    my $test = shift @$tests
        or return $cv->end;

    my $display = $worker->{display};

    Log status($display, "$test: starting");
    $timings{$test} = time();
    worker_next($worker, $test);

    # create a TAP::Parser with an in-memory fh
    my $output;
    my $parser = TAP::Parser->new({
        source => do { open(my $fh, '<', \$output); $fh },
    });
    my $ipc = $worker->{ipc};

    my $w;
    $w = AnyEvent->io(
        fh => $ipc,
        poll => 'r',
        cb => sub {
            state $tests_completed = 0;
            state $partial = '';

            sysread($ipc, my $buf, 4096) or die "sysread: $!";

            if ($partial) {
                $buf = $partial . $buf;
                $partial = '';
            }

            # make sure we feed TAP::Parser complete lines so it doesn't blow up
            if (substr($buf, -1, 1) ne "\n") {
                my $nl = rindex($buf, "\n");
                if ($nl == -1) {
                    $partial = $buf;
                    return;
                }

                # chop the trailing partial line off the buffer and keep it
                # around for the next read
                $partial = substr($buf, $nl + 1);
                $buf = substr($buf, 0, $nl + 1);
            }
            # count the lines before stripping the EOF marker; otherwise we
            # might end up with for (1 .. 0) { }, which would effectively
            # skip the loop
            my $lines = $buf =~ tr/\n//;
            my $t_eof = $buf =~ s/^$TestWorker::EOF$//m;

            $output .= $buf;

            for (1 .. $lines) {
                my $result = $parser->next;
                next unless defined($result);
                if ($result->is_test) {
                    $tests_completed++;
                    status($display, "$test: [$tests_completed/??] ");
                } elsif ($result->is_bailout) {
                    Log status($display, "$test: BAILOUT");
                    status_completed(scalar @done);
                    say "";
                    say "test $test bailed out: " . $result->explanation;
                    exit 1;
                }
            }
            return unless $t_eof;

            Log status($display, "$test: finished");
            $timings{$test} = time() - $timings{$test};
            status_completed(scalar @done);
            $aggregator->add($test, $parser);
            push @done, [ $test, $output ];

            undef $w;

            take_job($worker, $cv, $tests);
        }
    );
}
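# Note that take_job() is re-entered from the I/O callback above: each worker
# therefore processes its tests sequentially, while the workers themselves run
# in parallel (one per display).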
sub cleanup {
    $_->() for our @CLEANUP;
    exit;
}
# must be in a BEGIN block because we C<exit 0> above
BEGIN {
    # (SIGKILL cannot be caught, so no handler is installed for it)
    $SIG{$_} = sub {
        require Carp; Carp::cluck("Caught SIG$_[0]\n");
        cleanup();
    } for qw(INT TERM QUIT PIPE);
}

__END__

=head1 NAME

complete-run.pl - Run the i3 testsuite

=head1 SYNOPSIS

complete-run.pl [files...]

=head1 EXAMPLE

To run the whole testsuite on a reasonable number of Xdummy instances (your
running X11 will not be touched), run:

    ./complete-run.pl

To run only a specific test (useful when developing a new feature), run:

    ./complete-run.pl t/100-fullscreen.t

=head1 OPTIONS

=over 8

=item B<--display>

Specifies which X11 display should be used. Can be specified multiple times
and will parallelize the tests:

    # Run tests on the second X server
    ./complete-run.pl -d :1

    # Run four tests in parallel on some Xdummy servers
    ./complete-run.pl -d :1,:2,:3,:4

Note that it is not necessary to specify this anymore. If omitted,
complete-run.pl will start (num_cores * 2) Xdummy instances.

=item B<--valgrind>

Runs i3 under valgrind to find memory problems. The output will be available
in C<latest/valgrind-for-$test.log>.
=item B<--strace>
Runs i3 under strace to trace system calls. The output will be available in
C<latest/strace-for-$test.log>.
2011-11-07 22:21:51 +01:00
=item B<--xtrace>
Runs i3 under xtrace to trace X11 requests/replies. The output will be
available in C<latest/xtrace-for-$test.log>.
2011-11-07 22:21:51 +01:00
=item B<--coverage-testing>
Exits i3 cleanly (instead of kill -9) to make coverage testing work properly.
=item B<--parallel>
Number of Xdummy instances to start (if you dont want to start num_cores * 2
instances for some reason).
# Run all tests on a single Xdummy instance
./complete-run.pl -p 1