#!/usr/bin/env perl
# vim:ts=4:sw=4:expandtab
# © 2010 Michael Stapelberg and contributors
package complete_run;
use strict;
use warnings;
use v5.10;
use utf8;
# the following are modules which ship with Perl (>= 5.10):
use Pod::Usage;
use File::Temp qw(tempfile tempdir);
use Getopt::Long;
use POSIX ();
use TAP::Harness;
use TAP::Parser;
use TAP::Parser::Aggregator;
use Time::HiRes qw(time);
use IO::Handle;

# these are shipped with the testsuite
use lib qw(@abs_top_builddir@/testcases/lib @abs_top_srcdir@/testcases/lib @abs_top_srcdir@/AnyEvent-I3/blib/lib);
use i3test::Util qw(slurp);
use StartXServer;
use StatusLine;
use TestWorker;
# the following modules are not shipped with Perl
use AnyEvent;
use AnyEvent::Util;
use AnyEvent::Handle;
use AnyEvent::I3 qw(:all);
use X11::XCB::Connection;
use JSON::XS; # AnyEvent::I3 depends on it, too.

binmode STDOUT, ':utf8';
binmode STDERR, ':utf8';

# Close superfluous file descriptors which we might have inherited, e.g. when
# run from a VIM subshell or similar.
AnyEvent::Util::close_all_fds_except(0, 1, 2);

our @CLEANUP;

# convenience wrapper to write to the log file
my $log;
sub Log { say $log "@_" }

my %timings;
my $help = 0;
# Number of tests to run in parallel. Determines how many Xephyr instances we
# need to start (unless --display is given). Defaults to num_cores * 2.
my $parallel = undef;
my @displays = ();
my %options = (
    valgrind => 0,
    strace => 0,
    xtrace => 0,
    coverage => 0,
    restart => 0,
    xvfb => 1,
);
my $keep_xserver_output = 0;

my $result = GetOptions(
    "coverage-testing" => \$options{coverage},
    "keep-xserver-output" => \$keep_xserver_output,
    "valgrind" => \$options{valgrind},
    "strace" => \$options{strace},
    "xtrace" => \$options{xtrace},
    "xvfb!" => \$options{xvfb},
    "display=s" => \@displays,
    "parallel=i" => \$parallel,
    "help|?" => \$help,
);

pod2usage(-verbose => 2, -exitcode => 0) if $help;

# Check for missing executables
my @binaries = qw(
                   @abs_top_builddir@/i3
                   @abs_top_builddir@/i3bar/i3bar
                   @abs_top_builddir@/i3-config-wizard/i3-config-wizard
                   @abs_top_builddir@/i3-dump-log/i3-dump-log
                   @abs_top_builddir@/i3-input/i3-input
                   @abs_top_builddir@/i3-msg/i3-msg
                   @abs_top_builddir@/i3-nagbar/i3-nagbar
               );

foreach my $binary (@binaries) {
    die "$binary executable not found, did you run “make”?" unless -e $binary;
    die "$binary is not an executable" unless -x $binary;
}

my @test_binaries = qw(
                        @abs_top_builddir@/test.commands_parser
                        @abs_top_builddir@/test.config_parser
                        @abs_top_builddir@/test.inject_randr15
                    );

foreach my $binary (@test_binaries) {
    die "$binary executable not found, did you run “make check”?" unless -e $binary;
    die "$binary is not an executable" unless -x $binary;
}

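# Prepend the build directories so that the tests use the binaries we just
# checked for instead of any system-wide installation.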
$ENV{PATH} = join(':',
    '@abs_top_builddir@/i3-nagbar',
    '@abs_top_builddir@/i3-msg',
    '@abs_top_builddir@/i3-input',
    '@abs_top_builddir@/i3-dump-log',
    '@abs_top_builddir@/i3-config-wizard',
    '@abs_top_builddir@/i3bar',
    '@abs_top_builddir@',
    '@abs_top_srcdir@',
    $ENV{PATH});

qx(Xephyr -help 2>&1);
die "Xephyr was not found in your path. Please install Xephyr (xserver-xephyr on Debian)." if $?;

qx(xvfb-run --help 2>&1);
if ($? && $options{xvfb}) {
    say "xvfb-run not found, not running tests under xvfb. Install the xvfb package to speed up tests";
    $options{xvfb} = 0;
}

if ($options{xvfb}) {
    for (my $n = 99; $n < 120; $n++) {
        my $path = File::Temp::tmpnam($ENV{TMPDIR} // "/tmp", "i3-testsXXXXXX");
        if (!defined(POSIX::mkfifo($path, 0600))) {
            die "mkfifo: $!";
        }
        my $pid = fork // die "fork: $!";
        if ($pid == 0) {
            # Child

            # Xvfb checks whether the parent ignores USR1 and sends USR1 to the
            # parent when ready, so that the wait call will be interrupted.  We
            # can’t implement this in Perl, as Perl’s waitpid transparently
            # handles -EINTR.
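            # The shell wrapper below: the subshell sets USR1 to “ignore” so
            # that Xvfb inherits SIG_IGN and signals its parent (the outer
            # shell) once it is ready to accept connections; the outer shell
            # traps USR1 with a no-op so its “wait” is merely interrupted.
            # It then writes "0:<pid>" (Xvfb is up) or "1:<pid>" (Xvfb already
            # exited) to the FIFO; on success it keeps waiting for Xvfb so the
            # wrapper stays alive as its parent.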
            exec('/bin/sh', '-c', qq|trap "exit" INT; trap : USR1; (trap '' USR1; exec Xvfb :$n -screen 0 640x480x8 -nolisten tcp) & PID=\$!; wait; if ! kill -0 \$PID 2>/dev/null; then echo 1:\$PID > $path; else echo 0:\$PID > $path; wait \$PID; fi|);
            die "exec: $!";
        }
        chomp(my $kill = slurp($path));
        unlink($path);
        my ($code, $xvfbpid) = ($kill =~ m,^([0-1]):(.*)$,);
        next unless $code eq '0';

        $ENV{DISPLAY} = ":$n";
        say "Running tests under Xvfb display $ENV{DISPLAY}";

        push(@CLEANUP, sub {
            kill(15, $xvfbpid);
        });
        last;
    }
}

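# Normalize the --display arguments: the flag can be given multiple times
# and/or as a comma-separated list (e.g. "-d :1 -d :2,:3" yields
# (":1", ":2", ":3")); stray spaces are stripped.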
@displays = split(/,/, join(',', @displays));
@displays = map { s/ //g; $_ } @displays;

# 1: get a list of all testcases
my @testfiles = @ARGV;

# if no files were passed on command line, run all tests from t/
if (scalar @testfiles == 0) {
    @testfiles = <@abs_top_srcdir@/testcases/t/*.t>;
} else {
    @testfiles = map {
        # Fully qualify each specified file if necessary
        if (! -e $_) {
            $_ = "@abs_top_srcdir@/testcases/$_";
        }
        $_
    } @testfiles;
}

my $numtests = scalar @testfiles;

# No displays specified, let’s start some Xephyr instances.
if (@displays == 0) {
    @displays = start_xserver($parallel, $numtests, $keep_xserver_output);
}

# 2: create an output directory for this test-run
my $outdir = "testsuite-";
$outdir .= POSIX::strftime("%Y-%m-%d-%H-%M-%S-", localtime());
$outdir .= `git describe --tags`;
chomp($outdir);
mkdir($outdir) or die "Could not create $outdir";
unlink("latest") if -l "latest";
symlink("$outdir", "latest") or die "Could not symlink latest to $outdir";


# Connect to all displays for two reasons:
# 1: check that the display actually works
# 2: keep the connection open so that i3 is not the only client; this prevents
#    the X server from exiting
my @single_worker;
for my $display (@displays) {
    my $x = X11::XCB::Connection->new(display => $display);
    if ($x->has_error) {
        die "Could not connect to display $display\n";
    } else {
        # start a TestWorker for each display
        push @single_worker, worker($display, $x, $outdir, \%options);
    }
}

# Read previous timing information, if available. We will be able to roughly
# predict the test duration and schedule a good order for the tests.
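# The file is a flat JSON object mapping each test file (by its full path) to
# its duration in seconds, plus a GLOBAL entry for the entire run, e.g.
# (paths abbreviated, values illustrative):
#   {"…/testcases/t/100-fullscreen.t":3.2,"GLOBAL":127.4}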
my $timingsjson = -e '.last_run_timings.json' ? slurp('.last_run_timings.json') : '';
%timings = %{decode_json($timingsjson)} if length($timingsjson) > 0;

# Re-order the files so that those which took the longest time in the previous
# run will be started at the beginning to not delay the whole run longer than
# necessary.
@testfiles = map  { $_->[0] }
             sort { $b->[1] <=> $a->[1] }
             map  { [$_, $timings{$_} // 999] } @testfiles;

# Run 000-load-deps.t first to bail out early when dependencies are missing.
my ($loadtest) = grep { $_ =~ m,t/000-load-deps.t$, } @testfiles;
if (defined($loadtest)) {
    @testfiles = ($loadtest, grep { $_ ne $loadtest } @testfiles);
}

# Run 533-randr15.t last because it destructively modifies the RandR
# configuration of the X session, interfering with any test started afterwards.
my ($randrtest) = grep { $_ =~ m,t/533-randr15.t$, } @testfiles;
if (defined($randrtest)) {
    @testfiles = ((grep { $_ ne $randrtest } @testfiles), $randrtest);
}

printf("\nRough time estimate for this run: %.2f seconds\n\n", $timings{GLOBAL})
    if exists($timings{GLOBAL});

# Forget the old timings, we don’t necessarily run the same set of tests as
# before. Otherwise we would end up with left-overs.
%timings = (GLOBAL => time());

my $logfile = "$outdir/complete-run.log";
open $log, '>', $logfile or die "Could not create '$logfile': $!";
$log->autoflush(1);
say "Writing logfile to '$logfile'...";

# 3: run all tests
my @done;
my $num = @testfiles;
my $harness = TAP::Harness->new({ });

my $aggregator = TAP::Parser::Aggregator->new();
$aggregator->start();

status_init(displays => \@displays, tests => $num);

my $single_cv = AE::cv;

# We start tests concurrently: For each display, one test gets started. Every
# test starts another test after completing.
for (@single_worker) {
    $single_cv->begin;
    take_job($_, $single_cv, \@testfiles);
}

$single_cv->recv;

$aggregator->stop();

# print empty lines to separate failed tests from statuslines
print "\n\n";

for (@done) {
    my ($test, $output) = @$_;
    say "no output for $test" unless $output;
    Log "output for $test:";
    Log $output;
    # print error messages of failed tests
    say for $output =~ /^not ok.+\n+((?:^#.+\n)+)/mg
}

# 4: print summary
$harness->summary($aggregator);

close $log;

# 5: Save the timings for better scheduling/prediction next run.
$timings{GLOBAL} = time() - $timings{GLOBAL};
open(my $fh, '>', '.last_run_timings.json')
    or die "Could not write '.last_run_timings.json': $!";
print $fh encode_json(\%timings);
close($fh);

# 6: Print the slowest test files.
my @slowest = map  { $_->[0] }
              sort { $b->[1] <=> $a->[1] }
              map  { [$_, $timings{$_}] }
              grep { !/^GLOBAL$/ } keys %timings;
say '';
say 'The slowest tests are:';
printf("\t%s with %.2f seconds\n", $_, $timings{$_})
    for @slowest[0..($#slowest > 4 ? 4 : $#slowest)];

# When we are running precisely one test, print the output. Makes developing
# with a single testcase easier.
if ($numtests == 1) {
    say '';
    say 'Test output:';
    say slurp($logfile);
}

END { cleanup() }

# Report logfiles that match “(Leak|Address)Sanitizer:”.
my @logs_with_leaks;
for my $log (<$outdir/i3-log-for-*>) {
    if (slurp($log) =~ /(Leak|Address)Sanitizer:/) {
        push @logs_with_leaks, $log;
    }
}
if (scalar @logs_with_leaks > 0) {
    say "\nThe following test logfiles contain AddressSanitizer or LeakSanitizer reports:";
    for my $log (sort @logs_with_leaks) {
        say "\t$log";
    }
}

exit ($aggregator->failed > 0);

#
# Takes a test from the beginning of @testfiles and runs it.
#
# The TAP::Parser (which reads the test output) will get called as soon as
# there is some activity on the stdout file descriptor of the test process
# (using an AnyEvent->io watcher).
#
# When a test completes, the worker picks the next test from @testfiles. Once
# no tests are left, each worker calls $cv->end; when all workers have ended,
# the $cv condvar fires and testing finishes.
#
sub take_job {
    my ($worker, $cv, $tests) = @_;

    my $test = shift @$tests
        or return $cv->end;

    my $display = $worker->{display};

    Log status($display, "$test: starting");
    $timings{$test} = time();
    worker_next($worker, $test);

    # create a TAP::Parser with an in-memory fh
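    # The filehandle is backed by $output, which the io watcher below keeps
    # appending to; the parser therefore consumes the worker’s TAP output
    # incrementally, line by line.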
    my $output;
    my $parser = TAP::Parser->new({
        source => do { open(my $fh, '<', \$output); $fh },
    });

    my $ipc = $worker->{ipc};

    my $w;
    $w = AnyEvent->io(
        fh => $ipc,
        poll => 'r',
        cb => sub {
            state $tests_completed = 0;
            state $partial = '';

            sysread($ipc, my $buf, 4096) or die "sysread: $!";

            if ($partial) {
                $buf = $partial . $buf;
                $partial = '';
            }

            # make sure we feed TAP::Parser complete lines so it doesn't blow up
            if (substr($buf, -1, 1) ne "\n") {
                my $nl = rindex($buf, "\n");
                if ($nl == -1) {
                    $partial = $buf;
                    return;
                }

                # strip the partial line from the buffer and keep it for the
                # next read
                $partial = substr($buf, $nl + 1);
                $buf = substr($buf, 0, $nl + 1);
            }

            # count lines before stripping the EOF marker, otherwise we might
            # end up with for (1 .. 0) { }, which would effectively skip the loop
            my $lines = $buf =~ tr/\n//;
            my $t_eof = $buf =~ s/^$TestWorker::EOF$//m;

            $output .= $buf;

            for (1 .. $lines) {
                my $result = $parser->next;
                next unless defined($result);
                if ($result->is_test) {
                    $tests_completed++;
                    status($display, "$test: [$tests_completed/??] ");
                } elsif ($result->is_bailout) {
                    Log status($display, "$test: BAILOUT");
                    status_completed(scalar @done);
                    say "";
                    say "test $test bailed out: " . $result->explanation;
                    exit 1;
                }
            }

            return unless $t_eof;

            Log status($display, "$test: finished");
            $timings{$test} = time() - $timings{$test};
            status_completed(scalar @done);

            $aggregator->add($test, $parser);
            push @done, [ $test, $output ];

            undef $w;
            take_job($worker, $cv, $tests);
        }
    );
}

sub cleanup {
    my $exitcode = $?;
    $_->() for @CLEANUP;
    exit $exitcode;
}

# must be in a BEGIN block because we exit() above, so this code would
# otherwise never be reached
BEGIN {
    $SIG{$_} = sub {
        require Carp; Carp::cluck("Caught SIG$_[0]\n");
        cleanup();
    } for qw(INT TERM QUIT KILL PIPE)
}

__END__

=head1 NAME

complete-run.pl - Run the i3 testsuite

=head1 SYNOPSIS

complete-run.pl [options] [files...]

=head1 EXAMPLE

To run the whole testsuite on a reasonable number of Xephyr instances (your
running X11 will not be touched), run:
  ./complete-run.pl

To run only a specific test (useful when developing a new feature), run:
  ./complete-run.pl t/100-fullscreen.t

=head1 OPTIONS

=over 8

=item B<--display>

Specifies which X11 display(s) should be used. Can be specified multiple times
(and/or as a comma-separated list); the tests are then distributed across the
given displays and run in parallel:

  # Run tests on the second X server
  ./complete-run.pl -d :1

  # Run four tests in parallel on some Xephyr servers
  ./complete-run.pl -d :1,:2,:3,:4

Note that it is not necessary to specify this anymore. If omitted,
complete-run.pl will start (num_cores * 2) Xephyr instances.

=item B<--valgrind>

Runs i3 under valgrind to find memory problems. The output will be available in
C<latest/valgrind-for-$test.log>.
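
  # Check a single test for memory errors
  ./complete-run.pl --valgrind t/100-fullscreen.t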

=item B<--strace>

Runs i3 under strace to trace system calls. The output will be available in
C<latest/strace-for-$test.log>.
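
  # Trace i3’s system calls during a single test
  ./complete-run.pl --strace t/100-fullscreen.t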

=item B<--xtrace>

Runs i3 under xtrace to trace X11 requests/replies. The output will be
available in C<latest/xtrace-for-$test.log>.
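
  # Record the X11 protocol traffic of a single test
  ./complete-run.pl --xtrace t/100-fullscreen.t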

=item B<--xvfb>

=item B<--no-xvfb>

Enable or disable running tests under Xvfb. Enabled by default.
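
  # Run the Xephyr instances directly on the current $DISPLAY
  ./complete-run.pl --no-xvfb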

=item B<--coverage-testing>

Generates a test coverage report at C<latest/i3-coverage>. Exits i3 cleanly
during tests (instead of kill -9) to make coverage testing work properly.
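
  # Generate latest/i3-coverage for a full run
  ./complete-run.pl --coverage-testing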

=item B<--parallel>

Number of Xephyr instances to start (if you don't want to start num_cores * 2
instances for some reason).

  # Run all tests on a single Xephyr instance
  ./complete-run.pl -p 1

=back