From b95bd102470e7b1d6e9e181671a788e8688f469c Mon Sep 17 00:00:00 2001 From: Rares Constantin Date: Tue, 23 Sep 2025 11:36:22 +0000 Subject: [PATCH 1/3] [WIP] fuzzer: attach security contexts to programs Added one experimental field that allow the user to add a list of security contexts that can be used during fuzzing. The security context is attached to the program as the first "syscall" and it will be set using `setcon`. The objective is to allow syzkaller to fuzz syscalls under specific security contexts that match the SELinux policy that is loaded on target. Signed-off-by: Rares Constantin --- executor/common_linux.h | 4 ++-- executor/executor.cc | 18 +++++++++++++++++- pkg/fuzzer/fuzzer.go | 35 +++++++++++++++++++++-------------- pkg/fuzzer/job.go | 4 ++++ pkg/mgrconfig/config.go | 3 +++ pkg/rpcserver/runner.go | 1 + prog/clone.go | 5 +++-- prog/encodingexec.go | 12 ++++++++++++ prog/prog.go | 7 ++++--- syz-manager/manager.go | 21 ++++++++++++--------- 10 files changed, 79 insertions(+), 31 deletions(-) diff --git a/executor/common_linux.h b/executor/common_linux.h index 5d477a16a812..a10ead43d37d 100644 --- a/executor/common_linux.h +++ b/executor/common_linux.h @@ -4455,7 +4455,7 @@ static void getcon(char* context, size_t context_size) // - Uses fail() instead of returning an error code static void setcon(const char* context) { - char new_context[512]; + char new_context[512] = {0}; // Attempt to write the new context int fd = open(SELINUX_CONTEXT_FILE, O_WRONLY); @@ -4470,7 +4470,7 @@ static void setcon(const char* context) close(fd); if (bytes_written != (ssize_t)strlen(context)) - failmsg("setcon: could not write entire context", "wrote=%zi, expected=%zu", bytes_written, strlen(context)); + failmsg("setcon: could not write entire context", "context: %s, wrote=%zi, expected=%zu", context, bytes_written, strlen(context)); // Validate the transition by checking the context getcon(new_context, sizeof(new_context)); diff --git a/executor/executor.cc b/executor/executor.cc index 4d95a44e6756..74f022175c5a 100644 --- a/executor/executor.cc +++ b/executor/executor.cc @@ -306,6 +306,7 @@ const uint64 instr_eof = -1; const uint64 instr_copyin = -2; const uint64 instr_copyout = -3; const uint64 instr_setprops = -4; +const uint64 instr_seccontext = -5; const uint64 arg_const = 0; const uint64 arg_addr32 = 1; @@ -970,10 +971,25 @@ void execute_one() memset(&call_props, 0, sizeof(call_props)); read_input(&input_pos); // total number of calls - for (;;) { + for (int index = 0;; index++) { uint64 call_num = read_input(&input_pos); if (call_num == instr_eof) break; +#if GOOS_linux + if (call_num == instr_seccontext) { + if (index) { + fail("seclabel instruction is not the first call\n"); + } + uint64 size = read_input(&input_pos); + char seclabel[64]{}; + memcpy(seclabel, input_pos, size); + setcon(seclabel); + input_pos += size; + + debug_verbose("applied security label: %s\n", seclabel); + continue; + } +#endif if (call_num == instr_copyin) { char* addr = (char*)(read_input(&input_pos) + SYZ_DATA_OFFSET); uint64 typ = read_input(&input_pos); diff --git a/pkg/fuzzer/fuzzer.go b/pkg/fuzzer/fuzzer.go index fdfe955182d7..ec76cd5bda0b 100644 --- a/pkg/fuzzer/fuzzer.go +++ b/pkg/fuzzer/fuzzer.go @@ -208,20 +208,23 @@ func (fuzzer *Fuzzer) processResult(req *queue.Request, res *queue.Result, flags } type Config struct { - Debug bool - Corpus *corpus.Corpus - Logf func(level int, msg string, args ...interface{}) - Snapshot bool - Coverage bool - FaultInjection bool - Comparisons bool - Collide bool - 
EnabledCalls map[*prog.Syscall]bool - NoMutateCalls map[int]bool - FetchRawCover bool - NewInputFilter func(call string) bool - PatchTest bool - ModeKFuzzTest bool + Debug bool + Corpus *corpus.Corpus + Logf func(level int, msg string, args ...interface{}) + Snapshot bool + Coverage bool + FaultInjection bool + Comparisons bool + Collide bool + EnabledCalls map[*prog.Syscall]bool + NoMutateCalls map[int]bool + FetchRawCover bool + Sandbox string + SandboxArg int64 + SecContexts []string + NewInputFilter func(call string) bool + PatchTest bool + ModeKFuzzTest bool } func (fuzzer *Fuzzer) triageProgCall(p *prog.Prog, info *flatrpc.CallInfo, call int, triage *map[int]*triageCall) { @@ -371,6 +374,10 @@ func (fuzzer *Fuzzer) AddCandidates(candidates []Candidate) { Stat: fuzzer.statExecCandidate, Important: true, } + req.Prog.SecContext = "" + if len(fuzzer.Config.SecContexts) != 0 { + req.Prog.SecContext = fuzzer.Config.SecContexts[0] + } fuzzer.enqueue(fuzzer.candidateQueue, req, candidate.Flags|progCandidate, 0) } } diff --git a/pkg/fuzzer/job.go b/pkg/fuzzer/job.go index bbac544f62ae..74d8580479a4 100644 --- a/pkg/fuzzer/job.go +++ b/pkg/fuzzer/job.go @@ -45,6 +45,10 @@ func genProgRequest(fuzzer *Fuzzer, rnd *rand.Rand) *queue.Request { p := fuzzer.target.Generate(rnd, fuzzer.RecommendedCalls(), fuzzer.ChoiceTable()) + p.SecContext = "" + if len(fuzzer.Config.SecContexts) != 0 { + p.SecContext = fuzzer.Config.SecContexts[0] + } return &queue.Request{ Prog: p, ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal), diff --git a/pkg/mgrconfig/config.go b/pkg/mgrconfig/config.go index 45145243c98d..c365a22613dc 100644 --- a/pkg/mgrconfig/config.go +++ b/pkg/mgrconfig/config.go @@ -258,6 +258,9 @@ type Experimental struct { // Enable dynamic discovery and fuzzing of KFuzzTest targets. EnableKFuzzTest bool `json:"enable_kfuzztest"` + + // List of security contexts that can be attached to programs. + SecContexts []string `json:"seccontexts"` } type FocusArea struct { diff --git a/pkg/rpcserver/runner.go b/pkg/rpcserver/runner.go index 007363c82663..fa676a378a33 100644 --- a/pkg/rpcserver/runner.go +++ b/pkg/rpcserver/runner.go @@ -329,6 +329,7 @@ func (runner *Runner) sendRequest(req *queue.Request) error { avoid |= uint64(1 << id.Proc) } } + msg := &flatrpc.HostMessage{ Msg: &flatrpc.HostMessages{ Type: flatrpc.HostMessagesRawExecRequest, diff --git a/prog/clone.go b/prog/clone.go index d41f3c13c9d1..a790d76e8ae6 100644 --- a/prog/clone.go +++ b/prog/clone.go @@ -21,8 +21,9 @@ func (p *Prog) cloneWithMap(newargs map[*ResultArg]*ResultArg) *Prog { panic("cloning of unsafe programs is not supposed to be done") } p1 := &Prog{ - Target: p.Target, - Calls: cloneCalls(p.Calls, newargs), + Target: p.Target, + Calls: cloneCalls(p.Calls, newargs), + SecContext: p.SecContext, } p1.debugValidate() return p1 diff --git a/prog/encodingexec.go b/prog/encodingexec.go index 14466a272501..9e2e421a5541 100644 --- a/prog/encodingexec.go +++ b/prog/encodingexec.go @@ -33,6 +33,7 @@ const ( execInstrCopyin execInstrCopyout execInstrSetProps + execInstrSetSecContext ) const ( @@ -73,6 +74,10 @@ func (p *Prog) SerializeForExec() ([]byte, error) { args: make(map[Arg]argInfo), } w.write(uint64(len(p.Calls))) + // If the length of the security context is not zero, it means that it must be set on target. 
+ if len(p.SecContext) != 0 { + w.writeSecLabel(p.SecContext) + } for _, c := range p.Calls { w.csumMap, w.csumUses = calcChecksumsCall(c) // TODO: if we propagate this error, something breaks and no coverage @@ -206,6 +211,13 @@ type argInfo struct { Ret bool } +// Add a special call for setting the security label of the program. +func (w *execContext) writeSecLabel(seclabel string) { + w.write(execInstrSetSecContext) + w.write(uint64(len(seclabel))) + w.buf = append(w.buf, []byte(seclabel)...) +} + func (w *execContext) writeCallProps(props CallProps) { w.write(execInstrSetProps) props.ForeachProp(func(_, _ string, value reflect.Value) { diff --git a/prog/prog.go b/prog/prog.go index b4c8d692d5e1..af58af117b10 100644 --- a/prog/prog.go +++ b/prog/prog.go @@ -10,9 +10,10 @@ import ( ) type Prog struct { - Target *Target - Calls []*Call - Comments []string + Target *Target + Calls []*Call + Comments []string + SecContext string // Was deserialized using Unsafe mode, so can do unsafe things. isUnsafe bool diff --git a/syz-manager/manager.go b/syz-manager/manager.go index 67af3bb29fc2..33f1c9d0ee13 100644 --- a/syz-manager/manager.go +++ b/syz-manager/manager.go @@ -1166,15 +1166,18 @@ func (mgr *Manager) MachineChecked(features flatrpc.Feature, rnd := rand.New(rand.NewSource(time.Now().UnixNano())) fuzzerObj := fuzzer.NewFuzzer(context.Background(), &fuzzer.Config{ - Corpus: mgr.corpus, - Snapshot: mgr.cfg.Snapshot, - Coverage: mgr.cfg.Cover, - FaultInjection: features&flatrpc.FeatureFault != 0, - Comparisons: features&flatrpc.FeatureComparisons != 0, - Collide: true, - EnabledCalls: enabledSyscalls, - NoMutateCalls: mgr.cfg.NoMutateCalls, - FetchRawCover: mgr.cfg.RawCover, + Corpus: mgr.corpus, + Snapshot: mgr.cfg.Snapshot, + Coverage: mgr.cfg.Cover, + FaultInjection: features&flatrpc.FeatureFault != 0, + Comparisons: features&flatrpc.FeatureComparisons != 0, + Collide: true, + EnabledCalls: enabledSyscalls, + NoMutateCalls: mgr.cfg.NoMutateCalls, + FetchRawCover: mgr.cfg.RawCover, + SecContexts: mgr.cfg.Experimental.SecContexts, + Sandbox: mgr.cfg.Sandbox, + SandboxArg: mgr.cfg.SandboxArg, Logf: func(level int, msg string, args ...interface{}) { if level != 0 { return From dfbb11ce093439809241c70fa3548471943e0b36 Mon Sep 17 00:00:00 2001 From: Rares Constantin Date: Tue, 23 Sep 2025 15:18:07 +0000 Subject: [PATCH 2/3] [WIP] fuzzer: add security context generator stub Added the security context generator in the fuzzer package which will contain the implementation for loading a binary security policy, parse it and then generate an appropiate security context based on the syscalls that are fuzzed in a program. For now, it will use the first security context provided in the `seccontexts` list if any is available. 
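For reference, a sketch of how this is expected to be wired up in the
manager config (illustrative only: the "experimental" section is the
existing config block, "seccontexts" is the field added by this series,
and the SELinux context value is just an example taken from the Android
sandbox):

    "experimental": {
        "seccontexts": ["u:r:untrusted_app:s0:c512,c768"]
    }

The first entry of that list is what the generator stub currently
attaches to every generated and candidate program.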
Signed-off-by: Rares Constantin --- pkg/fuzzer/fuzzer.go | 21 ++++++++++----------- pkg/fuzzer/fuzzer_test.go | 4 ++-- pkg/fuzzer/job.go | 5 +---- pkg/fuzzer/seccontextgen.go | 18 ++++++++++++++++++ pkg/kfuzztest-manager/manager.go | 2 +- pkg/manager/diff.go | 2 +- syz-manager/manager.go | 5 +---- 7 files changed, 34 insertions(+), 23 deletions(-) create mode 100644 pkg/fuzzer/seccontextgen.go diff --git a/pkg/fuzzer/fuzzer.go b/pkg/fuzzer/fuzzer.go index ec76cd5bda0b..2014340ad779 100644 --- a/pkg/fuzzer/fuzzer.go +++ b/pkg/fuzzer/fuzzer.go @@ -24,8 +24,9 @@ import ( type Fuzzer struct { Stats - Config *Config - Cover *Cover + Config *Config + Cover *Cover + SecContextGen *SecContextGenerator ctx context.Context mu sync.Mutex @@ -43,16 +44,17 @@ type Fuzzer struct { } func NewFuzzer(ctx context.Context, cfg *Config, rnd *rand.Rand, - target *prog.Target) *Fuzzer { + target *prog.Target, seclabelgen *SecContextGenerator) *Fuzzer { if cfg.NewInputFilter == nil { cfg.NewInputFilter = func(call string) bool { return true } } f := &Fuzzer{ - Stats: newStats(target), - Config: cfg, - Cover: newCover(), + Stats: newStats(target), + Config: cfg, + Cover: newCover(), + SecContextGen: seclabelgen, ctx: ctx, rnd: rnd, @@ -219,8 +221,6 @@ type Config struct { EnabledCalls map[*prog.Syscall]bool NoMutateCalls map[int]bool FetchRawCover bool - Sandbox string - SandboxArg int64 SecContexts []string NewInputFilter func(call string) bool PatchTest bool @@ -374,9 +374,8 @@ func (fuzzer *Fuzzer) AddCandidates(candidates []Candidate) { Stat: fuzzer.statExecCandidate, Important: true, } - req.Prog.SecContext = "" - if len(fuzzer.Config.SecContexts) != 0 { - req.Prog.SecContext = fuzzer.Config.SecContexts[0] + if fuzzer.SecContextGen != nil { + req.Prog.SecContext = fuzzer.SecContextGen.getSecLabel() } fuzzer.enqueue(fuzzer.candidateQueue, req, candidate.Flags|progCandidate, 0) } diff --git a/pkg/fuzzer/fuzzer_test.go b/pkg/fuzzer/fuzzer_test.go index b12d7634bcea..f93d55746cc2 100644 --- a/pkg/fuzzer/fuzzer_test.go +++ b/pkg/fuzzer/fuzzer_test.go @@ -59,7 +59,7 @@ func TestFuzz(t *testing.T) { EnabledCalls: map[*prog.Syscall]bool{ target.SyscallMap["syz_test_fuzzer1"]: true, }, - }, rand.New(testutil.RandSource(t)), target) + }, rand.New(testutil.RandSource(t)), target, nil) go func() { for { @@ -108,7 +108,7 @@ func BenchmarkFuzzer(b *testing.B) { Corpus: corpus.NewCorpus(ctx), Coverage: true, EnabledCalls: calls, - }, rand.New(rand.NewSource(time.Now().UnixNano())), target) + }, rand.New(rand.NewSource(time.Now().UnixNano())), target, nil) b.ResetTimer() b.RunParallel(func(pb *testing.PB) { diff --git a/pkg/fuzzer/job.go b/pkg/fuzzer/job.go index 74d8580479a4..b46f317be716 100644 --- a/pkg/fuzzer/job.go +++ b/pkg/fuzzer/job.go @@ -45,10 +45,7 @@ func genProgRequest(fuzzer *Fuzzer, rnd *rand.Rand) *queue.Request { p := fuzzer.target.Generate(rnd, fuzzer.RecommendedCalls(), fuzzer.ChoiceTable()) - p.SecContext = "" - if len(fuzzer.Config.SecContexts) != 0 { - p.SecContext = fuzzer.Config.SecContexts[0] - } + p.SecContext = fuzzer.SecContextGen.getSecLabel() return &queue.Request{ Prog: p, ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal), diff --git a/pkg/fuzzer/seccontextgen.go b/pkg/fuzzer/seccontextgen.go new file mode 100644 index 000000000000..8453437ffd28 --- /dev/null +++ b/pkg/fuzzer/seccontextgen.go @@ -0,0 +1,18 @@ +// Copyright 2025 syzkaller project authors. All rights reserved. +// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 
+ +package fuzzer + +type SecContextGenerator struct { + Sandbox string + SandboxArg int64 + SecContexts []string +} + +func (secContextGenerator *SecContextGenerator) getSecLabel() string { + var secContext string = "" + if len(secContextGenerator.SecContexts) != 0 { + secContext = secContextGenerator.SecContexts[0] + } + return secContext +} \ No newline at end of file diff --git a/pkg/kfuzztest-manager/manager.go b/pkg/kfuzztest-manager/manager.go index f728230ccc1a..5018d7e7c74b 100644 --- a/pkg/kfuzztest-manager/manager.go +++ b/pkg/kfuzztest-manager/manager.go @@ -114,7 +114,7 @@ func NewKFuzzTestManager(ctx context.Context, cfg Config) (*kFuzzTestManager, er // Don't filter anything. return true }, - }, rnd, target) + }, rnd, target, nil) // TODO: Sufficient for startup, but not ideal that we are passing a // manager config here. Would require changes to pkg/fuzzer if we wanted to diff --git a/pkg/manager/diff.go b/pkg/manager/diff.go index 8b57e580708f..17650e3f9068 100644 --- a/pkg/manager/diff.go +++ b/pkg/manager/diff.go @@ -571,7 +571,7 @@ func (kc *kernelContext) setupFuzzer(features flatrpc.Feature, syscalls map[*pro } log.Logf(level, msg, args...) }, - }, rnd, kc.cfg.Target) + }, rnd, kc.cfg.Target, nil) if kc.http != nil { kc.http.Fuzzer.Store(fuzzerObj) diff --git a/syz-manager/manager.go b/syz-manager/manager.go index 33f1c9d0ee13..68a1db19fa1e 100644 --- a/syz-manager/manager.go +++ b/syz-manager/manager.go @@ -1175,9 +1175,6 @@ func (mgr *Manager) MachineChecked(features flatrpc.Feature, EnabledCalls: enabledSyscalls, NoMutateCalls: mgr.cfg.NoMutateCalls, FetchRawCover: mgr.cfg.RawCover, - SecContexts: mgr.cfg.Experimental.SecContexts, - Sandbox: mgr.cfg.Sandbox, - SandboxArg: mgr.cfg.SandboxArg, Logf: func(level int, msg string, args ...interface{}) { if level != 0 { return @@ -1190,7 +1187,7 @@ func (mgr *Manager) MachineChecked(features flatrpc.Feature, return !mgr.saturatedCalls[call] }, ModeKFuzzTest: mgr.cfg.Experimental.EnableKFuzzTest, - }, rnd, mgr.target) + }, rnd, mgr.target, &fuzzer.SecContextGenerator{ Sandbox: mgr.cfg.Sandbox, SandboxArg: mgr.cfg.SandboxArg, SecContexts: mgr.cfg.Experimental.SecContexts }) fuzzerObj.AddCandidates(candidates) mgr.fuzzer.Store(fuzzerObj) mgr.http.Fuzzer.Store(fuzzerObj) From e05ca4cf45bdee5460ae770820d58c477e0733ef Mon Sep 17 00:00:00 2001 From: Rares Constantin Date: Thu, 25 Sep 2025 16:04:47 +0000 Subject: [PATCH 3/3] [WIP] executor: collect and return audit logs Implemented the audit log extraction per program. This is available for VMs that run only one proc and add `audit` in the experimental config. The messages are appended to the output of the program and are printed during result processing. 
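For reference, a config sketch that enables the collection
(illustrative only: "audit" is the experimental field added here, and
"procs" is the existing per-VM proc count, which has to be 1 for the
runner to register itself as the audit sink):

    "procs": 1,
    "experimental": {
        "audit": true
    }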
--- executor/common_linux.h | 3 - executor/executor_runner.h | 219 ++++++++++++++++++++++++++++++++++++- pkg/flatrpc/flatrpc.fbs | 3 + pkg/flatrpc/flatrpc.go | 91 +++++++++------ pkg/flatrpc/flatrpc.h | 50 ++++++--- pkg/fuzzer/fuzzer.go | 12 ++ pkg/fuzzer/job.go | 2 + pkg/fuzzer/queue/queue.go | 1 + pkg/mgrconfig/config.go | 3 + pkg/rpcserver/rpcserver.go | 2 + pkg/rpcserver/runner.go | 5 + pkg/vminfo/vminfo.go | 1 + syz-manager/manager.go | 1 + 13 files changed, 337 insertions(+), 56 deletions(-) diff --git a/executor/common_linux.h b/executor/common_linux.h index a10ead43d37d..fe692185b940 100644 --- a/executor/common_linux.h +++ b/executor/common_linux.h @@ -4415,7 +4415,6 @@ inline int symlink(const char* old_path, const char* new_path) #define SYSTEM_UID 1000 #define SYSTEM_GID 1000 -const char* const SELINUX_CONTEXT_UNTRUSTED_APP = "u:r:untrusted_app:s0:c512,c768"; const char* const SELINUX_LABEL_APP_DATA_FILE = "u:object_r:app_data_file:s0:c512,c768"; const char* const SELINUX_CONTEXT_FILE = "/proc/thread-self/attr/current"; const char* const SELINUX_XATTR_NAME = "security.selinux"; @@ -4567,8 +4566,6 @@ static int do_sandbox_android(uint64 sandbox_arg) prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0); setfilecon(".", SELINUX_LABEL_APP_DATA_FILE); - if (uid == UNTRUSTED_APP_UID) - setcon(SELINUX_CONTEXT_UNTRUSTED_APP); loop(); doexit(1); diff --git a/executor/executor_runner.h b/executor/executor_runner.h index baabf5e500c6..34188fe388b0 100644 --- a/executor/executor_runner.h +++ b/executor/executor_runner.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -17,6 +18,27 @@ #include #include +#include +#include + +constexpr int NETLINK_BUF_SIZE = 4096; + +// Helper function to open a Netlink socket for Audit +int OpenNetlinkAuditSocket() +{ + return socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT); +} + +ssize_t ReceiveNetlinkMessage(int fd, void* buf, size_t len) +{ + return recv(fd, buf, len, 0); +} + +ssize_t SendNetlinkMessage(int fd, const void* buf, size_t len) +{ + return send(fd, buf, len, 0); +} + inline std::ostream& operator<<(std::ostream& ss, const rpc::ExecRequestRawT& req) { return ss << "id=" << req.id @@ -109,7 +131,7 @@ class Proc { public: Proc(Connection& conn, const char* bin, ProcIDPool& proc_id_pool, int& restarting, const bool& corpus_triaged, int max_signal_fd, - int cover_filter_fd, ProcOpts opts) + int cover_filter_fd, ProcOpts opts, int audit_sock) : conn_(conn), bin_(bin), proc_id_pool_(proc_id_pool), @@ -121,7 +143,8 @@ class Proc opts_(opts), req_shmem_(kMaxInput), resp_shmem_(kMaxOutput), - resp_mem_(static_cast(resp_shmem_.Mem())) + resp_mem_(static_cast(resp_shmem_.Mem())), + audit_sock_(audit_sock) { Start(); } @@ -237,6 +260,7 @@ class Proc uint64 exec_start_ = 0; uint64 wait_start_ = 0; uint64 wait_end_ = 0; + int audit_sock_ = 0; friend std::ostream& operator<<(std::ostream& ss, const Proc& proc) { @@ -251,6 +275,112 @@ class Proc return ss; } + ssize_t SendUserAuditMessage(const std::string &message_text) { + const size_t payload_len = message_text.length() + 1; + const size_t buf_len = NLMSG_SPACE(payload_len); + std::vector buf(buf_len); + memset(buf.data(), 0, buf_len); + + int fd = OpenNetlinkAuditSocket(); + if (fd < 0) { + debug("Failed to open socket to send audit message.\n"); + return -1; + } + + auto* nlh = reinterpret_cast(buf.data()); + nlh->nlmsg_len = NLMSG_LENGTH((int)payload_len); + nlh->nlmsg_type = AUDIT_USER_AVC; + nlh->nlmsg_flags = NLM_F_REQUEST; + + char* data = static_cast(NLMSG_DATA(nlh)); + strncpy(data, 
message_text.c_str(), payload_len); + ssize_t res = SendNetlinkMessage(fd, nlh, nlh->nlmsg_len); + close(fd); + return res; + } + + void CollectAuditLogs(std::vector* output, int64_t req_id) + { + bool prefixed = false; + ssize_t slen = 0; + char buf[NETLINK_BUF_SIZE]{}; + struct nlmsghdr* header; + std::string message; + + if (SendUserAuditMessage("PROC_END") < 0) { + debug("Failed to send PROC_END. Stopping drain."); + return; + } + + while (true) { + slen = ReceiveNetlinkMessage(audit_sock_, buf, sizeof(buf)); + if (errno == EINTR) { + continue; + } + if (slen < 0) { + fprintf(stderr, "audit: receive error: %s\n", strerror(errno)); + continue; + } + if (slen < NLMSG_LENGTH(0)) { + fprintf(stderr, "audit: message too short\n"); + continue; + } + header = (struct nlmsghdr*)buf; + message = std::string((char*)NLMSG_DATA(header), + (char*)NLMSG_DATA(header) + + (slen - sizeof(*header))); + debug("proc %d: req(%ld) - Audit message: %s\n", id_, msg_->id, message.c_str()); + if (header->nlmsg_type != AUDIT_USER_AVC) { + continue; + } + if (strstr(message.c_str(), "PROC_START")) { + break; + } + } + + // Drain the audit backlog until there is no other message + while (true) { + slen = ReceiveNetlinkMessage(audit_sock_, buf, sizeof(buf)); + if (errno == EINTR) { + continue; + } + if (slen < 0) { + fprintf(stderr, "audit: receive error: %s\n", strerror(errno)); + continue; + } + if (slen < NLMSG_LENGTH(0)) { + fprintf(stderr, "audit: message too short\n"); + continue; + } + header = (struct nlmsghdr*)buf; + message = std::string((char*)NLMSG_DATA(header), + (char*)NLMSG_DATA(header) + + (slen - sizeof(*header))); + debug("proc %d: req(%ld) - Audit message: %s\n", id_, msg_->id, message.c_str()); + if (header->nlmsg_type != AUDIT_AVC && header->nlmsg_type != AUDIT_USER_AVC) { + continue; + } + if (header->nlmsg_type == AUDIT_USER_AVC && strstr(message.c_str(), "PROC_END")) { + break; + } + if (header->nlmsg_type == AUDIT_AVC) { + const char *found = strstr(message.c_str(), "syz"); + if (!found || !strstr(found, std::to_string(msg_->id).c_str())) { + continue; + } + if (!prefixed) { + char tmp[128]; + // Add prefix to the audit messages. + snprintf(tmp, sizeof(tmp), "\nAudit messages:\n"); + output->insert(output->end(), tmp, tmp + strlen(tmp)); + prefixed = true; + } + message.append("\n"); + output->insert(output->end(), message.c_str(), message.c_str() + strlen(message.c_str())); + } + } + } + void ChangeState(State state) { if (state_ == State::Handshaking) @@ -392,6 +522,12 @@ class Proc debug("proc %d: start executing request %llu\n", id_, static_cast(msg_->id)); + if (IsSet(msg_->flags, rpc::RequestFlag::ReturnAudit)) { + if (SendUserAuditMessage("PROC_START") < 0) { + debug("Failed to send PROC START\n"); + } + } + rpc::ExecutingMessageRawT exec; exec.id = msg_->id; exec.proc_id = id_; @@ -454,6 +590,10 @@ class Proc output_.insert(output_.end(), tmp, tmp + strlen(tmp)); } } + if (IsSet(msg_->flags, rpc::RequestFlag::ReturnAudit)) { + output = &output_; + CollectAuditLogs(output, msg_->id); + } uint32 num_calls = 0; if (msg_->type == rpc::RequestType::Program) num_calls = read_input(&prog_data); @@ -556,9 +696,16 @@ class Runner proc_id_pool_.emplace(num_procs); int max_signal_fd = max_signal_ ? max_signal_->FD() : -1; int cover_filter_fd = cover_filter_ ? 
cover_filter_->FD() : -1; + int audit_sock = 0; + if (audit) { + audit_sock = registerForAudit(); + if (audit_sock < 0) { + debug("Failed to register as the audit sink\n"); + } + } for (int i = 0; i < num_procs; i++) procs_.emplace_back(new Proc(conn, bin, *proc_id_pool_, restarting_, corpus_triaged_, - max_signal_fd, cover_filter_fd, proc_opts_)); + max_signal_fd, cover_filter_fd, proc_opts_, audit_sock)); for (;;) Loop(); @@ -574,6 +721,7 @@ class Runner std::deque requests_; std::vector leak_frames_; int restarting_ = 0; + bool audit = false; bool corpus_triaged_ = false; ProcOpts proc_opts_{}; @@ -595,6 +743,64 @@ class Runner return ss; } + int registerForAudit() + { + struct { + struct nlmsghdr nlh; + struct audit_status status; + } req; + memset(&req, 0, sizeof(req)); + + int fd = OpenNetlinkAuditSocket(); + if (fd < 0) { + return -1; + } + + req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct audit_status)); + req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + req.nlh.nlmsg_seq = 1; + req.nlh.nlmsg_type = AUDIT_SET; + req.status.pid = getpid(); + req.status.backlog_limit = 0; + req.status.rate_limit = 0; + req.status.mask = AUDIT_STATUS_PID | AUDIT_STATUS_BACKLOG_LIMIT | AUDIT_STATUS_RATE_LIMIT; + + ssize_t sent = SendNetlinkMessage(fd, &req, req.nlh.nlmsg_len); + if (sent != req.nlh.nlmsg_len) { + close(fd); + return -1; + } + + ssize_t slen = 0; + char buf[NETLINK_BUF_SIZE]; + struct nlmsghdr* header; + do { + slen = ReceiveNetlinkMessage(fd, buf, sizeof(buf)); + if (errno == EAGAIN || errno == EINTR) { + continue; + } + if (slen < NLMSG_LENGTH(0)) { + fprintf(stderr, "audit: message too short\n"); + continue; + } + header = (struct nlmsghdr*)buf; + } while (header->nlmsg_type != NLMSG_ERROR); + + struct nlmsgerr* err; + if ((size_t)slen < NLMSG_LENGTH(sizeof(*err))) { + fprintf(stderr, "audit_listener: error message too short\n"); + close(fd); + return -1; + } + err = (struct nlmsgerr*)NLMSG_DATA(header); + if (err->error != 0) { + fprintf(stderr, "audit_listener: received error %d\n", -err->error); + close(fd); + return -1; + } + return fd; + } + void Loop() { Select select; @@ -661,6 +867,13 @@ class Runner conn_.Recv(conn_reply); if (conn_reply.debug) flag_debug = true; + if (conn_reply.audit) { + if (conn_reply.procs > 1) { + debug("extracting audit logs only supported with one proc"); + } + audit = conn_reply.procs == 1; + } + debug("connected to manager: procs=%d cover_edges=%d kernel_64_bit=%d slowdown=%d syscall_timeout=%u" " program_timeout=%u features=0x%llx\n", conn_reply.procs, conn_reply.cover_edges, conn_reply.kernel_64_bit, diff --git a/pkg/flatrpc/flatrpc.fbs b/pkg/flatrpc/flatrpc.fbs index 3876af9655da..920566cb18b9 100644 --- a/pkg/flatrpc/flatrpc.fbs +++ b/pkg/flatrpc/flatrpc.fbs @@ -50,6 +50,7 @@ table ConnectRequestRaw { table ConnectReplyRaw { debug :bool; + audit :bool; cover :bool; cover_edges :bool; kernel_64_bit :bool; @@ -128,6 +129,8 @@ enum RequestType : uint64 { enum RequestFlag : uint64 (bit_flags) { // If set, collect program output and return in output field. ReturnOutput, + // If set, collect audit logs produced by the program at the end of the output. + ReturnAudit, // If set, don't fail on program failures, instead return the error in error field. 
ReturnError, } diff --git a/pkg/flatrpc/flatrpc.go b/pkg/flatrpc/flatrpc.go index c7afd2c7778c..cfd0098fec71 100644 --- a/pkg/flatrpc/flatrpc.go +++ b/pkg/flatrpc/flatrpc.go @@ -291,16 +291,19 @@ type RequestFlag uint64 const ( RequestFlagReturnOutput RequestFlag = 1 - RequestFlagReturnError RequestFlag = 2 + RequestFlagReturnAudit RequestFlag = 2 + RequestFlagReturnError RequestFlag = 4 ) var EnumNamesRequestFlag = map[RequestFlag]string{ RequestFlagReturnOutput: "ReturnOutput", + RequestFlagReturnAudit: "ReturnAudit", RequestFlagReturnError: "ReturnError", } var EnumValuesRequestFlag = map[string]RequestFlag{ "ReturnOutput": RequestFlagReturnOutput, + "ReturnAudit": RequestFlagReturnAudit, "ReturnError": RequestFlagReturnError, } @@ -706,6 +709,7 @@ func ConnectRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { type ConnectReplyRawT struct { Debug bool `json:"debug"` + Audit bool `json:"audit"` Cover bool `json:"cover"` CoverEdges bool `json:"cover_edges"` Kernel64Bit bool `json:"kernel_64_bit"` @@ -764,6 +768,7 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse } ConnectReplyRawStart(builder) ConnectReplyRawAddDebug(builder, t.Debug) + ConnectReplyRawAddAudit(builder, t.Audit) ConnectReplyRawAddCover(builder, t.Cover) ConnectReplyRawAddCoverEdges(builder, t.CoverEdges) ConnectReplyRawAddKernel64Bit(builder, t.Kernel64Bit) @@ -780,6 +785,7 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse func (rcv *ConnectReplyRaw) UnPackTo(t *ConnectReplyRawT) { t.Debug = rcv.Debug() + t.Audit = rcv.Audit() t.Cover = rcv.Cover() t.CoverEdges = rcv.CoverEdges() t.Kernel64Bit = rcv.Kernel64Bit() @@ -853,7 +859,7 @@ func (rcv *ConnectReplyRaw) MutateDebug(n bool) bool { return rcv._tab.MutateBoolSlot(4, n) } -func (rcv *ConnectReplyRaw) Cover() bool { +func (rcv *ConnectReplyRaw) Audit() bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { return rcv._tab.GetBool(o + rcv._tab.Pos) @@ -861,11 +867,11 @@ func (rcv *ConnectReplyRaw) Cover() bool { return false } -func (rcv *ConnectReplyRaw) MutateCover(n bool) bool { +func (rcv *ConnectReplyRaw) MutateAudit(n bool) bool { return rcv._tab.MutateBoolSlot(6, n) } -func (rcv *ConnectReplyRaw) CoverEdges() bool { +func (rcv *ConnectReplyRaw) Cover() bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { return rcv._tab.GetBool(o + rcv._tab.Pos) @@ -873,11 +879,11 @@ func (rcv *ConnectReplyRaw) CoverEdges() bool { return false } -func (rcv *ConnectReplyRaw) MutateCoverEdges(n bool) bool { +func (rcv *ConnectReplyRaw) MutateCover(n bool) bool { return rcv._tab.MutateBoolSlot(8, n) } -func (rcv *ConnectReplyRaw) Kernel64Bit() bool { +func (rcv *ConnectReplyRaw) CoverEdges() bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { return rcv._tab.GetBool(o + rcv._tab.Pos) @@ -885,12 +891,24 @@ func (rcv *ConnectReplyRaw) Kernel64Bit() bool { return false } -func (rcv *ConnectReplyRaw) MutateKernel64Bit(n bool) bool { +func (rcv *ConnectReplyRaw) MutateCoverEdges(n bool) bool { return rcv._tab.MutateBoolSlot(10, n) } -func (rcv *ConnectReplyRaw) Procs() int32 { +func (rcv *ConnectReplyRaw) Kernel64Bit() bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + return rcv._tab.GetBool(o + rcv._tab.Pos) + } + return false +} + +func (rcv *ConnectReplyRaw) MutateKernel64Bit(n bool) bool { + return rcv._tab.MutateBoolSlot(12, n) +} + +func (rcv *ConnectReplyRaw) Procs() int32 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { return 
rcv._tab.GetInt32(o + rcv._tab.Pos) } @@ -898,11 +916,11 @@ func (rcv *ConnectReplyRaw) Procs() int32 { } func (rcv *ConnectReplyRaw) MutateProcs(n int32) bool { - return rcv._tab.MutateInt32Slot(12, n) + return rcv._tab.MutateInt32Slot(14, n) } func (rcv *ConnectReplyRaw) Slowdown() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } @@ -910,11 +928,11 @@ func (rcv *ConnectReplyRaw) Slowdown() int32 { } func (rcv *ConnectReplyRaw) MutateSlowdown(n int32) bool { - return rcv._tab.MutateInt32Slot(14, n) + return rcv._tab.MutateInt32Slot(16, n) } func (rcv *ConnectReplyRaw) SyscallTimeoutMs() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(18)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } @@ -922,11 +940,11 @@ func (rcv *ConnectReplyRaw) SyscallTimeoutMs() int32 { } func (rcv *ConnectReplyRaw) MutateSyscallTimeoutMs(n int32) bool { - return rcv._tab.MutateInt32Slot(16, n) + return rcv._tab.MutateInt32Slot(18, n) } func (rcv *ConnectReplyRaw) ProgramTimeoutMs() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(18)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(20)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } @@ -934,11 +952,11 @@ func (rcv *ConnectReplyRaw) ProgramTimeoutMs() int32 { } func (rcv *ConnectReplyRaw) MutateProgramTimeoutMs(n int32) bool { - return rcv._tab.MutateInt32Slot(18, n) + return rcv._tab.MutateInt32Slot(20, n) } func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(20)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(22)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4)) @@ -947,7 +965,7 @@ func (rcv *ConnectReplyRaw) LeakFrames(j int) []byte { } func (rcv *ConnectReplyRaw) LeakFramesLength() int { - o := flatbuffers.UOffsetT(rcv._tab.Offset(20)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(22)) if o != 0 { return rcv._tab.VectorLen(o) } @@ -955,7 +973,7 @@ func (rcv *ConnectReplyRaw) LeakFramesLength() int { } func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(22)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(24)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4)) @@ -964,7 +982,7 @@ func (rcv *ConnectReplyRaw) RaceFrames(j int) []byte { } func (rcv *ConnectReplyRaw) RaceFramesLength() int { - o := flatbuffers.UOffsetT(rcv._tab.Offset(22)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(24)) if o != 0 { return rcv._tab.VectorLen(o) } @@ -972,7 +990,7 @@ func (rcv *ConnectReplyRaw) RaceFramesLength() int { } func (rcv *ConnectReplyRaw) Features() Feature { - o := flatbuffers.UOffsetT(rcv._tab.Offset(24)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(26)) if o != 0 { return Feature(rcv._tab.GetUint64(o + rcv._tab.Pos)) } @@ -980,11 +998,11 @@ func (rcv *ConnectReplyRaw) Features() Feature { } func (rcv *ConnectReplyRaw) MutateFeatures(n Feature) bool { - return rcv._tab.MutateUint64Slot(24, uint64(n)) + return rcv._tab.MutateUint64Slot(26, uint64(n)) } func (rcv *ConnectReplyRaw) Files(j int) []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(26)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(28)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4)) @@ -993,7 +1011,7 @@ func (rcv *ConnectReplyRaw) Files(j int) []byte { } func (rcv *ConnectReplyRaw) FilesLength() int { - o := 
flatbuffers.UOffsetT(rcv._tab.Offset(26)) + o := flatbuffers.UOffsetT(rcv._tab.Offset(28)) if o != 0 { return rcv._tab.VectorLen(o) } @@ -1001,49 +1019,52 @@ func (rcv *ConnectReplyRaw) FilesLength() int { } func ConnectReplyRawStart(builder *flatbuffers.Builder) { - builder.StartObject(12) + builder.StartObject(13) } func ConnectReplyRawAddDebug(builder *flatbuffers.Builder, debug bool) { builder.PrependBoolSlot(0, debug, false) } +func ConnectReplyRawAddAudit(builder *flatbuffers.Builder, audit bool) { + builder.PrependBoolSlot(1, audit, false) +} func ConnectReplyRawAddCover(builder *flatbuffers.Builder, cover bool) { - builder.PrependBoolSlot(1, cover, false) + builder.PrependBoolSlot(2, cover, false) } func ConnectReplyRawAddCoverEdges(builder *flatbuffers.Builder, coverEdges bool) { - builder.PrependBoolSlot(2, coverEdges, false) + builder.PrependBoolSlot(3, coverEdges, false) } func ConnectReplyRawAddKernel64Bit(builder *flatbuffers.Builder, kernel64Bit bool) { - builder.PrependBoolSlot(3, kernel64Bit, false) + builder.PrependBoolSlot(4, kernel64Bit, false) } func ConnectReplyRawAddProcs(builder *flatbuffers.Builder, procs int32) { - builder.PrependInt32Slot(4, procs, 0) + builder.PrependInt32Slot(5, procs, 0) } func ConnectReplyRawAddSlowdown(builder *flatbuffers.Builder, slowdown int32) { - builder.PrependInt32Slot(5, slowdown, 0) + builder.PrependInt32Slot(6, slowdown, 0) } func ConnectReplyRawAddSyscallTimeoutMs(builder *flatbuffers.Builder, syscallTimeoutMs int32) { - builder.PrependInt32Slot(6, syscallTimeoutMs, 0) + builder.PrependInt32Slot(7, syscallTimeoutMs, 0) } func ConnectReplyRawAddProgramTimeoutMs(builder *flatbuffers.Builder, programTimeoutMs int32) { - builder.PrependInt32Slot(7, programTimeoutMs, 0) + builder.PrependInt32Slot(8, programTimeoutMs, 0) } func ConnectReplyRawAddLeakFrames(builder *flatbuffers.Builder, leakFrames flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(8, flatbuffers.UOffsetT(leakFrames), 0) + builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(leakFrames), 0) } func ConnectReplyRawStartLeakFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func ConnectReplyRawAddRaceFrames(builder *flatbuffers.Builder, raceFrames flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(raceFrames), 0) + builder.PrependUOffsetTSlot(10, flatbuffers.UOffsetT(raceFrames), 0) } func ConnectReplyRawStartRaceFramesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func ConnectReplyRawAddFeatures(builder *flatbuffers.Builder, features Feature) { - builder.PrependUint64Slot(10, uint64(features), 0) + builder.PrependUint64Slot(11, uint64(features), 0) } func ConnectReplyRawAddFiles(builder *flatbuffers.Builder, files flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(11, flatbuffers.UOffsetT(files), 0) + builder.PrependUOffsetTSlot(12, flatbuffers.UOffsetT(files), 0) } func ConnectReplyRawStartFilesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) diff --git a/pkg/flatrpc/flatrpc.h b/pkg/flatrpc/flatrpc.h index 6f7c181ec7d0..70cf3aa45eb3 100644 --- a/pkg/flatrpc/flatrpc.h +++ b/pkg/flatrpc/flatrpc.h @@ -528,23 +528,27 @@ inline const char *EnumNameRequestType(RequestType e) { enum class RequestFlag : uint64_t { ReturnOutput = 1ULL, - ReturnError = 2ULL, + ReturnAudit = 2ULL, + ReturnError = 4ULL, NONE = 0, - ANY = 3ULL + ANY = 7ULL }; 
FLATBUFFERS_DEFINE_BITMASK_OPERATORS(RequestFlag, uint64_t) -inline const RequestFlag (&EnumValuesRequestFlag())[2] { +inline const RequestFlag (&EnumValuesRequestFlag())[3] { static const RequestFlag values[] = { RequestFlag::ReturnOutput, + RequestFlag::ReturnAudit, RequestFlag::ReturnError }; return values; } inline const char * const *EnumNamesRequestFlag() { - static const char * const names[3] = { + static const char * const names[5] = { "ReturnOutput", + "ReturnAudit", + "", "ReturnError", nullptr }; @@ -1029,6 +1033,7 @@ flatbuffers::Offset CreateConnectRequestRaw(flatbuffers::Flat struct ConnectReplyRawT : public flatbuffers::NativeTable { typedef ConnectReplyRaw TableType; bool debug = false; + bool audit = false; bool cover = false; bool cover_edges = false; bool kernel_64_bit = false; @@ -1047,21 +1052,25 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ConnectReplyRawBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_DEBUG = 4, - VT_COVER = 6, - VT_COVER_EDGES = 8, - VT_KERNEL_64_BIT = 10, - VT_PROCS = 12, - VT_SLOWDOWN = 14, - VT_SYSCALL_TIMEOUT_MS = 16, - VT_PROGRAM_TIMEOUT_MS = 18, - VT_LEAK_FRAMES = 20, - VT_RACE_FRAMES = 22, - VT_FEATURES = 24, - VT_FILES = 26 + VT_AUDIT = 6, + VT_COVER = 8, + VT_COVER_EDGES = 10, + VT_KERNEL_64_BIT = 12, + VT_PROCS = 14, + VT_SLOWDOWN = 16, + VT_SYSCALL_TIMEOUT_MS = 18, + VT_PROGRAM_TIMEOUT_MS = 20, + VT_LEAK_FRAMES = 22, + VT_RACE_FRAMES = 24, + VT_FEATURES = 26, + VT_FILES = 28 }; bool debug() const { return GetField(VT_DEBUG, 0) != 0; } + bool audit() const { + return GetField(VT_AUDIT, 0) != 0; + } bool cover() const { return GetField(VT_COVER, 0) != 0; } @@ -1098,6 +1107,7 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_DEBUG, 1) && + VerifyField(verifier, VT_AUDIT, 1) && VerifyField(verifier, VT_COVER, 1) && VerifyField(verifier, VT_COVER_EDGES, 1) && VerifyField(verifier, VT_KERNEL_64_BIT, 1) && @@ -1129,6 +1139,9 @@ struct ConnectReplyRawBuilder { void add_debug(bool debug) { fbb_.AddElement(ConnectReplyRaw::VT_DEBUG, static_cast(debug), 0); } + void add_audit(bool audit) { + fbb_.AddElement(ConnectReplyRaw::VT_AUDIT, static_cast(audit), 0); + } void add_cover(bool cover) { fbb_.AddElement(ConnectReplyRaw::VT_COVER, static_cast(cover), 0); } @@ -1176,6 +1189,7 @@ struct ConnectReplyRawBuilder { inline flatbuffers::Offset CreateConnectReplyRaw( flatbuffers::FlatBufferBuilder &_fbb, bool debug = false, + bool audit = false, bool cover = false, bool cover_edges = false, bool kernel_64_bit = false, @@ -1199,6 +1213,7 @@ inline flatbuffers::Offset CreateConnectReplyRaw( builder_.add_kernel_64_bit(kernel_64_bit); builder_.add_cover_edges(cover_edges); builder_.add_cover(cover); + builder_.add_audit(audit); builder_.add_debug(debug); return builder_.Finish(); } @@ -1206,6 +1221,7 @@ inline flatbuffers::Offset CreateConnectReplyRaw( inline flatbuffers::Offset CreateConnectReplyRawDirect( flatbuffers::FlatBufferBuilder &_fbb, bool debug = false, + bool audit = false, bool cover = false, bool cover_edges = false, bool kernel_64_bit = false, @@ -1223,6 +1239,7 @@ inline flatbuffers::Offset CreateConnectReplyRawDirect( return rpc::CreateConnectReplyRaw( _fbb, debug, + audit, cover, cover_edges, kernel_64_bit, @@ -3045,6 +3062,7 @@ inline void ConnectReplyRaw::UnPackTo(ConnectReplyRawT *_o, const flatbuffers::r (void)_o; 
(void)_resolver; { auto _e = debug(); _o->debug = _e; } + { auto _e = audit(); _o->audit = _e; } { auto _e = cover(); _o->cover = _e; } { auto _e = cover_edges(); _o->cover_edges = _e; } { auto _e = kernel_64_bit(); _o->kernel_64_bit = _e; } @@ -3067,6 +3085,7 @@ inline flatbuffers::Offset CreateConnectReplyRaw(flatbuffers::F (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConnectReplyRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _debug = _o->debug; + auto _audit = _o->audit; auto _cover = _o->cover; auto _cover_edges = _o->cover_edges; auto _kernel_64_bit = _o->kernel_64_bit; @@ -3081,6 +3100,7 @@ inline flatbuffers::Offset CreateConnectReplyRaw(flatbuffers::F return rpc::CreateConnectReplyRaw( _fbb, _debug, + _audit, _cover, _cover_edges, _kernel_64_bit, diff --git a/pkg/fuzzer/fuzzer.go b/pkg/fuzzer/fuzzer.go index 2014340ad779..28bde40fa66e 100644 --- a/pkg/fuzzer/fuzzer.go +++ b/pkg/fuzzer/fuzzer.go @@ -11,10 +11,12 @@ import ( "sort" "sync" "time" + "strings" "github.com/google/syzkaller/pkg/corpus" "github.com/google/syzkaller/pkg/csource" "github.com/google/syzkaller/pkg/flatrpc" + "github.com/google/syzkaller/pkg/log" "github.com/google/syzkaller/pkg/fuzzer/queue" "github.com/google/syzkaller/pkg/mgrconfig" "github.com/google/syzkaller/pkg/signal" @@ -188,6 +190,14 @@ func (fuzzer *Fuzzer) processResult(req *queue.Request, res *queue.Result, flags } fuzzer.handleCallInfo(req, res.Info.Extra, -1) } + if req.ReturnAudit { + audit_output := string(res.Output) + index := strings.Index(audit_output, "Audit messages:") + if index != -1 { + log.Logf(0, "Security context: %s", req.Prog.SecContext) + log.Logf(0, "\n%s\n", audit_output[index:]) + } + } // Corpus candidates may have flaky coverage, so we give them a second chance. maxCandidateAttempts := 3 @@ -222,6 +232,7 @@ type Config struct { NoMutateCalls map[int]bool FetchRawCover bool SecContexts []string + Audit bool NewInputFilter func(call string) bool PatchTest bool ModeKFuzzTest bool @@ -372,6 +383,7 @@ func (fuzzer *Fuzzer) AddCandidates(candidates []Candidate) { Prog: candidate.Prog, ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal), Stat: fuzzer.statExecCandidate, + ReturnAudit: fuzzer.Config.Audit, Important: true, } if fuzzer.SecContextGen != nil { diff --git a/pkg/fuzzer/job.go b/pkg/fuzzer/job.go index b46f317be716..aea0c9207be8 100644 --- a/pkg/fuzzer/job.go +++ b/pkg/fuzzer/job.go @@ -48,6 +48,7 @@ func genProgRequest(fuzzer *Fuzzer, rnd *rand.Rand) *queue.Request { p.SecContext = fuzzer.SecContextGen.getSecLabel() return &queue.Request{ Prog: p, + ReturnAudit: fuzzer.Config.Audit, ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal), Stat: fuzzer.statExecGenerate, } @@ -67,6 +68,7 @@ func mutateProgRequest(fuzzer *Fuzzer, rnd *rand.Rand) *queue.Request { ) return &queue.Request{ Prog: newP, + ReturnAudit: fuzzer.Config.Audit, ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal), Stat: fuzzer.statExecFuzz, } diff --git a/pkg/fuzzer/queue/queue.go b/pkg/fuzzer/queue/queue.go index 20509dcb72c4..03d1a2695c7b 100644 --- a/pkg/fuzzer/queue/queue.go +++ b/pkg/fuzzer/queue/queue.go @@ -35,6 +35,7 @@ type Request struct { ReturnAllSignal []int ReturnError bool ReturnOutput bool + ReturnAudit bool // This stat will be incremented on request completion. 
Stat *stat.Val diff --git a/pkg/mgrconfig/config.go b/pkg/mgrconfig/config.go index c365a22613dc..4b0202f4229c 100644 --- a/pkg/mgrconfig/config.go +++ b/pkg/mgrconfig/config.go @@ -261,6 +261,9 @@ type Experimental struct { // List of security contexts that can be attached to programs. SecContexts []string `json:"seccontexts"` + + // Collect audit logs generated by a program. + AuditProgs bool `json:"audit"` } type FocusArea struct { diff --git a/pkg/rpcserver/rpcserver.go b/pkg/rpcserver/rpcserver.go index b0ab14c17e71..20eb4d4cb686 100644 --- a/pkg/rpcserver/rpcserver.go +++ b/pkg/rpcserver/rpcserver.go @@ -169,6 +169,7 @@ func New(cfg *RemoteConfig) (Server, error) { Features: features, Syscalls: cfg.Syscalls, Debug: cfg.Debug, + Audit: cfg.Experimental.AuditProgs, Cover: cfg.Cover, Sandbox: sandbox, SandboxArg: cfg.SandboxArg, @@ -542,6 +543,7 @@ func (serv *server) CreateInstance(id int, injectExec chan<- bool, updInfo dispa coverEdges: serv.cfg.UseCoverEdges, filterSignal: serv.cfg.FilterSignal, debug: serv.cfg.Debug, + audit: serv.cfg.Audit, debugTimeouts: serv.cfg.DebugTimeouts, sysTarget: serv.sysTarget, injectExec: injectExec, diff --git a/pkg/rpcserver/runner.go b/pkg/rpcserver/runner.go index fa676a378a33..c614324d22af 100644 --- a/pkg/rpcserver/runner.go +++ b/pkg/rpcserver/runner.go @@ -32,6 +32,7 @@ type Runner struct { coverEdges bool filterSignal bool debug bool + audit bool debugTimeouts bool sysTarget *targets.Target stats *runnerStats @@ -93,6 +94,7 @@ func (runner *Runner) Handshake(conn *flatrpc.Conn, cfg *handshakeConfig) (hands connectReply := &flatrpc.ConnectReply{ Debug: runner.debug, + Audit: runner.audit, Cover: runner.cover, CoverEdges: runner.coverEdges, Kernel64Bit: runner.sysTarget.PtrSize == 8, @@ -280,6 +282,9 @@ func (runner *Runner) sendRequest(req *queue.Request) error { if req.ReturnOutput { flags |= flatrpc.RequestFlagReturnOutput } + if req.ReturnAudit { + flags |= flatrpc.RequestFlagReturnAudit + } if req.ReturnError { flags |= flatrpc.RequestFlagReturnError } diff --git a/pkg/vminfo/vminfo.go b/pkg/vminfo/vminfo.go index 41fadf59a08c..a8ef22879ef9 100644 --- a/pkg/vminfo/vminfo.go +++ b/pkg/vminfo/vminfo.go @@ -50,6 +50,7 @@ type Config struct { // Set of syscalls to check. Syscalls []int Debug bool + Audit bool Cover bool Sandbox flatrpc.ExecEnv SandboxArg int64 diff --git a/syz-manager/manager.go b/syz-manager/manager.go index 68a1db19fa1e..fdb11f1ad4a6 100644 --- a/syz-manager/manager.go +++ b/syz-manager/manager.go @@ -1174,6 +1174,7 @@ func (mgr *Manager) MachineChecked(features flatrpc.Feature, Collide: true, EnabledCalls: enabledSyscalls, NoMutateCalls: mgr.cfg.NoMutateCalls, + Audit: mgr.cfg.Experimental.AuditProgs, FetchRawCover: mgr.cfg.RawCover, Logf: func(level int, msg string, args ...interface{}) { if level != 0 {