[PATCH 2/2][SRU][Disco] UBUNTU: ubuntu: vbox -- update to 6.0.6-dfsg-1
Seth Forshee
seth.forshee@canonical.com
Thu Apr 18 13:16:49 UTC 2019
BugLink: https://bugs.launchpad.net/bugs/1825210
Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
---
ubuntu/vbox/BOM | 4 +-
ubuntu/vbox/dkms.conf | 2 +-
ubuntu/vbox/vboxguest/Makefile | 80 +-
ubuntu/vbox/vboxguest/Makefile-footer.gmk | 128 +
.../Makefile-header.gmk} | 124 +-
ubuntu/vbox/vboxguest/Makefile.include.footer | 117 -
ubuntu/vbox/vboxguest/VBoxGuest-linux.c | 6 +-
ubuntu/vbox/vboxguest/VBoxGuest.c | 9 +-
.../vboxguest/VBoxGuestR0LibGenericRequest.c | 4 +-
.../vboxguest/VBoxGuestR0LibHGCMInternal.c | 2 +-
.../vbox/vboxguest/VBoxGuestR0LibPhysHeap.c | 86 +-
.../common/err/RTErrConvertFromErrno.c | 8 +-
ubuntu/vbox/vboxguest/common/log/log.c | 97 +-
ubuntu/vbox/vboxguest/common/log/logcom.c | 4 +-
.../vboxguest/common/table/avl_Base.cpp.h | 24 +-
.../vbox/vboxguest/common/table/avl_Get.cpp.h | 2 +-
.../common/table/avl_GetBestFit.cpp.h | 2 +-
ubuntu/vbox/vboxguest/common/table/avlpv.c | 2 +-
.../vbox/vboxguest/include/VBox/VBoxGuest.h | 2 +-
ubuntu/vbox/vboxguest/include/VBox/VMMDev.h | 56 +-
.../vboxguest/include/VBox/VMMDevCoreTypes.h | 1 +
ubuntu/vbox/vboxguest/include/VBox/err.h | 14 +-
.../vboxguest/include/internal/initterm.h | 5 +
.../vboxguest/include/iprt/assertcompile.h | 6 +-
ubuntu/vbox/vboxguest/include/iprt/cdefs.h | 16 +-
ubuntu/vbox/vboxguest/include/iprt/err.h | 2 +
ubuntu/vbox/vboxguest/include/iprt/errcore.h | 4 +-
ubuntu/vbox/vboxguest/include/iprt/log.h | 22 +-
ubuntu/vbox/vboxguest/include/iprt/mangling.h | 11 +
ubuntu/vbox/vboxguest/include/iprt/types.h | 27 +-
ubuntu/vbox/vboxguest/include/iprt/x86.h | 4 +-
ubuntu/vbox/vboxguest/r0drv/alloc-r0drv.h | 3 +
.../vboxguest/r0drv/linux/alloc-r0drv-linux.c | 2 +
.../r0drv/linux/initterm-r0drv-linux.c | 7 -
.../r0drv/linux/memobj-r0drv-linux.c | 19 +-
.../vboxguest/r0drv/linux/the-linux-kernel.h | 10 +-
.../vboxguest/r0drv/linux/timer-r0drv-linux.c | 4 +-
ubuntu/vbox/vboxguest/revision-generated.h | 2 +-
ubuntu/vbox/vboxguest/version-generated.h | 6 +-
ubuntu/vbox/vboxsf/Makefile | 92 +-
ubuntu/vbox/vboxsf/Makefile-footer.gmk | 128 +
.../Makefile-header.gmk} | 124 +-
ubuntu/vbox/vboxsf/Makefile.include.footer | 117 -
.../vboxsf/VBoxGuestR0LibGenericRequest.c | 183 +
ubuntu/vbox/vboxsf/VBoxGuestR0LibInit.c | 333 ++
ubuntu/vbox/vboxsf/VBoxGuestR0LibPhysHeap.c | 664 +++
.../vbox/vboxsf/VBoxGuestR0LibSharedFolders.c | 33 +-
ubuntu/vbox/vboxsf/dirops.c | 1821 ++++---
ubuntu/vbox/vboxsf/include/VBox/VBoxGuest.h | 2 +-
.../VBox/VBoxGuestLibSharedFoldersInline.h | 1517 ++++++
ubuntu/vbox/vboxsf/include/VBox/VMMDev.h | 56 +-
.../vboxsf/include/VBox/VMMDevCoreTypes.h | 1 +
ubuntu/vbox/vboxsf/include/VBox/err.h | 14 +-
ubuntu/vbox/vboxsf/include/VBox/shflsvc.h | 147 +-
.../vbox/vboxsf/include/iprt/assertcompile.h | 6 +-
ubuntu/vbox/vboxsf/include/iprt/cdefs.h | 16 +-
ubuntu/vbox/vboxsf/include/iprt/err.h | 2 +
ubuntu/vbox/vboxsf/include/iprt/errcore.h | 4 +-
ubuntu/vbox/vboxsf/include/iprt/fs.h | 3 +
ubuntu/vbox/vboxsf/include/iprt/list.h | 539 ++
ubuntu/vbox/vboxsf/include/iprt/log.h | 22 +-
ubuntu/vbox/vboxsf/include/iprt/mangling.h | 11 +
ubuntu/vbox/vboxsf/include/iprt/types.h | 27 +-
ubuntu/vbox/vboxsf/lnkops.c | 330 +-
.../vboxsf/r0drv/linux/the-linux-kernel.h | 10 +-
ubuntu/vbox/vboxsf/regops.c | 4354 ++++++++++++++---
ubuntu/vbox/vboxsf/revision-generated.h | 2 +-
ubuntu/vbox/vboxsf/utils.c | 1870 ++++---
ubuntu/vbox/vboxsf/vbsfmount.h | 149 +-
ubuntu/vbox/vboxsf/version-generated.h | 6 +-
ubuntu/vbox/vboxsf/vfsmod.c | 1382 ++++--
ubuntu/vbox/vboxsf/vfsmod.h | 489 +-
ubuntu/vbox/vboxvideo/Makefile | 44 +-
ubuntu/vbox/vboxvideo/Makefile-footer.gmk | 128 +
.../Makefile-header.gmk} | 124 +-
ubuntu/vbox/vboxvideo/Makefile.include.footer | 117 -
ubuntu/vbox/vboxvideo/revision-generated.h | 2 +-
ubuntu/vbox/vboxvideo/vbox_drv.c | 9 +-
ubuntu/vbox/vboxvideo/vbox_drv.h | 2 +
ubuntu/vbox/vboxvideo/vbox_irq.c | 4 +
ubuntu/vbox/vboxvideo/vbox_main.c | 16 +-
ubuntu/vbox/vboxvideo/vbox_mode.c | 5 +
ubuntu/vbox/vboxvideo/vbox_ttm.c | 23 +-
ubuntu/vbox/vboxvideo/version-generated.h | 6 +-
84 files changed, 12181 insertions(+), 3677 deletions(-)
create mode 100644 ubuntu/vbox/vboxguest/Makefile-footer.gmk
rename ubuntu/vbox/{vboxvideo/Makefile.include.header => vboxguest/Makefile-header.gmk} (51%)
delete mode 100644 ubuntu/vbox/vboxguest/Makefile.include.footer
create mode 100644 ubuntu/vbox/vboxsf/Makefile-footer.gmk
rename ubuntu/vbox/{vboxguest/Makefile.include.header => vboxsf/Makefile-header.gmk} (51%)
delete mode 100644 ubuntu/vbox/vboxsf/Makefile.include.footer
create mode 100644 ubuntu/vbox/vboxsf/VBoxGuestR0LibGenericRequest.c
create mode 100644 ubuntu/vbox/vboxsf/VBoxGuestR0LibInit.c
create mode 100644 ubuntu/vbox/vboxsf/VBoxGuestR0LibPhysHeap.c
create mode 100644 ubuntu/vbox/vboxsf/include/VBox/VBoxGuestLibSharedFoldersInline.h
create mode 100644 ubuntu/vbox/vboxsf/include/iprt/list.h
create mode 100644 ubuntu/vbox/vboxvideo/Makefile-footer.gmk
rename ubuntu/vbox/{vboxsf/Makefile.include.header => vboxvideo/Makefile-header.gmk} (51%)
delete mode 100644 ubuntu/vbox/vboxvideo/Makefile.include.footer
diff --git a/ubuntu/vbox/BOM b/ubuntu/vbox/BOM
index 7e3e8e0fb2b7..b246c1ef86ed 100644
--- a/ubuntu/vbox/BOM
+++ b/ubuntu/vbox/BOM
@@ -1,2 +1,2 @@
-Source: http://ports.ubuntu.com/pool/multiverse/v/virtualbox/virtualbox-guest-dkms_6.0.4-dfsg-7_all.deb
-Version: 6.0.4-dfsg-7
+Source: http://ports.ubuntu.com/pool/multiverse/v/virtualbox/virtualbox-guest-dkms_6.0.6-dfsg-1_all.deb
+Version: 6.0.6-dfsg-1
diff --git a/ubuntu/vbox/dkms.conf b/ubuntu/vbox/dkms.conf
index 2b637b284055..a35e27396764 100644
--- a/ubuntu/vbox/dkms.conf
+++ b/ubuntu/vbox/dkms.conf
@@ -1,5 +1,5 @@
PACKAGE_NAME="virtualbox-guest"
-PACKAGE_VERSION="6.0.4"
+PACKAGE_VERSION="6.0.6"
CLEAN="rm -f *.*o"
BUILT_MODULE_NAME[0]="vboxguest"
BUILT_MODULE_LOCATION[0]="vboxguest"
diff --git a/ubuntu/vbox/vboxguest/Makefile b/ubuntu/vbox/vboxguest/Makefile
index fbb4e783b88f..7aea5ccdae8f 100644
--- a/ubuntu/vbox/vboxguest/Makefile
+++ b/ubuntu/vbox/vboxguest/Makefile
@@ -1,5 +1,5 @@
KBUILD_EXTMOD=${srctree}/ubuntu/vbox
-# $Revision: 127855 $
+# $Id: Makefile $
## @file
# VirtualBox Guest Additions Module Makefile.
#
@@ -27,11 +27,11 @@ KBUILD_EXTMOD=${srctree}/ubuntu/vbox
# Linux kbuild sets this to our source directory if we are called from there
obj ?= $(CURDIR)
-include $(obj)/Makefile.include.header
+include $(obj)/Makefile-header.gmk
+VBOXGUEST_DIR = $(VBOX_MODULE_SRC_DIR)
-MOD_NAME = vboxguest
-
-MOD_OBJS = \
+VBOXMOD_NAME = vboxguest
+VBOXMOD_OBJS = \
VBoxGuest-linux.o \
VBoxGuest.o \
VBoxGuestR0LibGenericRequest.o \
@@ -115,7 +115,7 @@ MOD_OBJS = \
VBox/log-vbox.o \
VBox/logbackdoor.o
ifeq ($(BUILD_TARGET_ARCH),x86)
-MOD_OBJS += \
+VBOXMOD_OBJS += \
common/math/gcc/divdi3.o \
common/math/gcc/moddi3.o \
common/math/gcc/udivdi3.o \
@@ -124,42 +124,58 @@ MOD_OBJS += \
common/math/gcc/qdivrem.o
endif
ifeq ($(BUILD_TARGET_ARCH),amd64)
-MOD_OBJS += common/alloc/heapsimple.o
+VBOXMOD_OBJS += common/alloc/heapsimple.o
endif
-MOD_DEFS = -DVBOX -DRT_OS_LINUX -DIN_RING0 -DIN_RT_R0 -DIN_GUEST \
- -DIN_GUEST_R0 -DIN_MODULE -DRT_WITH_VBOX -DVBGL_VBOXGUEST \
- -DVBOX_WITH_HGCM
+VBOXMOD_DEFS = \
+ VBOX \
+ RT_OS_LINUX \
+ IN_RING0 \
+ IN_RT_R0 \
+ IN_GUEST \
+ IN_GUEST_R0 \
+ IN_MODULE \
+ RT_WITH_VBOX \
+ VBGL_VBOXGUEST \
+ VBOX_WITH_HGCM
ifeq ($(BUILD_TARGET_ARCH),amd64)
- MOD_DEFS += -DRT_ARCH_AMD64
-else
- MOD_DEFS += -DRT_ARCH_X86
+VBOXMOD_DEFS += VBOX_WITH_64_BITS_GUESTS
endif
-ifeq ($(BUILD_TARGET_ARCH),amd64)
- MOD_DEFS += -DVBOX_WITH_64_BITS_GUESTS
+ifeq ($(KERN_VERSION),24)
+VBOXMOD_DEFS += EXPORT_SYMTAB
endif
-MOD_INCL = $(addprefix -I$(KBUILD_EXTMOD),/ /include /r0drv/linux)
-MOD_INCL += $(addprefix -I$(KBUILD_EXTMOD)/vboxguest,/ /include /r0drv/linux)
-ifneq ($(wildcard $(KBUILD_EXTMOD)/vboxguest),)
- MANGLING := $(KBUILD_EXTMOD)/vboxguest/include/VBox/VBoxGuestMangling.h
-else
- MANGLING := $(KBUILD_EXTMOD)/include/VBox/VBoxGuestMangling.h
-endif
-ifeq ($(KERN_VERSION),24)
- ## @todo move to MOD_DEFS when we have finished refactoring
- MOD_CFLAGS = -DEXPORT_SYMTAB
-else
- MOD_CFLAGS = -Wno-declaration-after-statement -include $(MANGLING) -fno-pie
+VBOXMOD_INCL = \
+ $(VBOXGUEST_DIR) \
+ $(VBOXGUEST_DIR)include \
+ $(VBOXGUEST_DIR)r0drv/linux
+
+VBOXMOD_CFLAGS := $(call VBOX_GCC_CHECK_CC,-Wno-declaration-after-statement,-Wno-declaration-after-statement,,)
+VBOXMOD_CFLAGS += $(call VBOX_GCC_CHECK_CC,-fno-pie,-fno-pie,,)
+ifneq ($(KERN_VERSION),24)
+VBOXMOD_CFLAGS += -include $(VBOXGUEST_DIR)include/VBox/VBoxGuestMangling.h
endif
-MOD_CLEAN = . linux r0drv generic r0drv/linux r0drv/generic VBox \
- common/alloc common/err common/log common/math/gcc common/misc \
- common/string common/table common/time
+VBOXMOD_CLEAN = \
+ . \
+ linux \
+ r0drv \
+ generic \
+ r0drv/linux \
+ r0drv/generic \
+ VBox \
+ common/alloc \
+ common/err \
+ common/log \
+ common/math/gcc \
+ common/misc \
+ common/string \
+ common/table \
+ common/time
-include $(obj)/Makefile.include.footer
+include $(obj)/Makefile-footer.gmk
-check: $(MOD_NAME)
+check: $(VBOXMOD_NAME)
@if ! readelf -p __ksymtab_strings vboxguest.ko | grep -E "\[.*\] *(RT|g_..*RT.*)"; then \
echo "All exported IPRT symbols are properly renamed!"; \
else \
diff --git a/ubuntu/vbox/vboxguest/Makefile-footer.gmk b/ubuntu/vbox/vboxguest/Makefile-footer.gmk
new file mode 100644
index 000000000000..adc2c2ebaaa1
--- /dev/null
+++ b/ubuntu/vbox/vboxguest/Makefile-footer.gmk
@@ -0,0 +1,128 @@
+# $Id: Makefile-footer.gmk $
+## @file
+# VirtualBox Guest Additions kernel module Makefile, common parts.
+#
+# See Makefile-header.gmk for details of how to use this.
+#
+
+#
+# Copyright (C) 2006-2019 Oracle Corporation
+#
+# This file is part of VirtualBox Open Source Edition (OSE), as
+# available from http://www.virtualbox.org. This file is free software;
+# you can redistribute it and/or modify it under the terms of the GNU
+# General Public License (GPL) as published by the Free Software
+# Foundation, in version 2 as it comes in the "COPYING" file of the
+# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+#
+
+VBOXMOD_0_TARGET = $(VBOXMOD_NAME)
+
+KBUILD_VERBOSE ?= 1 # Variable belongs to our kBuild, not the linux one.
+VBOX_LNX_VERBOSE = $(if $(KBUILD_VERBOSE),1,)
+
+#
+# Compiler options
+#
+VBOXMOD_0_KFLAGS := -D__KERNEL__ -DMODULE -DRT_WITHOUT_PRAGMA_ONCE $(addprefix -D,$(VBOXMOD_DEFS))
+ifeq ($(BUILD_TARGET_ARCH),amd64)
+VBOXMOD_0_KFLAGS += -DRT_ARCH_AMD64
+else
+VBOXMOD_0_KFLAGS += -DRT_ARCH_X86
+endif
+
+ifeq ($(BUILD_TYPE),debug)
+# The -Wno-array-bounds is because of a bug in gcc 4.something, see
+# https://sourceware.org/bugzilla/show_bug.cgi?id=10001
+ VBOXMOD_0_KFLAGS += -DDEBUG -DDEBUG_$(subst $(subst _, ,_),_,$(USERNAME)) -DDEBUG_USERNAME=$(subst $(subst _, ,_),_,$(USERNAME))
+ ifeq ($(shell expr $(KERN_VER) : '[23]\.'),0)
+ VBOXMOD_0_KFLAGS += -Werror -Wall -Wno-array-bounds
+ endif
+endif
+
+ifeq ($(VBOX_KERN_GROKS_EXTMOD),)
+#
+# Pre 2.6.6
+#
+# Note: While pre 2.6.6 kernels could also do "proper" builds from kbuild, the
+# make script needed to support it was somewhat different from 2.6. Since this
+# script works and pre-2.6.6 is not a moving target we will not try to do things
+# the "proper" way.
+#
+VBOXMOD_EXT := o
+
+ ifeq ($(BUILD_TARGET_ARCH),amd64)
+VBOXMOD_0_KFLAGS += -mcmodel=kernel
+ endif
+ ifeq ($(KERN_VERSION),24)
+VBOXMOD_0_KFLAGS += -DVBOX_LINUX_2_4
+ endif
+
+CFLAGS := -O2 $(VBOXMOD_CFLAGS) $(addprefix -I,$(KERN_INCL) $(VBOXMOD_INCL)) $(VBOXMOD_0_KFLAGS) $(KDEBUG)
+
+# 2.4 Module linking
+$(VBOXMOD_0_TARGET).$(VBOXMOD_EXT): $(VBOXMOD_OBJS)
+ $(LD) -o $@ -r $(VBOXMOD_OBJS)
+
+all: $(VBOXMOD_0_TARGET)
+$(VBOXMOD_0_TARGET): $(VBOXMOD_0_TARGET).$(VBOXMOD_EXT)
+
+install: $(VBOXMOD_0_TARGET)
+ @mkdir -p $(MODULE_DIR); \
+ install -m 0644 -o root -g root $(VBOXMOD_0_TARGET).$(VBOXMOD_EXT) $(MODULE_DIR); \
+ PATH="$(PATH):/bin:/sbin" depmod -a; sync
+
+clean:
+ for f in $(sort $(dir $(VBOXMOD_OBJS))); do rm -f $$f/*.o $$f/.*.cmd $$f/.*.flags; done
+ rm -rf .$(VBOXMOD_NAME)* .tmp_ver* $(VBOXMOD_NAME).* Modules.symvers modules.order
+
+.PHONY: all $(VBOXMOD_0_TARGET) install clean
+
+else # VBOX_KERN_GROKS_EXTMOD
+#
+# 2.6.6 and later
+#
+VBOXMOD_EXT := ko
+
+# build defs
+EXTRA_CFLAGS += $(VBOXMOD_CFLAGS) $(addprefix -I,$(KERN_INCL) $(VBOXMOD_INCL)) $(VBOXMOD_0_KFLAGS) $(KDEBUG)
+$(VBOXMOD_0_TARGET)-y := $(VBOXMOD_OBJS)
+obj-m += $(VBOXMOD_0_TARGET).o
+
+# Trigger parallel make job.
+JOBS := $(shell (getconf _NPROCESSORS_ONLN || grep -Ec '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
+ ifeq ($(JOBS),0)
+ override JOBS := 1
+ endif
+
+# rules:
+all: $(VBOXMOD_0_TARGET)
+
+# OL/UEK: disable module signing for external modules -- we don't have any private key
+$(VBOXMOD_0_TARGET):
+ifneq ($(VBOX_KERN_GROKS_SUBDIRS),)
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) $(if $(JOBS),-j$(JOBS),) modules
+else
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) M=$(CURDIR) SRCROOT=$(CURDIR) $(if $(JOBS),-j$(JOBS),) modules
+endif
+
+install: $(VBOXMOD_0_TARGET)
+ifneq ($(VBOX_KERN_GROKS_SUBDIRS),)
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) modules_install
+else
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) M=$(CURDIR) SRCROOT=$(CURDIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) modules_install
+endif
+
+modules_install: install
+
+clean:
+ifneq ($(VBOX_KERN_GROKS_SUBDIRS),)
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) clean
+else
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) M=$(CURDIR) SRCROOT=$(CURDIR) clean
+endif
+
+.PHONY: all $(VBOXMOD_0_TARGET) install modules_install clean
+endif # VBOX_KERN_GROKS_EXTMOD
+
diff --git a/ubuntu/vbox/vboxvideo/Makefile.include.header b/ubuntu/vbox/vboxguest/Makefile-header.gmk
similarity index 51%
rename from ubuntu/vbox/vboxvideo/Makefile.include.header
rename to ubuntu/vbox/vboxguest/Makefile-header.gmk
index 8b0434bd508e..456d2789ec30 100644
--- a/ubuntu/vbox/vboxvideo/Makefile.include.header
+++ b/ubuntu/vbox/vboxguest/Makefile-header.gmk
@@ -1,4 +1,4 @@
-# $Id: Makefile.include.header $
+# $Id: Makefile-header.gmk $
## @file
# VirtualBox Guest Additions kernel module Makefile, common parts.
#
@@ -26,16 +26,15 @@
# build as part of the Guest Additions. The intended way of doing this is as
# follows:
#
-# # Linux kbuild sets this to our source directory if we are called from
-# # there
+# # Linux kbuild sets this to our source directory if we are called from there
# obj ?= $(CURDIR)
-# include $(obj)/Makefile.include.header
-# MOD_NAME = <name of the module to be built, without extension>
-# MOD_OBJS = <list of object files which should be included>
-# MOD_DEFS = <any additional defines which this module needs>
-# MOD_INCL = <any additional include paths which this module needs>
-# MOD_CFLAGS = <any additional CFLAGS which this module needs>
-# include $(obj)/Makefile.include.footer
+# include $(obj)/Makefile-header.gmk
+# VBOXMOD_NAME = <name of the module to be built, without extension>
+# VBOXMOD_OBJS = <list of object files which should be included>
+# VBOXMOD_DEFS = <any additional defines which this module needs>
+# VBOXMOD_INCL = <any additional include paths which this module needs>
+# VBOXMOD_CFLAGS = <any additional CFLAGS which this module needs>
+# include $(obj)/Makefile-footer.gmk
#
# The kmk kBuild define KBUILD_TARGET_ARCH is available.
#
@@ -79,7 +78,9 @@ ifeq ($(BUILD_TYPE),)
BUILD_TYPE := release
else
ifneq ($(BUILD_TYPE),release)
- $(warning Using BUILD_TYPE='$(BUILD_TYPE)' from the $(origin BUILD_TYPE).)
+ ifndef VBOX_KERN_QUIET
+ $(warning Using BUILD_TYPE='$(BUILD_TYPE)' from the $(origin BUILD_TYPE).)
+ endif
endif
endif
ifeq ($(USERNAME),)
@@ -108,14 +109,35 @@ ifeq ($(KERNELRELEASE),)
$(error The kernel build folder path must end in <version>/build, or the variable KERN_VER must be set)
endif
endif
- KERN_VER ?= $(shell uname -r)
+ KERN_VER ?= $(shell uname -r)
endif
- # guess kernel major version (24 or later)
- ifeq ($(shell if grep '"2\.4\.' /lib/modules/$(KERN_VER)/build/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ ifeq ($(KERN_DIR),)
+KERN_DIR := $(srctree)
+ endif
+
+ # Is this 2.4 or < 2.6.6? The UTS_RELEASE "2.x.y.z" define is present in the header until 2.6.1x something.
+ ifeq ($(shell if grep '"2\.4\.' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
KERN_VERSION := 24
+ VBOX_KERN_GROKS_EXTMOD :=
else
KERN_VERSION := 26
+ VBOX_KERN_GROKS_EXTMOD := yes
+ ifeq ($(shell if grep '"2\.6\.[012345][."]' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ VBOX_KERN_GROKS_EXTMOD :=
+ endif
+ VBOX_KERN_GROKS_SUBDIRS :=
+ ifeq ($(shell if grep '"[432]\.' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
+ endif
+
+ #
+ # Hack for Ubuntu 4.10 where we determine 2.6.8.1-3-generic-amd64 here, but
+ # the next invocation (M/SUBDIR) ends up with KERNELRELEASE=2.6.8.1-3.
+ #
+ ifeq ($(shell if grep '"[2]\.' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ export KERN_VER KERN_DIR
endif
else # neq($(KERNELRELEASE),)
@@ -125,22 +147,39 @@ else # neq($(KERNELRELEASE),)
#
# guess kernel version (24 or 26)
- ifeq ($(shell if echo "$(VERSION).$(PATCHLEVEL)." | grep '2\.4\.' > /dev/null; then echo yes; fi),yes)
+ ifeq ($(VERSION).$(PATCHLEVEL),2.4)
KERN_VERSION := 24
+ VBOX_KERN_GROKS_EXTMOD :=
else
KERN_VERSION := 26
+ VBOX_KERN_GROKS_EXTMOD := yes
+ ifeq ($(VERSION).$(PATCHLEVEL),2.6)
+ ifeq ($(findstring @$(SUBLEVEL)@,@0@1@2@3@4@5@),@$(SUBLEVEL)@)
+ VBOX_KERN_GROKS_EXTMOD :=
+ endif
+ endif
+ VBOX_KERN_GROKS_SUBDIRS :=
+ ifeq ($(VERSION),2)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
+ ifeq ($(VERSION),3)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
+ ifeq ($(VERSION),4)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
endif
KERN_VER := $(KERNELRELEASE)
+ ifeq ($(KERN_DIR),)
+KERN_DIR := $(srctree)
+ endif
endif # neq($(KERNELRELEASE),)
# Kernel build folder
-ifeq ($(KERN_DIR),)
-KERN_DIR := $(srctree)
-endif
ifneq ($(shell if test -d $(KERN_DIR); then echo yes; fi),yes)
- $(error Error: unable to find the headers of the Linux kernel to build against. \
+ $(error Error: unable to find the headers of the Linux kernel to build against (KERN_DIR=$(KERN_DIR)). \
Specify KERN_VER=<version> (currently $(KERN_VER)) and run Make again)
endif
# Kernel include folder
@@ -149,12 +188,59 @@ KERN_INCL := $(KERN_DIR)/include
INSTALL_MOD_DIR ?= misc
MODULE_DIR := $(INSTALL_MOD_PATH)/lib/modules/$(KERN_VER)/$(INSTALL_MOD_DIR)
+#
+# The KBUILD_EXTMOD variable is used by 2.6.6 and later when building external
+# modules (see https://lwn.net/Articles/79984/). It will be set to SUBDIRS
+# or M by the linux kernel makefile. We fake it here for older kernels.
+#
+## @todo Drop this KBUILD_EXTMOD glue once it has been removed from all our makefiles (see sharedfolders).
+ifndef CURDIR # for make < v3.79
+ CURDIR := $(shell pwd)
+endif
+ifndef KBUILD_EXTMOD
+ KBUILD_EXTMOD := $(CURDIR)
+endif
+
+
+# For VBOX_GCC_CHECK_CC
+VBOX_CLOSEPAR := )
+VBOX_DOLLAR := $$
+## Modified VBOX_GCC_CHECK_EX_CC_CXX macro from /Config.kmk.
+# @param 1 The option to test for.
+# @param 2 The return value when supported.
+# @param 3 The return value when NOT supported.
+VBOX_GCC_CHECK_CC = $(shell \
+ > /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c; \
+ if $(CC) $(subst -Wno-,-W,$(1)) -Werror -c -o /dev/null /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c > /dev/null 2>&1; then \
+ case "`LC_ALL=C $(CC) $(subst -Wno-,-W,$(1)) -Werror -c -o /dev/null /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c 2>&1`" in \
+ "error: unknown warning option"*$(VBOX_CLOSEPAR) echo "$(3)";; \
+ *$(VBOX_CLOSEPAR) echo "$(2)";; \
+ esac; \
+ else echo "$(3)"; fi; \
+ rm -f /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c; )
+
+#
+# Guess the module directory ASSUMING that this file is located in that directory.
+# Note! The special MAKEFILE_LIST variable was introduced in GNU make 3.80.
+#
+ifdef MAKEFILE_LIST
+ VBOX_MODULE_SRC_DIR := $(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+else
+ VBOX_MODULE_SRC_DIR := $(CURDIR)/
+endif
+
+
# debug - show guesses.
ifdef DEBUG
+ ifndef VBOX_KERN_QUIET
$(warning dbg: INSTALL_MOD_PATH = $(INSTALL_MOD_PATH))
$(warning dbg: INSTALL_MOD_DIR = $(INSTALL_MOD_DIR))
$(warning dbg: KERN_DIR = $(KERN_DIR))
$(warning dbg: KERN_INCL = $(KERN_INCL))
$(warning dbg: KERN_VERSION = $(KERN_VERSION))
$(warning dbg: MODULE_DIR = $(MODULE_DIR))
+$(warning dbg: KBUILD_EXTMOD = $(KBUILD_EXTMOD))
+$(warning dbg: VBOX_MODULE_SRC_DIR = $(VBOX_MODULE_SRC_DIR))
+ endif
endif
+
diff --git a/ubuntu/vbox/vboxguest/Makefile.include.footer b/ubuntu/vbox/vboxguest/Makefile.include.footer
deleted file mode 100644
index 7e04c3153eaa..000000000000
--- a/ubuntu/vbox/vboxguest/Makefile.include.footer
+++ /dev/null
@@ -1,117 +0,0 @@
-# $Id: Makefile.include.footer $
-## @file
-# VirtualBox Guest Additions kernel module Makefile, common parts.
-#
-# See Makefile.include.header for details of how to use this.
-#
-
-#
-# Copyright (C) 2006-2019 Oracle Corporation
-#
-# This file is part of VirtualBox Open Source Edition (OSE), as
-# available from http://www.virtualbox.org. This file is free software;
-# you can redistribute it and/or modify it under the terms of the GNU
-# General Public License (GPL) as published by the Free Software
-# Foundation, in version 2 as it comes in the "COPYING" file of the
-# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
-# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
-#
-
-# override is required by the Debian guys
-override MODULE = $(MOD_NAME)
-OBJS = $(MOD_OBJS)
-
-KBUILD_VERBOSE ?= 1
-LINUX_VERBOSE = $(if $(KBUILD_VERBOSE),1,)
-
-#
-# Compiler options
-#
-ifndef INCL
- INCL := $(addprefix -I,$(KERN_INCL) $(EXTRA_INCL))
- ifndef KBUILD_EXTMOD
- KBUILD_EXTMOD := $(shell pwd)
- endif
- INCL += $(MOD_INCL)
- export INCL
-endif
-KFLAGS := -D__KERNEL__ -DMODULE -DRT_WITHOUT_PRAGMA_ONCE $(MOD_DEFS)
-ifeq ($(BUILD_TYPE),debug)
-# The -Wno-array-bounds is because of a bug in gcc 4.something, see
-# https://sourceware.org/bugzilla/show_bug.cgi?id=10001
- KFLAGS += -DDEBUG -DDEBUG_$(subst $(subst _, ,_),_,$(USERNAME)) -DDEBUG_USERNAME=$(subst $(subst _, ,_),_,$(USERNAME))
- ifeq ($(shell expr $(KERN_VER) : '[23]\.'),0)
- KFLAGS += -Werror -Wall -Wno-array-bounds
- endif
-endif
-
-ifeq ($(KERN_VERSION), 24)
-#
-# 2.4
-#
-
-# Note: while 2.4 kernels could also do "proper" builds from kbuild, the make
-# script needed to support it was somewhat different from 2.6. Since this
-# script works and 2.4 is not a moving target we will not try do do things the
-# "proper" way.
-
-ifeq ($(BUILD_TARGET_ARCH),amd64)
- KFLAGS += -mcmodel=kernel
-endif
-
-CFLAGS := -O2 -DVBOX_LINUX_2_4 $(MOD_CFLAGS) $(INCL) $(KFLAGS) $(MOD_EXTRA) $(KDEBUG)
-MODULE_EXT := o
-
-# 2.4 Module linking
-$(MODULE).o: $(OBJS)
- $(LD) -o $@ -r $(OBJS)
-
-.PHONY: $(MODULE)
-all: $(MODULE)
-$(MODULE): $(MODULE).o
-
-install: $(MODULE)
- @mkdir -p $(MODULE_DIR); \
- install -m 0644 -o root -g root $(MODULE).$(MODULE_EXT) $(MODULE_DIR); \
- PATH="$(PATH):/bin:/sbin" depmod -a; sync
-
-clean:
- for f in $(sort $(dir $(OBJS))); do rm -f $$f/*.o $$f/.*.cmd $$f/.*.flags; done
- rm -rf .$(MOD_NAME)* .tmp_ver* $(MOD_NAME).* Modules.symvers modules.order
-
-else # ! $(KERN_VERSION), 24
-#
-# 2.6 and later
-#
-
-MODULE_EXT := ko
-
-$(MODULE)-y := $(OBJS)
-
-# build defs
-EXTRA_CFLAGS += $(MOD_CFLAGS) $(INCL) $(KFLAGS) $(MOD_EXTRA) $(KDEBUG)
-
-.PHONY: $(MODULE)
-all: $(MODULE)
-
-obj-m += $(MODULE).o
-
-JOBS := $(shell (getconf _NPROCESSORS_ONLN || grep -Ec '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
-ifeq ($(JOBS),0)
- override JOBS := 1
-endif
-
-# OL/UEK: disable module signing for external modules -- we don't have any private key
-$(MODULE):
- $(MAKE) V=$(LINUX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) $(if $(JOBS),-j$(JOBS),) modules
-
-install: $(MODULE)
- $(MAKE) V=$(LINUX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) modules_install
-
-modules_install: install
-
-clean:
- $(MAKE) V=$(LINUX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) clean
-
-.PHONY: $(MODULE) install modules_install clean
-endif
diff --git a/ubuntu/vbox/vboxguest/VBoxGuest-linux.c b/ubuntu/vbox/vboxguest/VBoxGuest-linux.c
index af4d3391808f..1b69ca6d8b98 100644
--- a/ubuntu/vbox/vboxguest/VBoxGuest-linux.c
+++ b/ubuntu/vbox/vboxguest/VBoxGuest-linux.c
@@ -1,4 +1,4 @@
-/* $Rev: 127855 $ */
+/* $Rev: 129380 $ */
/** @file
* VBoxGuest - Linux specifics.
*
@@ -100,8 +100,8 @@
*********************************************************************************************************************************/
static void vgdrvLinuxTermPci(struct pci_dev *pPciDev);
static int vgdrvLinuxProbePci(struct pci_dev *pPciDev, const struct pci_device_id *id);
-static int vgdrvLinuxModInit(void);
-static void vgdrvLinuxModExit(void);
+static int __init vgdrvLinuxModInit(void);
+static void __exit vgdrvLinuxModExit(void);
static int vgdrvLinuxOpen(struct inode *pInode, struct file *pFilp);
static int vgdrvLinuxRelease(struct inode *pInode, struct file *pFilp);
#ifdef HAVE_UNLOCKED_IOCTL
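
The two hunks above annotate the module entry points with __init and __exit so
the kernel can place them in discardable sections: init code is freed once
loading completes, and exit code is dropped entirely on kernels built without
module unloading. A minimal standalone sketch of the pattern (illustrative
module, not part of this patch):

#include <linux/init.h>
#include <linux/module.h>

/* __init: the kernel frees this code after the module has loaded. */
static int __init example_init(void)
{
    pr_info("example: loaded\n");
    return 0;   /* 0 means success for module init */
}

/* __exit: discarded entirely if the kernel cannot unload modules. */
static void __exit example_exit(void)
{
    pr_info("example: unloaded\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
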
diff --git a/ubuntu/vbox/vboxguest/VBoxGuest.c b/ubuntu/vbox/vboxguest/VBoxGuest.c
index 63840d420b3c..8b8bc454d709 100644
--- a/ubuntu/vbox/vboxguest/VBoxGuest.c
+++ b/ubuntu/vbox/vboxguest/VBoxGuest.c
@@ -2081,7 +2081,7 @@ static int vgdrvIoCtl_GetVMMDevIoInfo(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCGETVMMDE
* @param pDevExt The device extension.
* @param pNotify The new callback information.
*/
-int vgdrvIoCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCSETMOUSENOTIFYCALLBACK pNotify)
+static int vgdrvIoCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCSETMOUSENOTIFYCALLBACK pNotify)
{
LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK: pfnNotify=%p pvUser=%p\n", pNotify->u.In.pfnNotify, pNotify->u.In.pvUser));
@@ -2321,11 +2321,9 @@ static int vgdrvCheckIfVmmReqIsAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSI
case VMMDevReq_HGCMConnect:
case VMMDevReq_HGCMDisconnect:
# ifdef VBOX_WITH_64_BITS_GUESTS
- case VMMDevReq_HGCMCall32:
case VMMDevReq_HGCMCall64:
-# else
- case VMMDevReq_HGCMCall:
-# endif /* VBOX_WITH_64_BITS_GUESTS */
+# endif
+ case VMMDevReq_HGCMCall32:
case VMMDevReq_HGCMCancel:
case VMMDevReq_HGCMCancel2:
#endif /* VBOX_WITH_HGCM */
@@ -2442,6 +2440,7 @@ static int vgdrvCheckIfVmmReqIsAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSI
case kLevel_TrustedUsers:
if (pSession->fUserSession)
break;
+ RT_FALL_THRU();
case kLevel_AllUsers:
return VINF_SUCCESS;
}
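
The RT_FALL_THRU() added above marks the kLevel_TrustedUsers ->
kLevel_AllUsers fall-through as intentional, which keeps
-Wimplicit-fallthrough quiet on newer compilers. A sketch of how such a macro
is commonly defined and used (the real definition lives in iprt/cdefs.h; this
one is an assumption for illustration):

/* Illustrative stand-in -- the real RT_FALL_THRU is in iprt/cdefs.h. */
#if defined(__GNUC__) && __GNUC__ >= 7
# define RT_FALL_THRU()  __attribute__((fallthrough))
#else
# define RT_FALL_THRU()  (void)0
#endif

static int is_allowed(int enmLevel, int fUserSession)
{
    switch (enmLevel)
    {
        case 1: /* trusted users */
            if (fUserSession)
                break;
            RT_FALL_THRU(); /* deliberate: fall into the all-users case */
        case 2: /* all users */
            return 1;
    }
    return 0;
}
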
diff --git a/ubuntu/vbox/vboxguest/VBoxGuestR0LibGenericRequest.c b/ubuntu/vbox/vboxguest/VBoxGuestR0LibGenericRequest.c
index 1e559e8bcaeb..9391957b4b03 100644
--- a/ubuntu/vbox/vboxguest/VBoxGuestR0LibGenericRequest.c
+++ b/ubuntu/vbox/vboxguest/VBoxGuestR0LibGenericRequest.c
@@ -88,11 +88,9 @@ DECLR0VBGL(int) VbglGR0Verify(const VMMDevRequestHeader *pReq, size_t cbReq)
if ( pReq->requestType == VMMDevReq_ChangeMemBalloon
|| pReq->requestType == VMMDevReq_GetDisplayChangeRequestMulti
#ifdef VBOX_WITH_64_BITS_GUESTS
- || pReq->requestType == VMMDevReq_HGCMCall32
|| pReq->requestType == VMMDevReq_HGCMCall64
-#else
- || pReq->requestType == VMMDevReq_HGCMCall
#endif
+ || pReq->requestType == VMMDevReq_HGCMCall32
|| pReq->requestType == VMMDevReq_RegisterSharedModule
|| pReq->requestType == VMMDevReq_ReportGuestUserState
|| pReq->requestType == VMMDevReq_LogString
diff --git a/ubuntu/vbox/vboxguest/VBoxGuestR0LibHGCMInternal.c b/ubuntu/vbox/vboxguest/VBoxGuestR0LibHGCMInternal.c
index 4eb46c2a44b3..986e1745c7a0 100644
--- a/ubuntu/vbox/vboxguest/VBoxGuestR0LibHGCMInternal.c
+++ b/ubuntu/vbox/vboxguest/VBoxGuestR0LibHGCMInternal.c
@@ -493,7 +493,7 @@ static uint32_t vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParamet
case VMMDevHGCMParmType_LinAddr_Locked_Out:
return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
- default: AssertFailed();
+ default: AssertFailed(); RT_FALL_THRU();
case VMMDevHGCMParmType_LinAddr:
case VMMDevHGCMParmType_LinAddr_Locked:
return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
diff --git a/ubuntu/vbox/vboxguest/VBoxGuestR0LibPhysHeap.c b/ubuntu/vbox/vboxguest/VBoxGuestR0LibPhysHeap.c
index c409530e8d61..0cd11db02da0 100644
--- a/ubuntu/vbox/vboxguest/VBoxGuestR0LibPhysHeap.c
+++ b/ubuntu/vbox/vboxguest/VBoxGuestR0LibPhysHeap.c
@@ -350,7 +350,7 @@ static VBGLPHYSHEAPBLOCK *vbglPhysHeapChunkAlloc (uint32_t cbSize)
}
-void vbglPhysHeapChunkDelete (VBGLPHYSHEAPCHUNK *pChunk)
+static void vbglPhysHeapChunkDelete (VBGLPHYSHEAPCHUNK *pChunk)
{
char *p;
VBGL_PH_ASSERT(pChunk != NULL);
@@ -403,7 +403,7 @@ void vbglPhysHeapChunkDelete (VBGLPHYSHEAPCHUNK *pChunk)
DECLR0VBGL(void *) VbglR0PhysHeapAlloc (uint32_t cbSize)
{
- VBGLPHYSHEAPBLOCK *pBlock, *iter;
+ VBGLPHYSHEAPBLOCK *pBlock, *pIter;
int rc = vbglPhysHeapEnter ();
if (RT_FAILURE(rc))
@@ -411,41 +411,61 @@ DECLR0VBGL(void *) VbglR0PhysHeapAlloc (uint32_t cbSize)
dumpheap ("pre alloc");
- pBlock = NULL;
-
- /* If there are free blocks in the heap, look at them. */
- iter = g_vbgldata.pFreeBlocksHead;
-
- /* There will be not many blocks in the heap, so
- * linear search would be fast enough.
+ /*
+ * Search the free list. We do this in linear fashion as we don't expect
+ * there to be many blocks in the heap.
*/
- while (iter)
+ pBlock = NULL;
+ if (cbSize <= PAGE_SIZE / 4 * 3)
{
- if (iter->cbDataSize == cbSize)
- {
- /* exact match */
- pBlock = iter;
- break;
- }
+ /* Smaller than 3/4 page: Prefer a free block that can keep the request within a single page,
+ so HGCM processing in VMMDev can use page locks instead of several reads and writes. */
- /* Looking for a free block with nearest size */
- if (iter->cbDataSize > cbSize)
- {
- if (pBlock)
+ VBGLPHYSHEAPBLOCK *pFallback = NULL;
+ for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
+ if (pIter->cbDataSize >= cbSize)
{
- if (iter->cbDataSize < pBlock->cbDataSize)
+ if (pIter->cbDataSize == cbSize)
{
- pBlock = iter;
+ if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
+ {
+ pBlock = pIter;
+ break;
+ }
+ pFallback = pIter;
+ }
+ else
+ {
+ if (!pFallback || pIter->cbDataSize < pFallback->cbDataSize)
+ pFallback = pIter;
+ if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
+ if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
+ pBlock = pIter;
}
}
- else
+
+ if (!pBlock)
+ pBlock = pFallback;
+ }
+ else
+ {
+ /* Larger than 3/4 page: Find smallest free list match. */
+
+ for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
+ if (pIter->cbDataSize >= cbSize)
{
- pBlock = iter;
- }
- }
+ if (pIter->cbDataSize == cbSize)
+ {
+ /* Exact match - we're done! */
+ pBlock = pIter;
+ break;
+ }
- iter = iter->pNext;
+ /* Looking for a free block with nearest size. */
+ if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
+ pBlock = pIter;
+ }
}
if (!pBlock)
@@ -469,17 +489,17 @@ DECLR0VBGL(void *) VbglR0PhysHeapAlloc (uint32_t cbSize)
if (pBlock->cbDataSize > 2*(cbSize + sizeof (VBGLPHYSHEAPBLOCK)))
{
/* Data will occupy less than a half of the block,
- * the block should be split.
+ * split off the tail end into a new free list entry.
*/
- iter = (VBGLPHYSHEAPBLOCK *)((char *)pBlock + sizeof (VBGLPHYSHEAPBLOCK) + cbSize);
+ pIter = (VBGLPHYSHEAPBLOCK *)((char *)pBlock + sizeof (VBGLPHYSHEAPBLOCK) + cbSize);
- /* Init the new 'iter' block, initialized blocks are always marked as free. */
- vbglPhysHeapInitBlock (iter, pBlock->pChunk, pBlock->cbDataSize - cbSize - sizeof (VBGLPHYSHEAPBLOCK));
+ /* Init the new 'pIter' block, initialized blocks are always marked as free. */
+ vbglPhysHeapInitBlock (pIter, pBlock->pChunk, pBlock->cbDataSize - cbSize - sizeof (VBGLPHYSHEAPBLOCK));
pBlock->cbDataSize = cbSize;
- /* Insert the new 'iter' block after the 'pBlock' in the free list */
- vbglPhysHeapInsertBlock (pBlock, iter);
+ /* Insert the new 'pIter' block after the 'pBlock' in the free list */
+ vbglPhysHeapInsertBlock (pBlock, pIter);
}
/* Exclude pBlock from free list */
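
The rewritten free-list search above prefers blocks whose payload does not
cross a page boundary, so the host's HGCM processing can lock a single page
rather than bounce-buffering. The core test is the page-offset predicate used
in both loops; a standalone sketch (assumes 4 KiB pages and
PAGE_OFFSET_MASK == PAGE_SIZE - 1; MY_* names are stand-ins):

#include <stdbool.h>
#include <stdint.h>

#define MY_PAGE_SIZE        4096u
#define MY_PAGE_OFFSET_MASK (MY_PAGE_SIZE - 1)

/* True if cb bytes starting at pv fit within the page containing pv. */
static bool fits_in_one_page(const void *pv, uint32_t cb)
{
    uint32_t offInPage = (uint32_t)((uintptr_t)pv & MY_PAGE_OFFSET_MASK);
    return MY_PAGE_SIZE - offInPage >= cb;
}
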
diff --git a/ubuntu/vbox/vboxguest/common/err/RTErrConvertFromErrno.c b/ubuntu/vbox/vboxguest/common/err/RTErrConvertFromErrno.c
index 302e6bf89d7d..ef13d31bbeb9 100644
--- a/ubuntu/vbox/vboxguest/common/err/RTErrConvertFromErrno.c
+++ b/ubuntu/vbox/vboxguest/common/err/RTErrConvertFromErrno.c
@@ -36,10 +36,10 @@
#include <iprt/errno.h>
-RTDECL(int) RTErrConvertFromErrno(unsigned uNativeCode)
+RTDECL(int) RTErrConvertFromErrno(int iNativeCode)
{
/* very fast check for no error. */
- if (uNativeCode == 0)
+ if (iNativeCode == 0)
return VINF_SUCCESS;
/*
@@ -51,7 +51,7 @@ RTDECL(int) RTErrConvertFromErrno(unsigned uNativeCode)
* This switch is arranged like the Linux i386 errno.h! This switch is mirrored
* by RTErrConvertToErrno.
*/
- switch (uNativeCode)
+ switch (iNativeCode)
{ /* Linux number */
#ifdef EPERM
case EPERM: return VERR_ACCESS_DENIED; /* 1 */
@@ -449,7 +449,7 @@ RTDECL(int) RTErrConvertFromErrno(unsigned uNativeCode)
# endif
#endif
default:
- AssertLogRelMsgFailed(("Unhandled error code %d\n", uNativeCode));
+ AssertLogRelMsgFailed(("Unhandled error code %d\n", iNativeCode));
return VERR_UNRESOLVED_ERROR;
}
}
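
The signature change above (unsigned -> int) matches how errno is actually
typed, so negative or otherwise out-of-range inputs reach the final assertion
as signed values instead of wrapping to huge unsigned numbers. A compilable
sketch of the conversion shape, with hypothetical status codes standing in for
the IPRT VERR_* values (the real mapping table is much larger):

#include <errno.h>

#define MY_OK               0
#define MY_ACCESS_DENIED  (-38)  /* hypothetical stand-in */
#define MY_UNRESOLVED     (-35)  /* hypothetical stand-in */

static int convert_from_errno(int iNativeCode)
{
    if (iNativeCode == 0)   /* very fast check for no error */
        return MY_OK;
    switch (iNativeCode)
    {
        case EPERM: return MY_ACCESS_DENIED;
        default:    return MY_UNRESOLVED;
    }
}
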
diff --git a/ubuntu/vbox/vboxguest/common/log/log.c b/ubuntu/vbox/vboxguest/common/log/log.c
index b7ce1365412a..343e4a07c638 100644
--- a/ubuntu/vbox/vboxguest/common/log/log.c
+++ b/ubuntu/vbox/vboxguest/common/log/log.c
@@ -781,8 +781,8 @@ static void rtLogRingBufFlush(PRTLOGGER pLogger)
}
-RTDECL(int) RTLogCreateExV(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings,
- const char *pszEnvVarBase, unsigned cGroups, const char * const *papszGroups,
+RTDECL(int) RTLogCreateExV(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings, const char *pszEnvVarBase,
+ unsigned cGroups, const char * const *papszGroups, uint32_t cMaxEntriesPerGroup,
uint32_t fDestFlags, PFNRTLOGPHASE pfnPhase, uint32_t cHistory,
uint64_t cbHistoryFileMax, uint32_t cSecsHistoryTimeSlot,
PRTERRINFO pErrInfo, const char *pszFilenameFmt, va_list args)
@@ -840,7 +840,7 @@ RTDECL(int) RTLogCreateExV(PRTLOGGER *ppLogger, uint32_t fFlags, const char *psz
pLogger->pInt->pacEntriesPerGroup = (uint32_t *)(pLogger->pInt + 1);
else
pLogger->pInt->pacEntriesPerGroup = NULL;
- pLogger->pInt->cMaxEntriesPerGroup = UINT32_MAX;
+ pLogger->pInt->cMaxEntriesPerGroup = cMaxEntriesPerGroup ? cMaxEntriesPerGroup : UINT32_MAX;
# ifdef IN_RING3
pLogger->pInt->pfnPhase = pfnPhase;
pLogger->pInt->hFile = NIL_RTFILE;
@@ -938,6 +938,22 @@ RTDECL(int) RTLogCreateExV(PRTLOGGER *ppLogger, uint32_t fFlags, const char *psz
pszValue = RTEnvGet(pszEnvVar);
if (pszValue)
RTLogGroupSettings(pLogger, pszValue);
+
+ /*
+ * Group limit.
+ */
+ strcpy(pszEnvVar + cchEnvVarBase, "_MAX_PER_GROUP");
+ pszValue = RTEnvGet(pszEnvVar);
+ if (pszValue)
+ {
+ uint32_t cMax;
+ rc = RTStrToUInt32Full(pszValue, 0, &cMax);
+ if (RT_SUCCESS(rc))
+ pLogger->pInt->cMaxEntriesPerGroup = cMax ? cMax : UINT32_MAX;
+ else
+ AssertMsgFailed(("Invalid group limit! %s=%s\n", pszEnvVar, pszValue));
+ }
+
}
# else /* !IN_RING3 */
RT_NOREF_PV(pszEnvVarBase); RT_NOREF_PV(pszFilenameFmt); RT_NOREF_PV(args);
@@ -1016,8 +1032,9 @@ RTDECL(int) RTLogCreate(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGro
int rc;
va_start(args, pszFilenameFmt);
- rc = RTLogCreateExV(ppLogger, fFlags, pszGroupSettings, pszEnvVarBase, cGroups, papszGroups,
- fDestFlags, NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
+ rc = RTLogCreateExV(ppLogger, fFlags, pszGroupSettings, pszEnvVarBase,
+ cGroups, papszGroups, UINT32_MAX /*cMaxEntriesPerGroup*/, fDestFlags,
+ NULL /*pfnPhase*/, 0 /*cHistory*/, 0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
NULL /*pErrInfo*/, pszFilenameFmt, args);
va_end(args);
return rc;
@@ -1025,8 +1042,8 @@ RTDECL(int) RTLogCreate(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGro
RT_EXPORT_SYMBOL(RTLogCreate);
-RTDECL(int) RTLogCreateEx(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings,
- const char *pszEnvVarBase, unsigned cGroups, const char * const * papszGroups,
+RTDECL(int) RTLogCreateEx(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings, const char *pszEnvVarBase,
+ unsigned cGroups, const char * const *papszGroups, uint32_t cMaxEntriesPerGroup,
uint32_t fDestFlags, PFNRTLOGPHASE pfnPhase, uint32_t cHistory,
uint64_t cbHistoryFileMax, uint32_t cSecsHistoryTimeSlot,
PRTERRINFO pErrInfo, const char *pszFilenameFmt, ...)
@@ -1035,7 +1052,7 @@ RTDECL(int) RTLogCreateEx(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszG
int rc;
va_start(args, pszFilenameFmt);
- rc = RTLogCreateExV(ppLogger, fFlags, pszGroupSettings, pszEnvVarBase, cGroups, papszGroups,
+ rc = RTLogCreateExV(ppLogger, fFlags, pszGroupSettings, pszEnvVarBase, cGroups, papszGroups, cMaxEntriesPerGroup,
fDestFlags, pfnPhase, cHistory, cbHistoryFileMax, cSecsHistoryTimeSlot,
pErrInfo, pszFilenameFmt, args);
va_end(args);
@@ -3589,6 +3606,32 @@ DECLINLINE(char *) rtLogStPNCpyPad(char *pszDst, const char *pszSrc, size_t cchS
}
+/**
+ * stpncpy implementation for use in rtLogOutputPrefixed w/ padding.
+ *
+ * @returns Pointer to the destination buffer byte following the copied string.
+ * @param pszDst The destination buffer.
+ * @param pszSrc The source string.
+ * @param cchSrc The number of characters to copy from the
+ * source. Equal or less than string length.
+ * @param cchMinWidth The minimum field width, pad with spaces to
+ * reach this.
+ */
+DECLINLINE(char *) rtLogStPNCpyPad2(char *pszDst, const char *pszSrc, size_t cchSrc, size_t cchMinWidth)
+{
+ Assert(pszSrc);
+ Assert(strlen(pszSrc) >= cchSrc);
+
+ memcpy(pszDst, pszSrc, cchSrc);
+ pszDst += cchSrc;
+ do
+ *pszDst++ = ' ';
+ while (cchSrc++ < cchMinWidth);
+
+ return pszDst;
+}
+
+
/**
* Callback for RTLogFormatV which writes to the logger instance.
@@ -3898,28 +3941,28 @@ static DECLCALLBACK(size_t) rtLogOutputPrefixed(void *pv, const char *pachChars,
{
const unsigned fGrp = pLogger->afGroups[pArgs->iGroup != ~0U ? pArgs->iGroup : 0];
const char *pszGroup;
- size_t cch;
+ size_t cchGroup;
switch (pArgs->fFlags & fGrp)
{
- case 0: pszGroup = "--------"; cch = sizeof("--------") - 1; break;
- case RTLOGGRPFLAGS_ENABLED: pszGroup = "enabled" ; cch = sizeof("enabled" ) - 1; break;
- case RTLOGGRPFLAGS_LEVEL_1: pszGroup = "level 1" ; cch = sizeof("level 1" ) - 1; break;
- case RTLOGGRPFLAGS_LEVEL_2: pszGroup = "level 2" ; cch = sizeof("level 2" ) - 1; break;
- case RTLOGGRPFLAGS_LEVEL_3: pszGroup = "level 3" ; cch = sizeof("level 3" ) - 1; break;
- case RTLOGGRPFLAGS_LEVEL_4: pszGroup = "level 4" ; cch = sizeof("level 4" ) - 1; break;
- case RTLOGGRPFLAGS_LEVEL_5: pszGroup = "level 5" ; cch = sizeof("level 5" ) - 1; break;
- case RTLOGGRPFLAGS_LEVEL_6: pszGroup = "level 6" ; cch = sizeof("level 6" ) - 1; break;
- case RTLOGGRPFLAGS_LEVEL_7: pszGroup = "level 7" ; cch = sizeof("level 7" ) - 1; break;
- case RTLOGGRPFLAGS_LEVEL_8: pszGroup = "level 8" ; cch = sizeof("level 8" ) - 1; break;
- case RTLOGGRPFLAGS_LEVEL_9: pszGroup = "level 9" ; cch = sizeof("level 9" ) - 1; break;
- case RTLOGGRPFLAGS_LEVEL_10: pszGroup = "level 10"; cch = sizeof("level 10") - 1; break;
- case RTLOGGRPFLAGS_LEVEL_11: pszGroup = "level 11"; cch = sizeof("level 11") - 1; break;
- case RTLOGGRPFLAGS_LEVEL_12: pszGroup = "level 12"; cch = sizeof("level 12") - 1; break;
- case RTLOGGRPFLAGS_FLOW: pszGroup = "flow" ; cch = sizeof("flow" ) - 1; break;
- case RTLOGGRPFLAGS_WARN: pszGroup = "warn" ; cch = sizeof("warn" ) - 1; break;
- default: pszGroup = "????????"; cch = sizeof("????????") - 1; break;
+ case 0: pszGroup = "--------"; cchGroup = sizeof("--------") - 1; break;
+ case RTLOGGRPFLAGS_ENABLED: pszGroup = "enabled" ; cchGroup = sizeof("enabled" ) - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_1: pszGroup = "level 1" ; cchGroup = sizeof("level 1" ) - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_2: pszGroup = "level 2" ; cchGroup = sizeof("level 2" ) - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_3: pszGroup = "level 3" ; cchGroup = sizeof("level 3" ) - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_4: pszGroup = "level 4" ; cchGroup = sizeof("level 4" ) - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_5: pszGroup = "level 5" ; cchGroup = sizeof("level 5" ) - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_6: pszGroup = "level 6" ; cchGroup = sizeof("level 6" ) - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_7: pszGroup = "level 7" ; cchGroup = sizeof("level 7" ) - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_8: pszGroup = "level 8" ; cchGroup = sizeof("level 8" ) - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_9: pszGroup = "level 9" ; cchGroup = sizeof("level 9" ) - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_10: pszGroup = "level 10"; cchGroup = sizeof("level 10") - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_11: pszGroup = "level 11"; cchGroup = sizeof("level 11") - 1; break;
+ case RTLOGGRPFLAGS_LEVEL_12: pszGroup = "level 12"; cchGroup = sizeof("level 12") - 1; break;
+ case RTLOGGRPFLAGS_FLOW: pszGroup = "flow" ; cchGroup = sizeof("flow" ) - 1; break;
+ case RTLOGGRPFLAGS_WARN: pszGroup = "warn" ; cchGroup = sizeof("warn" ) - 1; break;
+ default: pszGroup = "????????"; cchGroup = sizeof("????????") - 1; break;
}
- psz = rtLogStPNCpyPad(psz, pszGroup, 16, 8);
+ psz = rtLogStPNCpyPad2(psz, pszGroup, RT_MIN(cchGroup, 16), 8);
}
#define CCH_PREFIX_16 CCH_PREFIX_15 + 17
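
rtLogStPNCpyPad2, added above, copies an exact number of characters and pads
with spaces up to a minimum field width; note that the do/while always writes
at least one space, even when the copied text already reaches the width. A
standalone sketch with the same semantics:

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Mirrors rtLogStPNCpyPad2: copy cchSrc chars of pszSrc, pad with spaces to
 * cchMinWidth, always emitting at least one trailing space. Returns the
 * byte following the output. */
static char *stpncpy_pad(char *pszDst, const char *pszSrc, size_t cchSrc, size_t cchMinWidth)
{
    assert(strlen(pszSrc) >= cchSrc);
    memcpy(pszDst, pszSrc, cchSrc);
    pszDst += cchSrc;
    do
        *pszDst++ = ' ';
    while (cchSrc++ < cchMinWidth);
    return pszDst;
}
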
diff --git a/ubuntu/vbox/vboxguest/common/log/logcom.c b/ubuntu/vbox/vboxguest/common/log/logcom.c
index 68097e97e4e9..e4055c87770b 100644
--- a/ubuntu/vbox/vboxguest/common/log/logcom.c
+++ b/ubuntu/vbox/vboxguest/common/log/logcom.c
@@ -120,8 +120,8 @@ RTDECL(void) RTLogWriteCom(const char *pach, size_t cb)
const uint8_t *pu8;
for (pu8 = (const uint8_t *)pach; cb-- > 0; pu8++)
{
- register unsigned cMaxWait;
- register uint8_t u8;
+ unsigned cMaxWait;
+ uint8_t u8;
/* expand \n -> \r\n */
if (*pu8 == '\n')
diff --git a/ubuntu/vbox/vboxguest/common/table/avl_Base.cpp.h b/ubuntu/vbox/vboxguest/common/table/avl_Base.cpp.h
index e5a97a38357b..e0fe570b6c8e 100644
--- a/ubuntu/vbox/vboxguest/common/table/avl_Base.cpp.h
+++ b/ubuntu/vbox/vboxguest/common/table/avl_Base.cpp.h
@@ -266,7 +266,7 @@ DECLINLINE(void) KAVL_FN(Rebalance)(PKAVLSTACK pStack)
}
else
{
- register unsigned char uchHeight = (unsigned char)(KMAX(uchLeftHeight, uchRightHeight) + 1);
+ unsigned char uchHeight = (unsigned char)(KMAX(uchLeftHeight, uchRightHeight) + 1);
if (uchHeight == pNode->uchHeight)
break;
pNode->uchHeight = uchHeight;
@@ -298,12 +298,12 @@ DECLINLINE(void) KAVL_FN(Rebalance)(PKAVLSTACK pStack)
*/
KAVL_DECL(bool) KAVL_FN(Insert)(PPKAVLNODECORE ppTree, PKAVLNODECORE pNode)
{
- KAVLSTACK AVLStack;
- PPKAVLNODECORE ppCurNode = ppTree;
- register PKAVLNODECORE pCurNode;
- register KAVLKEY Key = pNode->Key; NOREF(Key);
+ KAVLSTACK AVLStack;
+ PPKAVLNODECORE ppCurNode = ppTree;
+ PKAVLNODECORE pCurNode;
+ KAVLKEY Key = pNode->Key; NOREF(Key);
#ifdef KAVL_RANGE
- register KAVLKEY KeyLast = pNode->KeyLast; NOREF(KeyLast);
+ KAVLKEY KeyLast = pNode->KeyLast; NOREF(KeyLast);
#endif
AVLStack.cEntries = 0;
@@ -398,9 +398,9 @@ KAVL_DECL(bool) KAVL_FN(Insert)(PPKAVLNODECORE ppTree, PKAVLNODECORE pNode)
*/
KAVL_DECL(PKAVLNODECORE) KAVL_FN(Remove)(PPKAVLNODECORE ppTree, KAVLKEY Key)
{
- KAVLSTACK AVLStack;
- PPKAVLNODECORE ppDeleteNode = ppTree;
- register PKAVLNODECORE pDeleteNode;
+ KAVLSTACK AVLStack;
+ PPKAVLNODECORE ppDeleteNode = ppTree;
+ PKAVLNODECORE pDeleteNode;
AVLStack.cEntries = 0;
@@ -425,9 +425,9 @@ KAVL_DECL(PKAVLNODECORE) KAVL_FN(Remove)(PPKAVLNODECORE ppTree, KAVLKEY Key)
if (pDeleteNode->pLeft != KAVL_NULL)
{
/* find the rightmost node in the left tree. */
- const unsigned iStackEntry = AVLStack.cEntries;
- PPKAVLNODECORE ppLeftLeast = &pDeleteNode->pLeft;
- register PKAVLNODECORE pLeftLeast = KAVL_GET_POINTER(ppLeftLeast);
+ const unsigned iStackEntry = AVLStack.cEntries;
+ PPKAVLNODECORE ppLeftLeast = &pDeleteNode->pLeft;
+ PKAVLNODECORE pLeftLeast = KAVL_GET_POINTER(ppLeftLeast);
while (pLeftLeast->pRight != KAVL_NULL)
{
diff --git a/ubuntu/vbox/vboxguest/common/table/avl_Get.cpp.h b/ubuntu/vbox/vboxguest/common/table/avl_Get.cpp.h
index 481df4481f0f..891e44fbaacf 100644
--- a/ubuntu/vbox/vboxguest/common/table/avl_Get.cpp.h
+++ b/ubuntu/vbox/vboxguest/common/table/avl_Get.cpp.h
@@ -37,7 +37,7 @@
*/
KAVL_DECL(PKAVLNODECORE) KAVL_FN(Get)(PPKAVLNODECORE ppTree, KAVLKEY Key)
{
- register PKAVLNODECORE pNode = KAVL_GET_POINTER_NULL(ppTree);
+ PKAVLNODECORE pNode = KAVL_GET_POINTER_NULL(ppTree);
if (pNode)
{
diff --git a/ubuntu/vbox/vboxguest/common/table/avl_GetBestFit.cpp.h b/ubuntu/vbox/vboxguest/common/table/avl_GetBestFit.cpp.h
index 4d61a4580ca2..977986d5c52e 100644
--- a/ubuntu/vbox/vboxguest/common/table/avl_GetBestFit.cpp.h
+++ b/ubuntu/vbox/vboxguest/common/table/avl_GetBestFit.cpp.h
@@ -43,7 +43,7 @@
*/
KAVL_DECL(PKAVLNODECORE) KAVL_FN(GetBestFit)(PPKAVLNODECORE ppTree, KAVLKEY Key, bool fAbove)
{
- register PKAVLNODECORE pNode = KAVL_GET_POINTER_NULL(ppTree);
+ PKAVLNODECORE pNode = KAVL_GET_POINTER_NULL(ppTree);
if (pNode)
{
PKAVLNODECORE pNodeLast = NULL;
diff --git a/ubuntu/vbox/vboxguest/common/table/avlpv.c b/ubuntu/vbox/vboxguest/common/table/avlpv.c
index 2c3dbd473cfa..5f3f3fbb3323 100644
--- a/ubuntu/vbox/vboxguest/common/table/avlpv.c
+++ b/ubuntu/vbox/vboxguest/common/table/avlpv.c
@@ -25,7 +25,7 @@
*/
#ifndef NOFILEID
-static const char szFileId[] = "Id: kAVLPVInt.c,v 1.5 2003/02/13 02:02:35 bird Exp $";
+/*static const char szFileId[] = "Id: kAVLPVInt.c,v 1.5 2003/02/13 02:02:35 bird Exp $";*/
#endif
diff --git a/ubuntu/vbox/vboxguest/include/VBox/VBoxGuest.h b/ubuntu/vbox/vboxguest/include/VBox/VBoxGuest.h
index 0b9d79f68840..e89e0984d8ea 100644
--- a/ubuntu/vbox/vboxguest/include/VBox/VBoxGuest.h
+++ b/ubuntu/vbox/vboxguest/include/VBox/VBoxGuest.h
@@ -422,7 +422,7 @@ AssertCompileSize(VBGLIOCIDCHGCMFASTCALL, /* 24 + 4 + 1 + 3 + 2*8 + 4 = 0x34 (52
\
(a_pCall)->header.header.size = (a_cbReq) - sizeof(VBGLIOCIDCHGCMFASTCALL); \
(a_pCall)->header.header.version = VBGLREQHDR_VERSION; \
- (a_pCall)->header.header.requestType= (ARCH_BITS == 32 ? VMMDevReq_HGCMCall32 : VMMDevReq_HGCMCall64); \
+ (a_pCall)->header.header.requestType= (ARCH_BITS == 64 ? VMMDevReq_HGCMCall64 : VMMDevReq_HGCMCall32); \
(a_pCall)->header.header.rc = VERR_INTERNAL_ERROR; \
(a_pCall)->header.header.reserved1 = 0; \
(a_pCall)->header.header.fRequestor = VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV_OTHER \
diff --git a/ubuntu/vbox/vboxguest/include/VBox/VMMDev.h b/ubuntu/vbox/vboxguest/include/VBox/VMMDev.h
index 3212e9941a7f..5d5c42861d65 100644
--- a/ubuntu/vbox/vboxguest/include/VBox/VMMDev.h
+++ b/ubuntu/vbox/vboxguest/include/VBox/VMMDev.h
@@ -162,12 +162,17 @@ typedef enum VMMDevRequestType
#ifdef VBOX_WITH_HGCM
VMMDevReq_HGCMConnect = 60,
VMMDevReq_HGCMDisconnect = 61,
-#ifdef VBOX_WITH_64_BITS_GUESTS
VMMDevReq_HGCMCall32 = 62,
VMMDevReq_HGCMCall64 = 63,
-#else
- VMMDevReq_HGCMCall = 62,
-#endif /* VBOX_WITH_64_BITS_GUESTS */
+# ifdef IN_GUEST
+# if ARCH_BITS == 64
+ VMMDevReq_HGCMCall = VMMDevReq_HGCMCall64,
+# elif ARCH_BITS == 32 || ARCH_BITS == 16
+ VMMDevReq_HGCMCall = VMMDevReq_HGCMCall32,
+# else
+# error "Unsupported ARCH_BITS"
+# endif
+# endif
VMMDevReq_HGCMCancel = 64,
VMMDevReq_HGCMCancel2 = 65,
#endif
@@ -198,28 +203,6 @@ typedef enum VMMDevRequestType
VMMDevReq_SizeHack = 0x7fffffff
} VMMDevRequestType;
-#ifdef VBOX_WITH_64_BITS_GUESTS
-/*
- * Constants and structures are redefined for the guest.
- *
- * Host code MUST always use either *32 or *64 variant explicitely.
- * Host source code will use VBOX_HGCM_HOST_CODE define to catch undefined
- * data types and constants.
- *
- * This redefinition means that the new additions builds will use
- * the *64 or *32 variants depending on the current architecture bit count (ARCH_BITS).
- */
-# ifndef VBOX_HGCM_HOST_CODE
-# if ARCH_BITS == 64
-# define VMMDevReq_HGCMCall VMMDevReq_HGCMCall64
-# elif ARCH_BITS == 32 || ARCH_BITS == 16
-# define VMMDevReq_HGCMCall VMMDevReq_HGCMCall32
-# else
-# error "Unsupported ARCH_BITS"
-# endif
-# endif /* !VBOX_HGCM_HOST_CODE */
-#endif /* VBOX_WITH_64_BITS_GUESTS */
-
/** Version of VMMDevRequestHeader structure. */
#define VMMDEV_REQUEST_HEADER_VERSION (0x10001)
@@ -296,9 +279,8 @@ AssertCompileSize(VMMDevRequestHeader, 24);
/** Requestor process belongs to user on the physical console, but cannot
* ascertain that it is associated with that login. */
#define VMMDEV_REQUESTOR_CON_USER UINT32_C(0x00000030)
-/** Requestor process belongs to user on the physical console, but cannot
- * ascertain that it is associated with that login. */
-#define VMMDEV_REQUESTOR_CON_MASK UINT32_C(0x00000040)
+/** Mask the physical console state of the request. */
+#define VMMDEV_REQUESTOR_CON_MASK UINT32_C(0x00000030)
/** Requestor is member of special VirtualBox user group (not on windows). */
#define VMMDEV_REQUESTOR_GRP_VBOX UINT32_C(0x00000080)
@@ -548,6 +530,8 @@ AssertCompileSize(VMMDevReqHostVersion, 24+16);
#define VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS RT_BIT_32(1)
/** HGCM supports the contiguous page list parameter type. */
#define VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST RT_BIT_32(2)
+/** HGCM supports the no-bounce page list parameter type. */
+#define VMMDEV_HVF_HGCM_NO_BOUNCE_PAGE_LIST RT_BIT_32(3)
/** VMMDev supports fast IRQ acknowledgements. */
#define VMMDEV_HVF_FAST_IRQ_ACK RT_BIT_32(31)
/** @} */
@@ -1625,7 +1609,7 @@ AssertCompileSize(VMMDevHGCMDisconnect, 32+4);
/**
* HGCM call request structure.
*
- * Used by VMMDevReq_HGCMCall, VMMDevReq_HGCMCall32 and VMMDevReq_HGCMCall64.
+ * Used by VMMDevReq_HGCMCall32 and VMMDevReq_HGCMCall64.
*/
typedef struct
{
@@ -1648,10 +1632,11 @@ AssertCompileSize(VMMDevHGCMCall, 32+12);
#define VBOX_HGCM_F_PARM_DIRECTION_TO_HOST UINT32_C(0x00000001)
#define VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST UINT32_C(0x00000002)
#define VBOX_HGCM_F_PARM_DIRECTION_BOTH UINT32_C(0x00000003)
+#define VBOX_HGCM_F_PARM_DIRECTION_MASK UINT32_C(0x00000003)
/** Macro for validating that the specified flags are valid. */
#define VBOX_HGCM_F_PARM_ARE_VALID(fFlags) \
- ( (fFlags) > VBOX_HGCM_F_PARM_DIRECTION_NONE \
- && (fFlags) <= VBOX_HGCM_F_PARM_DIRECTION_BOTH )
+ ( ((fFlags) & VBOX_HGCM_F_PARM_DIRECTION_MASK) \
+ && !((fFlags) & ~VBOX_HGCM_F_PARM_DIRECTION_MASK) )
/** @} */
/**
@@ -1780,15 +1765,12 @@ DECLINLINE(size_t) vmmdevGetRequestSize(VMMDevRequestType requestType)
return sizeof(VMMDevHGCMConnect);
case VMMDevReq_HGCMDisconnect:
return sizeof(VMMDevHGCMDisconnect);
-#ifdef VBOX_WITH_64_BITS_GUESTS
case VMMDevReq_HGCMCall32:
return sizeof(VMMDevHGCMCall);
+# ifdef VBOX_WITH_64_BITS_GUESTS
case VMMDevReq_HGCMCall64:
return sizeof(VMMDevHGCMCall);
-#else
- case VMMDevReq_HGCMCall:
- return sizeof(VMMDevHGCMCall);
-#endif /* VBOX_WITH_64_BITS_GUESTS */
+# endif
case VMMDevReq_HGCMCancel:
return sizeof(VMMDevHGCMCancel);
#endif /* VBOX_WITH_HGCM */
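
The new VBOX_HGCM_F_PARM_ARE_VALID above validates the direction flags with a
mask instead of a numeric range: at least one direction bit must be set and no
bit outside the direction mask may be set, which stays correct if unrelated
flag bits are added later. A standalone sketch with local stand-in names:

#include <stdbool.h>
#include <stdint.h>

#define MY_DIR_TO_HOST   UINT32_C(0x00000001)
#define MY_DIR_FROM_HOST UINT32_C(0x00000002)
#define MY_DIR_MASK      (MY_DIR_TO_HOST | MY_DIR_FROM_HOST)

/* Valid iff some direction bit is set and no foreign bits are present. */
static bool parm_flags_valid(uint32_t fFlags)
{
    return (fFlags & MY_DIR_MASK) != 0
        && (fFlags & ~MY_DIR_MASK) == 0;
}
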
diff --git a/ubuntu/vbox/vboxguest/include/VBox/VMMDevCoreTypes.h b/ubuntu/vbox/vboxguest/include/VBox/VMMDevCoreTypes.h
index af024c2772d5..c8d5b60367dc 100644
--- a/ubuntu/vbox/vboxguest/include/VBox/VMMDevCoreTypes.h
+++ b/ubuntu/vbox/vboxguest/include/VBox/VMMDevCoreTypes.h
@@ -278,6 +278,7 @@ typedef enum
VMMDevHGCMParmType_PageList = 10, /**< Physical addresses of locked pages for a buffer. */
VMMDevHGCMParmType_Embedded = 11, /**< Small buffer embedded in request. */
VMMDevHGCMParmType_ContiguousPageList = 12, /**< Like PageList but with physically contiguous memory, so only one page entry. */
+ VMMDevHGCMParmType_NoBouncePageList = 13, /**< Like PageList but host function requires no bounce buffering. */
VMMDevHGCMParmType_SizeHack = 0x7fffffff
} HGCMFunctionParameterType;
AssertCompileSize(HGCMFunctionParameterType, 4);
diff --git a/ubuntu/vbox/vboxguest/include/VBox/err.h b/ubuntu/vbox/vboxguest/include/VBox/err.h
index 20e8432b25f1..c81ebc4de3eb 100644
--- a/ubuntu/vbox/vboxguest/include/VBox/err.h
+++ b/ubuntu/vbox/vboxguest/include/VBox/err.h
@@ -2117,6 +2117,10 @@
/** The behavior of the instruction/operation is modified/needs modification
* in VMX non-root mode. */
#define VINF_VMX_MODIFIES_BEHAVIOR 4036
+/** VMLAUNCH/VMRESUME succeeded, can enter nested-guest execution. */
+#define VINF_VMX_VMLAUNCH_VMRESUME 4037
+/** VT-x VMCS launch state invalid. */
+#define VERR_VMX_INVALID_VMCS_LAUNCH_STATE (-4038)
/** @} */
@@ -2718,10 +2722,14 @@
#define VWRN_GSTCTL_OBJECTSTATE_CHANGED 6220
/** Guest process is in a wrong state. */
#define VERR_GSTCTL_PROCESS_WRONG_STATE (-6221)
-/** Maximum objects has been reached. */
-#define VERR_GSTCTL_MAX_OBJECTS_REACHED (-6222)
+/** Maximum (context ID) sessions have been reached. */
+#define VERR_GSTCTL_MAX_CID_SESSIONS_REACHED (-6222)
+/** Maximum (context ID) objects have been reached. */
+#define VERR_GSTCTL_MAX_CID_OBJECTS_REACHED (-6223)
+/** Maximum (context ID object) count has been reached. */
+#define VERR_GSTCTL_MAX_CID_COUNT_REACHED (-6224)
/** Started guest process terminated with an exit code <> 0. */
-#define VERR_GSTCTL_PROCESS_EXIT_CODE (-6223)
+#define VERR_GSTCTL_PROCESS_EXIT_CODE (-6225)
/** @} */
diff --git a/ubuntu/vbox/vboxguest/include/internal/initterm.h b/ubuntu/vbox/vboxguest/include/internal/initterm.h
index 6926fc2458a8..6b19ea4d87ea 100644
--- a/ubuntu/vbox/vboxguest/include/internal/initterm.h
+++ b/ubuntu/vbox/vboxguest/include/internal/initterm.h
@@ -48,6 +48,11 @@ DECLHIDDEN(int) rtR0InitNative(void);
*/
DECLHIDDEN(void) rtR0TermNative(void);
+# ifdef RT_OS_LINUX
+/* in alloc-r0drv0-linux.c */
+DECLHIDDEN(void) rtR0MemExecCleanup(void);
+# endif
+
#endif /* IN_RING0 */
RT_C_DECLS_END
diff --git a/ubuntu/vbox/vboxguest/include/iprt/assertcompile.h b/ubuntu/vbox/vboxguest/include/iprt/assertcompile.h
index 24a3c2f3e783..c12b5aa94dac 100644
--- a/ubuntu/vbox/vboxguest/include/iprt/assertcompile.h
+++ b/ubuntu/vbox/vboxguest/include/iprt/assertcompile.h
@@ -107,7 +107,11 @@ RT_C_DECLS_END
* @param expr Expression which should be true.
*/
#ifdef RTASSERT_HAVE_STATIC_ASSERT
-# define AssertCompile(expr) static_assert(!!(expr), #expr)
+# ifdef __cplusplus
+# define AssertCompile(expr) static_assert(!!(expr), #expr)
+# else
+# define AssertCompile(expr) _Static_assert(!!(expr), #expr)
+# endif
#else
# define AssertCompile(expr) AssertCompileNS(expr)
#endif
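
The AssertCompile change above is needed because C (before C23) spells
compile-time assertions _Static_assert while C++ uses static_assert, and this
header is shared by both. A minimal sketch of the same dispatch:

#include <stdint.h>

#ifdef __cplusplus
# define MY_ASSERT_COMPILE(expr)  static_assert(!!(expr), #expr)
#else
# define MY_ASSERT_COMPILE(expr)  _Static_assert(!!(expr), #expr)
#endif

/* Fails the build, in C or C++, if the assumption is wrong. */
MY_ASSERT_COMPILE(sizeof(uint32_t) == 4);
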
diff --git a/ubuntu/vbox/vboxguest/include/iprt/cdefs.h b/ubuntu/vbox/vboxguest/include/iprt/cdefs.h
index ce7ee6ae7eea..8aa1736a9a6b 100644
--- a/ubuntu/vbox/vboxguest/include/iprt/cdefs.h
+++ b/ubuntu/vbox/vboxguest/include/iprt/cdefs.h
@@ -2686,28 +2686,28 @@
/** @def RT_BYTE1
* Gets the first byte of something. */
-#define RT_BYTE1(a) ( (a) & 0xff )
+#define RT_BYTE1(a) ( (uint8_t)((a) & 0xff) )
/** @def RT_BYTE2
* Gets the second byte of something. */
-#define RT_BYTE2(a) ( ((a) >> 8) & 0xff )
+#define RT_BYTE2(a) ( (uint8_t)(((a) >> 8) & 0xff) )
/** @def RT_BYTE3
* Gets the second byte of something. */
-#define RT_BYTE3(a) ( ((a) >> 16) & 0xff )
+#define RT_BYTE3(a) ( (uint8_t)(((a) >> 16) & 0xff) )
/** @def RT_BYTE4
* Gets the fourth byte of something. */
-#define RT_BYTE4(a) ( ((a) >> 24) & 0xff )
+#define RT_BYTE4(a) ( (uint8_t)(((a) >> 24) & 0xff) )
/** @def RT_BYTE5
* Gets the fifth byte of something. */
-#define RT_BYTE5(a) ( ((a) >> 32) & 0xff )
+#define RT_BYTE5(a) ( (uint8_t)(((a) >> 32) & 0xff) )
/** @def RT_BYTE6
* Gets the sixth byte of something. */
-#define RT_BYTE6(a) ( ((a) >> 40) & 0xff )
+#define RT_BYTE6(a) ( (uint8_t)(((a) >> 40) & 0xff) )
/** @def RT_BYTE7
* Gets the seventh byte of something. */
-#define RT_BYTE7(a) ( ((a) >> 48) & 0xff )
+#define RT_BYTE7(a) ( (uint8_t)(((a) >> 48) & 0xff) )
/** @def RT_BYTE8
* Gets the eight byte of something. */
-#define RT_BYTE8(a) ( ((a) >> 56) & 0xff )
+#define RT_BYTE8(a) ( (uint8_t)(((a) >> 56) & 0xff) )
/** @def RT_LODWORD
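
The uint8_t casts added to RT_BYTE1..RT_BYTE8 above make each macro yield an
actual byte instead of keeping the (integer-promoted) type of its operand,
which matters for printf-style format checking and implicit narrowing. A small
demonstration with stand-in macros:

#include <stdint.h>
#include <stdio.h>

#define MY_BYTE1(a) ( (uint8_t)((a) & 0xff) )
#define MY_BYTE2(a) ( (uint8_t)(((a) >> 8) & 0xff) )

int main(void)
{
    uint64_t u = UINT64_C(0x1122334455667788);
    /* Without the cast, MY_BYTE1(u) would have type uint64_t and "%02x"
     * would be the wrong conversion specifier for it. */
    printf("%02x %02x\n", MY_BYTE1(u), MY_BYTE2(u)); /* prints: 88 77 */
    return 0;
}
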
diff --git a/ubuntu/vbox/vboxguest/include/iprt/err.h b/ubuntu/vbox/vboxguest/include/iprt/err.h
index 20a0d2a3e301..3de9a3334cdc 100644
--- a/ubuntu/vbox/vboxguest/include/iprt/err.h
+++ b/ubuntu/vbox/vboxguest/include/iprt/err.h
@@ -915,6 +915,8 @@
#define VERR_LDRELF_INVALID_RELOCATION_OFFSET (-639)
/** The ELF loader didn't find the symbol/string table for the image. */
#define VERR_LDRELF_NO_SYMBOL_OR_NO_STRING_TABS (-640)
+/** The ELF loader encountered an unterminated string table. */
+#define VERR_LDRELF_UNTERMINATED_STRING_TAB (-641)
/** Invalid link address. */
#define VERR_LDR_INVALID_LINK_ADDRESS (-647)
/** Invalid image relative virtual address. */
diff --git a/ubuntu/vbox/vboxguest/include/iprt/errcore.h b/ubuntu/vbox/vboxguest/include/iprt/errcore.h
index 1a771b9d0286..88f95c83e82e 100644
--- a/ubuntu/vbox/vboxguest/include/iprt/errcore.h
+++ b/ubuntu/vbox/vboxguest/include/iprt/errcore.h
@@ -231,9 +231,9 @@ RTDECL(int) RTErrConvertFromDarwin(int iNativeCode);
* Converts errno to iprt status code.
*
* @returns iprt status code.
- * @param uNativeCode errno code.
+ * @param iNativeCode errno code.
*/
-RTDECL(int) RTErrConvertFromErrno(unsigned uNativeCode);
+RTDECL(int) RTErrConvertFromErrno(int iNativeCode);
/**
* Converts an L4 errno to an iprt status code.
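
A hedged sketch of calling the corrected prototype (helper name hypothetical;
the ENOMEM mapping is the usual IPRT one):

    #include <errno.h>

    /* Convert a native errno value; with the signed parameter, negative
     * inputs are no longer silently converted to huge unsigned values. */
    static int myConvertExample(int iErrno)   /* hypothetical helper */
    {
        return RTErrConvertFromErrno(iErrno); /* e.g. ENOMEM -> VERR_NO_MEMORY */
    }
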
diff --git a/ubuntu/vbox/vboxguest/include/iprt/log.h b/ubuntu/vbox/vboxguest/include/iprt/log.h
index 3c99cb21944a..fbd6f5fd7a8a 100644
--- a/ubuntu/vbox/vboxguest/include/iprt/log.h
+++ b/ubuntu/vbox/vboxguest/include/iprt/log.h
@@ -614,7 +614,7 @@ RTDECL(void) RTLogPrintfEx(void *pvInstance, unsigned fFlags, unsigned iGroup,
# define _LogIt(a_fFlags, a_iGroup, ...) \
do \
{ \
- register PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(a_fFlags, a_iGroup)); \
+ PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(a_fFlags, a_iGroup)); \
if (RT_LIKELY(!LogIt_pLogger)) \
{ /* likely */ } \
else \
@@ -628,7 +628,7 @@ RTDECL(void) RTLogPrintfEx(void *pvInstance, unsigned fFlags, unsigned iGroup,
# define LogIt(a_fFlags, a_iGroup, fmtargs) \
do \
{ \
- register PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(a_fFlags, a_iGroup)); \
+ PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(a_fFlags, a_iGroup)); \
if (RT_LIKELY(!LogIt_pLogger)) \
{ /* likely */ } \
else \
@@ -639,7 +639,7 @@ RTDECL(void) RTLogPrintfEx(void *pvInstance, unsigned fFlags, unsigned iGroup,
# define LogItAlways(a_fFlags, a_iGroup, fmtargs) \
do \
{ \
- register PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(0, UINT16_MAX)); \
+ PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(0, UINT16_MAX)); \
if (LogIt_pLogger) \
LogIt_pLogger->pfnLogger fmtargs; \
} while (0)
@@ -2011,6 +2011,8 @@ RTDECL(int) RTLogCreate(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGro
* @param cGroups Number of groups in the array.
* @param papszGroups Pointer to array of groups. This must stick
* around for the life of the logger instance.
+ * @param cMaxEntriesPerGroup The max number of entries per group. UINT32_MAX
+ * or zero for unlimited.
* @param fDestFlags The destination flags. RTLOGDEST_FILE is ORed
* if pszFilenameFmt specified.
* @param pfnPhase Callback function for starting logging and for
@@ -2028,11 +2030,11 @@ RTDECL(int) RTLogCreate(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGro
* @param pszFilenameFmt Log filename format string. Standard RTStrFormat().
* @param ... Format arguments.
*/
-RTDECL(int) RTLogCreateEx(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings,
- const char *pszEnvVarBase, unsigned cGroups, const char * const * papszGroups,
+RTDECL(int) RTLogCreateEx(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings, const char *pszEnvVarBase,
+ unsigned cGroups, const char * const * papszGroups, uint32_t cMaxEntriesPerGroup,
uint32_t fDestFlags, PFNRTLOGPHASE pfnPhase, uint32_t cHistory,
uint64_t cbHistoryFileMax, uint32_t cSecsHistoryTimeSlot, PRTERRINFO pErrInfo,
- const char *pszFilenameFmt, ...) RT_IPRT_FORMAT_ATTR_MAYBE_NULL(13, 14);
+ const char *pszFilenameFmt, ...) RT_IPRT_FORMAT_ATTR_MAYBE_NULL(14, 15);
/**
* Create a logger instance.
@@ -2048,6 +2050,8 @@ RTDECL(int) RTLogCreateEx(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszG
* @param cGroups Number of groups in the array.
* @param papszGroups Pointer to array of groups. This must stick
* around for the life of the logger instance.
+ * @param cMaxEntriesPerGroup The max number of entries per group. UINT32_MAX
+ * or zero for unlimited.
* @param fDestFlags The destination flags. RTLOGDEST_FILE is ORed
* if pszFilenameFmt specified.
* @param pfnPhase Callback function for starting logging and for
@@ -2066,11 +2070,11 @@ RTDECL(int) RTLogCreateEx(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszG
* RTStrFormat().
* @param args Format arguments.
*/
-RTDECL(int) RTLogCreateExV(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings,
- const char *pszEnvVarBase, unsigned cGroups, const char * const * papszGroups,
+RTDECL(int) RTLogCreateExV(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings, const char *pszEnvVarBase,
+ unsigned cGroups, const char * const * papszGroups, uint32_t cMaxEntriesPerGroup,
uint32_t fDestFlags, PFNRTLOGPHASE pfnPhase, uint32_t cHistory,
uint64_t cbHistoryFileMax, uint32_t cSecsHistoryTimeSlot, PRTERRINFO pErrInfo,
- const char *pszFilenameFmt, va_list args) RT_IPRT_FORMAT_ATTR_MAYBE_NULL(13, 0);
+ const char *pszFilenameFmt, va_list args) RT_IPRT_FORMAT_ATTR_MAYBE_NULL(14, 0);
/**
* Create a logger instance for single-threaded ring-0 usage.
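
Existing callers must be updated for the inserted cMaxEntriesPerGroup argument
(hence the format-attribute indexes moving from 13/14 to 14/15); a minimal
sketch with illustrative values:

    PRTLOGGER pLogger = NULL;
    int rc = RTLogCreateEx(&pLogger, 0 /*fFlags*/, "all" /*pszGroupSettings*/,
                           "MY_LOG" /*pszEnvVarBase, hypothetical*/,
                           0 /*cGroups*/, NULL /*papszGroups*/,
                           UINT32_MAX /*cMaxEntriesPerGroup: unlimited*/,
                           RTLOGDEST_STDOUT, NULL /*pfnPhase*/, 0 /*cHistory*/,
                           0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
                           NULL /*pErrInfo*/, NULL /*pszFilenameFmt*/);
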
diff --git a/ubuntu/vbox/vboxguest/include/iprt/mangling.h b/ubuntu/vbox/vboxguest/include/iprt/mangling.h
index b59ef06a6c4d..f1d2c89dadd1 100644
--- a/ubuntu/vbox/vboxguest/include/iprt/mangling.h
+++ b/ubuntu/vbox/vboxguest/include/iprt/mangling.h
@@ -808,6 +808,7 @@
# define RTVfsDirOpenDir RT_MANGLER(RTVfsDirOpenDir)
# define RTVfsDirFromRTDir RT_MANGLER(RTVfsDirFromRTDir)
# define RTVfsDirOpenNormal RT_MANGLER(RTVfsDirOpenNormal)
+# define RTVfsDirIsStdDir RT_MANGLER(RTVfsDirIsStdDir)
# define RTDvmCreate RT_MANGLER(RTDvmCreate)
# define RTDvmCreateFromVfsFile RT_MANGLER(RTDvmCreateFromVfsFile)
# define RTDvmRetain RT_MANGLER(RTDvmRetain)
@@ -912,9 +913,14 @@
# define RTFileCompareByHandlesEx RT_MANGLER(RTFileCompareByHandlesEx)
# define RTFileCompareEx RT_MANGLER(RTFileCompareEx)
# define RTFileCopy RT_MANGLER(RTFileCopy)
+# define RTFileCopyAttributes RT_MANGLER(RTFileCopyAttributes)
# define RTFileCopyByHandles RT_MANGLER(RTFileCopyByHandles)
# define RTFileCopyByHandlesEx RT_MANGLER(RTFileCopyByHandlesEx)
# define RTFileCopyEx RT_MANGLER(RTFileCopyEx)
+# define RTFileCopyPart RT_MANGLER(RTFileCopyPart)
+# define RTFileCopyPartCleanup RT_MANGLER(RTFileCopyPartCleanup)
+# define RTFileCopyPartEx RT_MANGLER(RTFileCopyPartEx)
+# define RTFileCopyPartPrep RT_MANGLER(RTFileCopyPartPrep)
# define RTFileCreateTemp RT_MANGLER(RTFileCreateTemp)
# define RTFileCreateTempSecure RT_MANGLER(RTFileCreateTempSecure)
# define RTFileDelete RT_MANGLER(RTFileDelete)
@@ -932,6 +938,7 @@
# define RTFileMove RT_MANGLER(RTFileMove)
# define RTFileOpen RT_MANGLER(RTFileOpen)
# define RTFileOpenBitBucket RT_MANGLER(RTFileOpenBitBucket)
+# define RTFileOpenEx RT_MANGLER(RTFileOpenEx)
# define RTFileOpenF RT_MANGLER(RTFileOpenF)
# define RTFileOpenV RT_MANGLER(RTFileOpenV)
# define RTFileOpenTemp RT_MANGLER(RTFileOpenTemp)
@@ -953,7 +960,9 @@
# define RTFileSetOwner RT_MANGLER(RTFileSetOwner)
# define RTFileSetSize RT_MANGLER(RTFileSetSize)
# define RTFileSetTimes RT_MANGLER(RTFileSetTimes)
+# define RTFileSgRead RT_MANGLER(RTFileSgRead)
# define RTFileSgReadAt RT_MANGLER(RTFileSgReadAt)
+# define RTFileSgWrite RT_MANGLER(RTFileSgWrite)
# define RTFileSgWriteAt RT_MANGLER(RTFileSgWriteAt)
# define RTFileTell RT_MANGLER(RTFileTell)
# define RTFileToNative RT_MANGLER(RTFileToNative)
@@ -1710,6 +1719,7 @@
# define RTR0MemUserCopyFrom RT_MANGLER(RTR0MemUserCopyFrom) /* r0drv */
# define RTR0MemUserCopyTo RT_MANGLER(RTR0MemUserCopyTo) /* r0drv */
# define RTR0MemUserIsValidAddr RT_MANGLER(RTR0MemUserIsValidAddr) /* r0drv */
+# define rtR0MemObjLinuxVirtToPage RT_MANGLER(rtR0MemObjLinuxVirtToPage) /* r0drv linux-only */
# define RTR0ProcHandleSelf RT_MANGLER(RTR0ProcHandleSelf) /* r0drv */
# define RTR0Term RT_MANGLER(RTR0Term) /* r0drv */
# define RTR0TermForced RT_MANGLER(RTR0TermForced) /* r0drv */
@@ -2574,6 +2584,7 @@
# define RTVfsDirReadEx RT_MANGLER(RTVfsDirReadEx)
# define RTVfsDirRemoveDir RT_MANGLER(RTVfsDirRemoveDir)
# define RTVfsDirSetPathMode RT_MANGLER(RTVfsDirSetPathMode)
+# define RTVfsDirToPrivate RT_MANGLER(RTVfsDirToPrivate)
# define RTVfsFileFlush RT_MANGLER(RTVfsFileFlush)
# define RTVfsFileFromBuffer RT_MANGLER(RTVfsFileFromBuffer)
# define RTVfsFileFromRTFile RT_MANGLER(RTVfsFileFromRTFile)
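
Each newly used API needs a mangling entry so vboxguest and vboxsf can carry
private copies of the same symbol; conceptually the mangler is just an affix
paste (sketch only, hypothetical macro and affix):

    /* Illustration only: the real RT_MANGLER pastes a per-build affix. */
    #define MY_MANGLER(name)  name##_VBoxGuest
    #define RTFileOpenEx      MY_MANGLER(RTFileOpenEx) /* links as RTFileOpenEx_VBoxGuest */
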
diff --git a/ubuntu/vbox/vboxguest/include/iprt/types.h b/ubuntu/vbox/vboxguest/include/iprt/types.h
index f33af38a9e06..8e3e635ba74d 100644
--- a/ubuntu/vbox/vboxguest/include/iprt/types.h
+++ b/ubuntu/vbox/vboxguest/include/iprt/types.h
@@ -59,21 +59,28 @@ RT_C_DECLS_END
# include <sys/types.h>
# elif defined(RT_OS_FREEBSD) && defined(_KERNEL)
+# include <sys/param.h>
+# undef PVM
+# if __FreeBSD_version < 1200000
/*
* Kludge for the FreeBSD kernel:
* stddef.h and sys/types.h have slightly different offsetof definitions
* when compiling in kernel mode. This is just to make GCC shut up.
*/
-# ifndef _STDDEF_H_
-# undef offsetof
-# endif
-# include <sys/stddef.h>
-# ifndef _SYS_TYPES_H_
-# undef offsetof
-# endif
-# include <sys/types.h>
-# ifndef offsetof
-# error "offsetof is not defined!"
+# ifndef _STDDEF_H_
+# undef offsetof
+# endif
+# include <sys/stddef.h>
+# ifndef _SYS_TYPES_H_
+# undef offsetof
+# endif
+# include <sys/types.h>
+# ifndef offsetof
+# error "offsetof is not defined!"
+# endif
+# else
+# include <sys/stddef.h>
+# include <sys/types.h>
# endif
# elif defined(RT_OS_FREEBSD) && HC_ARCH_BITS == 64 && defined(RT_ARCH_X86)
diff --git a/ubuntu/vbox/vboxguest/include/iprt/x86.h b/ubuntu/vbox/vboxguest/include/iprt/x86.h
index d32320a2a130..b6f26df409fa 100644
--- a/ubuntu/vbox/vboxguest/include/iprt/x86.h
+++ b/ubuntu/vbox/vboxguest/include/iprt/x86.h
@@ -4348,7 +4348,7 @@ AssertCompile((X86_SIB_SCALE_MASK >> X86_SIB_SCALE_SHIFT) == X86_SIB_SCALE_SMASK
#endif
/** @} */
-/** @name General register indexes
+/** @name General register indexes.
* @{ */
#define X86_GREG_xAX 0
#define X86_GREG_xCX 1
@@ -4367,6 +4367,8 @@ AssertCompile((X86_SIB_SCALE_MASK >> X86_SIB_SCALE_SHIFT) == X86_SIB_SCALE_SMASK
#define X86_GREG_x14 14
#define X86_GREG_x15 15
/** @} */
+/** General register count. */
+#define X86_GREG_COUNT 16
/** @name X86_SREG_XXX - Segment register indexes.
* @{ */
diff --git a/ubuntu/vbox/vboxguest/r0drv/alloc-r0drv.h b/ubuntu/vbox/vboxguest/r0drv/alloc-r0drv.h
index 80aca6788473..65fd48ddb098 100644
--- a/ubuntu/vbox/vboxguest/r0drv/alloc-r0drv.h
+++ b/ubuntu/vbox/vboxguest/r0drv/alloc-r0drv.h
@@ -74,6 +74,9 @@ typedef struct RTMEMHDR
# define RTMEMHDR_FLAG_EXEC_HEAP RT_BIT(30)
/** Linux: Allocated by kmalloc() instead of vmalloc(). */
# define RTMEMHDR_FLAG_KMALLOC RT_BIT(31)
+#elif defined(RT_OS_WINDOWS)
+/** Windows: Untagged allocation by ExAllocatePool, freed using ExFreePool. */
+# define RTMEMHDR_FLAG_UNTAGGED RT_BIT(31)
#endif
/** @} */
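
The flag bits let one free routine dispatch on the allocation origin; a hedged
sketch for the Linux case (pHdr is a hypothetical RTMEMHDR pointer):

    if (pHdr->fFlags & RTMEMHDR_FLAG_KMALLOC)
        kfree(pHdr);   /* block came from kmalloc() */
    else
        vfree(pHdr);   /* block came from vmalloc() */
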
diff --git a/ubuntu/vbox/vboxguest/r0drv/linux/alloc-r0drv-linux.c b/ubuntu/vbox/vboxguest/r0drv/linux/alloc-r0drv-linux.c
index e724091590e9..4843cb0242cb 100644
--- a/ubuntu/vbox/vboxguest/r0drv/linux/alloc-r0drv-linux.c
+++ b/ubuntu/vbox/vboxguest/r0drv/linux/alloc-r0drv-linux.c
@@ -62,6 +62,8 @@
# include <iprt/errcore.h>
#endif
+#include "internal/initterm.h"
+
/*********************************************************************************************************************************
* Structures and Typedefs *
diff --git a/ubuntu/vbox/vboxguest/r0drv/linux/initterm-r0drv-linux.c b/ubuntu/vbox/vboxguest/r0drv/linux/initterm-r0drv-linux.c
index aeb4d97ee92c..26e273e74145 100644
--- a/ubuntu/vbox/vboxguest/r0drv/linux/initterm-r0drv-linux.c
+++ b/ubuntu/vbox/vboxguest/r0drv/linux/initterm-r0drv-linux.c
@@ -46,13 +46,6 @@ static DECLARE_TASK_QUEUE(g_rtR0LnxWorkQueue);
#endif
-/*********************************************************************************************************************************
-* Internal Functions *
-*********************************************************************************************************************************/
-/* in alloc-r0drv0-linux.c */
-DECLHIDDEN(void) rtR0MemExecCleanup(void);
-
-
/**
* Pushes an item onto the IPRT work queue.
*
diff --git a/ubuntu/vbox/vboxguest/r0drv/linux/memobj-r0drv-linux.c b/ubuntu/vbox/vboxguest/r0drv/linux/memobj-r0drv-linux.c
index d6f394001726..d11c2d7aa19a 100644
--- a/ubuntu/vbox/vboxguest/r0drv/linux/memobj-r0drv-linux.c
+++ b/ubuntu/vbox/vboxguest/r0drv/linux/memobj-r0drv-linux.c
@@ -38,6 +38,7 @@
#include <iprt/process.h>
#include <iprt/string.h>
#include "internal/memobj.h"
+#include "internal/iprt.h"
/*********************************************************************************************************************************
@@ -66,6 +67,11 @@
# define VBOX_USE_PAE_HACK
#endif
+/* gfp_t was introduced in 2.6.14, define it for earlier. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+# define gfp_t unsigned
+#endif
+
/*********************************************************************************************************************************
* Structures and Typedefs *
@@ -138,7 +144,7 @@ static pgprot_t rtR0MemObjLinuxConvertProt(unsigned fProt, bool fKernel)
switch (fProt)
{
default:
- AssertMsgFailed(("%#x %d\n", fProt, fKernel));
+ AssertMsgFailed(("%#x %d\n", fProt, fKernel)); RT_FALL_THRU();
case RTMEM_PROT_NONE:
return PAGE_NONE;
@@ -286,7 +292,7 @@ static void rtR0MemObjLinuxDoMunmap(void *pv, size_t cb, struct task_struct *pTa
* @param rcNoMem What to return when we're out of pages.
*/
static int rtR0MemObjLinuxAllocPages(PRTR0MEMOBJLNX *ppMemLnx, RTR0MEMOBJTYPE enmType, size_t cb,
- size_t uAlignment, unsigned fFlagsLnx, bool fContiguous, int rcNoMem)
+ size_t uAlignment, gfp_t fFlagsLnx, bool fContiguous, int rcNoMem)
{
size_t iPage;
size_t const cPages = cb >> PAGE_SHIFT;
@@ -782,7 +788,7 @@ DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
* @param fGfp The Linux GFP flags to use for the allocation.
*/
static int rtR0MemObjLinuxAllocPhysSub2(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
- size_t cb, size_t uAlignment, RTHCPHYS PhysHighest, unsigned fGfp)
+ size_t cb, size_t uAlignment, RTHCPHYS PhysHighest, gfp_t fGfp)
{
PRTR0MEMOBJLNX pMemLnx;
int rc;
@@ -894,7 +900,7 @@ static int rtR0MemObjLinuxAllocPhysSub(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYP
* @returns Pointer to the page structure or NULL if it could not be found.
* @param pv The kernel virtual address.
*/
-static struct page *rtR0MemObjLinuxVirtToPage(void *pv)
+RTDECL(struct page *) rtR0MemObjLinuxVirtToPage(void *pv)
{
unsigned long ulAddr = (unsigned long)pv;
unsigned long pfn;
@@ -984,6 +990,7 @@ static struct page *rtR0MemObjLinuxVirtToPage(void *pv)
return NULL;
return pte_page(u.Entry);
}
+RT_EXPORT_SYMBOL(rtR0MemObjLinuxVirtToPage);
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
@@ -1114,7 +1121,9 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3P
pTask->mm, /* Whose pages. */
R3Ptr, /* Where from. */
cPages, /* How many pages. */
-# if GET_USER_PAGES_API >= KERNEL_VERSION(4, 9, 0)
+/* The get_user_pages API change was back-ported to 4.4.168. */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
fWrite ? FOLL_WRITE | /* Write to memory. */
FOLL_FORCE /* force write access. */
: 0, /* Write to memory. */
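
Condensed, the argument-style selection above amounts to the following (sketch
only; fWrite as in the surrounding code):

    /* The 4.4.168 stable backport takes one gup_flags word, like 4.9+;
     * older kernels take separate write/force int arguments. */
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168) \
     && LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
        unsigned int fGup = fWrite ? FOLL_WRITE | FOLL_FORCE : 0;
    #else
        int fGupWrite = fWrite, fGupForce = fWrite; /* legacy style */
    #endif
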
diff --git a/ubuntu/vbox/vboxguest/r0drv/linux/the-linux-kernel.h b/ubuntu/vbox/vboxguest/r0drv/linux/the-linux-kernel.h
index e31f2fee1c5b..66dd734b08d1 100644
--- a/ubuntu/vbox/vboxguest/r0drv/linux/the-linux-kernel.h
+++ b/ubuntu/vbox/vboxguest/r0drv/linux/the-linux-kernel.h
@@ -128,6 +128,9 @@
# include <linux/cpu.h>
# include <linux/notifier.h>
#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)
+# include <uapi/linux/mman.h>
+#endif
/* For the basic additions module */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -159,7 +162,7 @@
# include <linux/tqueue.h>
#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 4)
# include <linux/kthread.h>
#endif
@@ -457,5 +460,10 @@ typedef struct tq_struct RTR0LNXWORKQUEUEITEM;
DECLHIDDEN(void) rtR0LnxWorkqueuePush(RTR0LNXWORKQUEUEITEM *pWork, void (*pfnWorker)(RTR0LNXWORKQUEUEITEM *));
DECLHIDDEN(void) rtR0LnxWorkqueueFlush(void);
+/*
+ * Memory hacks from memobj-r0drv-linux.c that shared folders need.
+ */
+RTDECL(struct page *) rtR0MemObjLinuxVirtToPage(void *pv);
+
#endif /* !IPRT_INCLUDED_SRC_r0drv_linux_the_linux_kernel_h */
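
With the symbol exported and declared here, vboxsf can translate kernel
virtual addresses itself; a hedged usage sketch (pvBuf hypothetical):

    struct page *pPage = rtR0MemObjLinuxVirtToPage(pvBuf);
    if (pPage)
        get_page(pPage);  /* take a reference before use with the page cache */
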
diff --git a/ubuntu/vbox/vboxguest/r0drv/linux/timer-r0drv-linux.c b/ubuntu/vbox/vboxguest/r0drv/linux/timer-r0drv-linux.c
index fa37980eb87b..e67aa4246011 100644
--- a/ubuntu/vbox/vboxguest/r0drv/linux/timer-r0drv-linux.c
+++ b/ubuntu/vbox/vboxguest/r0drv/linux/timer-r0drv-linux.c
@@ -544,7 +544,7 @@ static void rtTimerLnxCallbackHandleMigration(PRTTIMER pTimer, PRTTIMERLNXSUBTIM
break;
default:
- AssertMsgFailed(("%d\n", enmState));
+ AssertMsgFailed(("%d\n", enmState)); RT_FALL_THRU();
case RTTIMERLNXSTATE_STARTING:
case RTTIMERLNXSTATE_MP_STARTING:
case RTTIMERLNXSTATE_ACTIVE:
@@ -596,7 +596,7 @@ static bool rtTimerLnxChangeToCallbackStateSlow(PRTTIMERLNXSUBTIMER pSubTimer)
case RTTIMERLNXSTATE_CB_STOPPING:
case RTTIMERLNXSTATE_CB_RESTARTING:
case RTTIMERLNXSTATE_CB_DESTROYING:
- AssertMsgFailed(("%d\n", enmState));
+ AssertMsgFailed(("%d\n", enmState)); RT_FALL_THRU();
default:
return false;
}
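
RT_FALL_THRU() expands to the compiler's fall-through annotation where
available, so gcc 7+'s -Wimplicit-fallthrough accepts the deliberate drop into
the next case; schematically:

    switch (enmState)
    {
        default:
            AssertMsgFailed(("%d\n", enmState));
            RT_FALL_THRU();   /* deliberate: failed assertion shares the handling below */
        case RTTIMERLNXSTATE_ACTIVE:
            /* common handling */
            break;
    }
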
diff --git a/ubuntu/vbox/vboxguest/revision-generated.h b/ubuntu/vbox/vboxguest/revision-generated.h
index a787df83ff85..fbe696a898a3 100644
--- a/ubuntu/vbox/vboxguest/revision-generated.h
+++ b/ubuntu/vbox/vboxguest/revision-generated.h
@@ -1 +1 @@
-#define VBOX_SVN_REV 128164
+#define VBOX_SVN_REV 129722
diff --git a/ubuntu/vbox/vboxguest/version-generated.h b/ubuntu/vbox/vboxguest/version-generated.h
index ff669b300eaa..85722b642c49 100644
--- a/ubuntu/vbox/vboxguest/version-generated.h
+++ b/ubuntu/vbox/vboxguest/version-generated.h
@@ -3,9 +3,9 @@
#define VBOX_VERSION_MAJOR 6
#define VBOX_VERSION_MINOR 0
-#define VBOX_VERSION_BUILD 4
-#define VBOX_VERSION_STRING_RAW "6.0.4"
-#define VBOX_VERSION_STRING "6.0.4_KernelUbuntu"
+#define VBOX_VERSION_BUILD 6
+#define VBOX_VERSION_STRING_RAW "6.0.6"
+#define VBOX_VERSION_STRING "6.0.6_KernelUbuntu"
#define VBOX_API_VERSION_STRING "6_0"
#define VBOX_PRIVATE_BUILD_DESC "Private build by buildd"
diff --git a/ubuntu/vbox/vboxsf/Makefile b/ubuntu/vbox/vboxsf/Makefile
index 02455b906864..85cb434ea43b 100644
--- a/ubuntu/vbox/vboxsf/Makefile
+++ b/ubuntu/vbox/vboxsf/Makefile
@@ -20,21 +20,25 @@ KBUILD_EXTMOD=${srctree}/ubuntu/vbox
# Linux kbuild sets this to our source directory if we are called from there
obj ?= $(CURDIR)
-include $(obj)/Makefile.include.header
+include $(obj)/Makefile-header.gmk
+VBOXSF_DIR = $(VBOX_MODULE_SRC_DIR)
-MOD_NAME = vboxsf
-MOD_OBJS = \
+VBOXMOD_NAME = vboxsf
+VBOXMOD_OBJS = \
vfsmod.o \
dirops.o \
lnkops.o \
regops.o \
utils.o \
+ VBoxGuestR0LibGenericRequest.o \
VBoxGuestR0LibHGCM.o \
VBoxGuestR0LibIdc.o \
VBoxGuestR0LibIdc-unix.o \
+ VBoxGuestR0LibInit.o \
+ VBoxGuestR0LibPhysHeap.o \
VBoxGuestR0LibSharedFolders.o
ifeq ($(BUILD_TARGET_ARCH),x86)
-MOD_OBJS += \
+VBOXMOD_OBJS += \
divdi3.o \
moddi3.o \
udivdi3.o \
@@ -42,51 +46,59 @@ MOD_OBJS += \
umoddi3.o \
qdivrem.o
endif
-
-MOD_INCL = \
- $(addprefix -I$(KBUILD_EXTMOD),/ /include /r0drv/linux) \
- $(addprefix -I$(KBUILD_EXTMOD)/vboxsf,/ /include /r0drv/linux)
-
-ifneq ($(wildcard $(KBUILD_EXTMOD)/vboxsf),)
- MANGLING := $(KBUILD_EXTMOD)/vboxsf/include/VBox/VBoxGuestMangling.h
-else
- MANGLING := $(KBUILD_EXTMOD)/include/VBox/VBoxGuestMangling.h
-endif
-
-MOD_DEFS = -DRT_OS_LINUX -DIN_RING0 -DIN_RT_R0 \
- -DIN_SUP_R0 -DVBOX -DVBOX_WITH_HGCM -DIN_MODULE -DIN_GUEST_R0
-# our module does not export any symbol
-MOD_DEFS += -DRT_NO_EXPORT_SYMBOL
+VBOXMOD_INCL = \
+ $(VBOXSF_DIR) \
+ $(VBOXSF_DIR)include \
+ $(VBOXSF_DIR)r0drv/linux
+VBOXMOD_DEFS = \
+ RT_OS_LINUX \
+ IN_RING0 \
+ IN_RT_R0 \
+ IN_SUP_R0 \
+ VBOX \
+ VBOX_WITH_HGCM \
+ IN_MODULE \
+ IN_GUEST \
+ IN_GUEST_R0 \
+ RT_NO_EXPORT_SYMBOL
ifeq ($(BUILD_TARGET_ARCH),amd64)
- MOD_DEFS += -DRT_ARCH_AMD64 -DVBOX_WITH_64_BITS_GUESTS
-else
- MOD_DEFS += -DRT_ARCH_X86
+VBOXMOD_DEFS += VBOX_WITH_64_BITS_GUESTS
+endif
+ifneq ($(filter %uek.x86_64,$(KERN_VER)),)
+VBOXMOD_DEFS += VBOX_UEK
+endif
+VBOXMOD_CFLAGS := $(call VBOX_GCC_CHECK_CC,-Wno-declaration-after-statement,-Wno-declaration-after-statement,,)
+VBOXMOD_CFLAGS += $(call VBOX_GCC_CHECK_CC,-fno-pie,-fno-pie,,)
+ifneq ($(KERN_VERSION),24)
+VBOXMOD_CFLAGS += -include $(VBOXSF_DIR)/include/VBox/VBoxGuestMangling.h
+## @todo r-bird: What's with -fshort-wchar here?? We either need that or we don't, right? It should be 2.6+ only.
+VBOXMOD_CFLAGS += -fshort-wchar
+endif
+ifdef VBOX_NO_OMIT_FRAME_POINTER
+VBOXMOD_CFLAGS += -fno-omit-frame-pointer
endif
-ifeq ($(KERN_VERSION), 24)
- MOD_CFLAGS =
-else
- MOD_CFLAGS = -Wno-declaration-after-statement -fshort-wchar -include $(MANGLING) -fno-pie
-
+ifneq ($(KERN_VERSION),24)
# special hack for Fedora Core 6 2.6.18 (fc6), rhel5 2.6.18 (el5),
# ClarkConnect 4.3 (cc4) and ClarkConnect 5 (v5)
ifeq ($(KERNELRELEASE),)
- MOD_EXTRA += $(foreach inc,$(KERN_INCL),\
- $(if $(wildcard $(inc)/linux/utsrelease.h),\
- $(if $(shell grep '"2.6.18.*fc6.*"' $(inc)/linux/utsrelease.h; \
- grep '"2.6.18.*el5.*"' $(inc)/linux/utsrelease.h; \
- grep '"2.6.18.*v5.*"' $(inc)/linux/utsrelease.h; \
- grep '"2.6.18.*cc4.*"' $(inc)/linux/utsrelease.h),\
- -DKERNEL_FC6,),))
+VBOXMOD_CFLAGS += $(foreach inc,$(KERN_INCL),\
+ $(if $(wildcard $(inc)/linux/utsrelease.h),\
+ $(if $(shell grep '"2.6.18.*fc6.*"' $(inc)/linux/utsrelease.h; \
+ grep '"2.6.18.*el5.*"' $(inc)/linux/utsrelease.h; \
+ grep '"2.6.18.*v5.*"' $(inc)/linux/utsrelease.h; \
+ grep '"2.6.18.*cc4.*"' $(inc)/linux/utsrelease.h),\
+ -DKERNEL_FC6,),))
else
- MOD_EXTRA += $(if $(shell echo "$(KERNELRELEASE)"|grep '2.6.18.*fc6.*';\
- echo "$(KERNELRELEASE)"|grep '2.6.18.*el5.*';\
- echo "$(KERNELRELEASE)"|grep '2.6.18.*v5.*';\
- echo "$(KERNELRELEASE)"|grep '2.6.18.*cc4.*'),\
+VBOXMOD_CFLAGS += $(if $(shell echo "$(KERNELRELEASE)"|grep '2.6.18.*fc6.*';\
+ echo "$(KERNELRELEASE)"|grep '2.6.18.*el5.*';\
+ echo "$(KERNELRELEASE)"|grep '2.6.18.*v5.*';\
+ echo "$(KERNELRELEASE)"|grep '2.6.18.*cc4.*'),\
-DKERNEL_FC6,)
endif
endif
-MOD_CLEAN = . linux r0drv r0drv/linux
+VBOXMOD_CLEAN = . linux r0drv r0drv/linux
+
+include $(obj)/Makefile-footer.gmk
-include $(obj)/Makefile.include.footer
diff --git a/ubuntu/vbox/vboxsf/Makefile-footer.gmk b/ubuntu/vbox/vboxsf/Makefile-footer.gmk
new file mode 100644
index 000000000000..adc2c2ebaaa1
--- /dev/null
+++ b/ubuntu/vbox/vboxsf/Makefile-footer.gmk
@@ -0,0 +1,128 @@
+# $Id: Makefile-footer.gmk $
+## @file
+# VirtualBox Guest Additions kernel module Makefile, common parts.
+#
+# See Makefile-header.gmk for details of how to use this.
+#
+
+#
+# Copyright (C) 2006-2019 Oracle Corporation
+#
+# This file is part of VirtualBox Open Source Edition (OSE), as
+# available from http://www.virtualbox.org. This file is free software;
+# you can redistribute it and/or modify it under the terms of the GNU
+# General Public License (GPL) as published by the Free Software
+# Foundation, in version 2 as it comes in the "COPYING" file of the
+# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+#
+
+VBOXMOD_0_TARGET = $(VBOXMOD_NAME)
+
+KBUILD_VERBOSE ?= 1 # Variable belongs to our kBuild, not the linux one.
+VBOX_LNX_VERBOSE = $(if $(KBUILD_VERBOSE),1,)
+
+#
+# Compiler options
+#
+VBOXMOD_0_KFLAGS := -D__KERNEL__ -DMODULE -DRT_WITHOUT_PRAGMA_ONCE $(addprefix -D,$(VBOXMOD_DEFS))
+ifeq ($(BUILD_TARGET_ARCH),amd64)
+VBOXMOD_0_KFLAGS += -DRT_ARCH_AMD64
+else
+VBOXMOD_0_KFLAGS += -DRT_ARCH_X86
+endif
+
+ifeq ($(BUILD_TYPE),debug)
+# The -Wno-array-bounds is because of a bug in gcc 4.something, see
+# https://sourceware.org/bugzilla/show_bug.cgi?id=10001
+ VBOXMOD_0_KFLAGS += -DDEBUG -DDEBUG_$(subst $(subst _, ,_),_,$(USERNAME)) -DDEBUG_USERNAME=$(subst $(subst _, ,_),_,$(USERNAME))
+ ifeq ($(shell expr $(KERN_VER) : '[23]\.'),0)
+ VBOXMOD_0_KFLAGS += -Werror -Wall -Wno-array-bounds
+ endif
+endif
+
+ifeq ($(VBOX_KERN_GROKS_EXTMOD),)
+#
+# Pre 2.6.6
+#
+# Note: While pre 2.6.6 kernels could also do "proper" builds from kbuild, the
+# make script needed to support it was somewhat different from 2.6. Since this
+# script works and pre-2.6.6 is not a moving target we will not try to do things
+# the "proper" way.
+#
+VBOXMOD_EXT := o
+
+ ifeq ($(BUILD_TARGET_ARCH),amd64)
+VBOXMOD_0_KFLAGS += -mcmodel=kernel
+ endif
+ ifeq ($(KERN_VERSION),24)
+VBOXMOD_0_KFLAGS += -DVBOX_LINUX_2_4
+ endif
+
+CFLAGS := -O2 $(VBOXMOD_CFLAGS) $(addprefix -I,$(KERN_INCL) $(VBOXMOD_INCL)) $(VBOXMOD_0_KFLAGS) $(KDEBUG)
+
+# 2.4 Module linking
+$(VBOXMOD_0_TARGET).$(VBOXMOD_EXT): $(VBOXMOD_OBJS)
+ $(LD) -o $@ -r $(VBOXMOD_OBJS)
+
+all: $(VBOXMOD_0_TARGET)
+$(VBOXMOD_0_TARGET): $(VBOXMOD_0_TARGET).$(VBOXMOD_EXT)
+
+install: $(VBOXMOD_0_TARGET)
+ @mkdir -p $(MODULE_DIR); \
+ install -m 0644 -o root -g root $(VBOXMOD_0_TARGET).$(VBOXMOD_EXT) $(MODULE_DIR); \
+ PATH="$(PATH):/bin:/sbin" depmod -a; sync
+
+clean:
+ for f in $(sort $(dir $(VBOXMOD_OBJS))); do rm -f $$f/*.o $$f/.*.cmd $$f/.*.flags; done
+ rm -rf .$(VBOXMOD_NAME)* .tmp_ver* $(VBOXMOD_NAME).* Modules.symvers modules.order
+
+.PHONY: all $(VBOXMOD_0_TARGET) install clean
+
+else # VBOX_KERN_GROKS_EXTMOD
+#
+# 2.6.6 and later
+#
+VBOXMOD_EXT := ko
+
+# build defs
+EXTRA_CFLAGS += $(VBOXMOD_CFLAGS) $(addprefix -I,$(KERN_INCL) $(VBOXMOD_INCL)) $(VBOXMOD_0_KFLAGS) $(KDEBUG)
+$(VBOXMOD_0_TARGET)-y := $(VBOXMOD_OBJS)
+obj-m += $(VBOXMOD_0_TARGET).o
+
+# Trigger parallel make job.
+JOBS := $(shell (getconf _NPROCESSORS_ONLN || grep -Ec '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
+ ifeq ($(JOBS),0)
+ override JOBS := 1
+ endif
+
+# rules:
+all: $(VBOXMOD_0_TARGET)
+
+# OL/UEK: disable module signing for external modules -- we don't have any private key
+$(VBOXMOD_0_TARGET):
+ifneq ($(VBOX_KERN_GROKS_SUBDIRS),)
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) $(if $(JOBS),-j$(JOBS),) modules
+else
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) M=$(CURDIR) SRCROOT=$(CURDIR) $(if $(JOBS),-j$(JOBS),) modules
+endif
+
+install: $(VBOXMOD_0_TARGET)
+ifneq ($(VBOX_KERN_GROKS_SUBDIRS),)
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) modules_install
+else
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) M=$(CURDIR) SRCROOT=$(CURDIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) modules_install
+endif
+
+modules_install: install
+
+clean:
+ifneq ($(VBOX_KERN_GROKS_SUBDIRS),)
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) clean
+else
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) M=$(CURDIR) SRCROOT=$(CURDIR) clean
+endif
+
+.PHONY: all $(VBOXMOD_0_TARGET) install modules_install clean
+endif # VBOX_KERN_GROKS_EXTMOD
+
diff --git a/ubuntu/vbox/vboxguest/Makefile.include.header b/ubuntu/vbox/vboxsf/Makefile-header.gmk
similarity index 51%
rename from ubuntu/vbox/vboxguest/Makefile.include.header
rename to ubuntu/vbox/vboxsf/Makefile-header.gmk
index 8b0434bd508e..456d2789ec30 100644
--- a/ubuntu/vbox/vboxguest/Makefile.include.header
+++ b/ubuntu/vbox/vboxsf/Makefile-header.gmk
@@ -1,4 +1,4 @@
-# $Id: Makefile.include.header $
+# $Id: Makefile-header.gmk $
## @file
# VirtualBox Guest Additions kernel module Makefile, common parts.
#
@@ -26,16 +26,15 @@
# build as part of the Guest Additions. The intended way of doing this is as
# follows:
#
-# # Linux kbuild sets this to our source directory if we are called from
-# # there
+# # Linux kbuild sets this to our source directory if we are called from there
# obj ?= $(CURDIR)
-# include $(obj)/Makefile.include.header
-# MOD_NAME = <name of the module to be built, without extension>
-# MOD_OBJS = <list of object files which should be included>
-# MOD_DEFS = <any additional defines which this module needs>
-# MOD_INCL = <any additional include paths which this module needs>
-# MOD_CFLAGS = <any additional CFLAGS which this module needs>
-# include $(obj)/Makefile.include.footer
+# include $(obj)/Makefile-header.gmk
+# VBOXMOD_NAME = <name of the module to be built, without extension>
+# VBOXMOD_OBJS = <list of object files which should be included>
+# VBOXMOD_DEFS = <any additional defines which this module needs>
+# VBOXMOD_INCL = <any additional include paths which this module needs>
+# VBOXMOD_CFLAGS = <any additional CFLAGS which this module needs>
+# include $(obj)/Makefile-footer.gmk
#
# The kmk kBuild define KBUILD_TARGET_ARCH is available.
#
@@ -79,7 +78,9 @@ ifeq ($(BUILD_TYPE),)
BUILD_TYPE := release
else
ifneq ($(BUILD_TYPE),release)
- $(warning Using BUILD_TYPE='$(BUILD_TYPE)' from the $(origin BUILD_TYPE).)
+ ifndef VBOX_KERN_QUIET
+ $(warning Using BUILD_TYPE='$(BUILD_TYPE)' from the $(origin BUILD_TYPE).)
+ endif
endif
endif
ifeq ($(USERNAME),)
@@ -108,14 +109,35 @@ ifeq ($(KERNELRELEASE),)
$(error The kernel build folder path must end in <version>/build, or the variable KERN_VER must be set)
endif
endif
- KERN_VER ?= $(shell uname -r)
+ KERN_VER ?= $(shell uname -r)
endif
- # guess kernel major version (24 or later)
- ifeq ($(shell if grep '"2\.4\.' /lib/modules/$(KERN_VER)/build/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ ifeq ($(KERN_DIR),)
+KERN_DIR := $(srctree)
+ endif
+
+ # Is this 2.4 or < 2.6.6? The UTS_RELEASE "2.x.y.z" define is present in the header until 2.6.1x something.
+ ifeq ($(shell if grep '"2\.4\.' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
KERN_VERSION := 24
+ VBOX_KERN_GROKS_EXTMOD :=
else
KERN_VERSION := 26
+ VBOX_KERN_GROKS_EXTMOD := yes
+ ifeq ($(shell if grep '"2\.6\.[012345][."]' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ VBOX_KERN_GROKS_EXTMOD :=
+ endif
+ VBOX_KERN_GROKS_SUBDIRS :=
+ ifeq ($(shell if grep '"[432]\.' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
+ endif
+
+ #
+  # Hack for Ubuntu 4.10 where we determine 2.6.8.1-3-generic-amd64 here, but
+ # the next invocation (M/SUBDIR) ends up with KERNELRELEASE=2.6.8.1-3.
+ #
+ ifeq ($(shell if grep '"[2]\.' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ export KERN_VER KERN_DIR
endif
else # neq($(KERNELRELEASE),)
@@ -125,22 +147,39 @@ else # neq($(KERNELRELEASE),)
#
# guess kernel version (24 or 26)
- ifeq ($(shell if echo "$(VERSION).$(PATCHLEVEL)." | grep '2\.4\.' > /dev/null; then echo yes; fi),yes)
+ ifeq ($(VERSION).$(PATCHLEVEL),2.4)
KERN_VERSION := 24
+ VBOX_KERN_GROKS_EXTMOD :=
else
KERN_VERSION := 26
+ VBOX_KERN_GROKS_EXTMOD := yes
+ ifeq ($(VERSION).$(PATCHLEVEL),2.6)
+      ifeq ($(findstring @$(SUBLEVEL)@,@0@1@2@3@4@5@),@$(SUBLEVEL)@)
+ VBOX_KERN_GROKS_EXTMOD :=
+ endif
+ endif
+ VBOX_KERN_GROKS_SUBDIRS :=
+ ifeq ($(VERSION),2)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
+ ifeq ($(VERSION),3)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
+ ifeq ($(VERSION),4)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
endif
KERN_VER := $(KERNELRELEASE)
+ ifeq ($(KERN_DIR),)
+KERN_DIR := $(srctree)
+ endif
endif # neq($(KERNELRELEASE),)
# Kernel build folder
-ifeq ($(KERN_DIR),)
-KERN_DIR := $(srctree)
-endif
ifneq ($(shell if test -d $(KERN_DIR); then echo yes; fi),yes)
- $(error Error: unable to find the headers of the Linux kernel to build against. \
+ $(error Error: unable to find the headers of the Linux kernel to build against (KERN_DIR=$(KERN_DIR)). \
Specify KERN_VER=<version> (currently $(KERN_VER)) and run Make again)
endif
# Kernel include folder
@@ -149,12 +188,59 @@ KERN_INCL := $(KERN_DIR)/include
INSTALL_MOD_DIR ?= misc
MODULE_DIR := $(INSTALL_MOD_PATH)/lib/modules/$(KERN_VER)/$(INSTALL_MOD_DIR)
+#
+# The KBUILD_EXTMOD variable is used by 2.6.6 and later when building external
+# modules (see https://lwn.net/Articles/79984/). It will be set to SUBDIRS
+# or M by the linux kernel makefile. We fake it here for older kernels.
+#
+## @todo Drop this KBUILD_EXTMOD glue once it has been removed from all our makefiles (see sharedfolders).
+ifndef CURDIR # for make < v3.79
+ CURDIR := $(shell pwd)
+endif
+ifndef KBUILD_EXTMOD
+ KBUILD_EXTMOD := $(CURDIR)
+endif
+
+
+# For VBOX_GCC_CHECK_CC
+VBOX_CLOSEPAR := )
+VBOX_DOLLAR := $$
+## Modified VBOX_GCC_CHECK_EX_CC_CXX macro from /Config.kmk.
+# @param 1 The option to test for.
+# @param 2 The return value when supported.
+# @param 3 The return value when NOT supported.
+VBOX_GCC_CHECK_CC = $(shell \
+ > /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c; \
+ if $(CC) $(subst -Wno-,-W,$(1)) -Werror -c -o /dev/null /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c > /dev/null 2>&1; then \
+ case "`LC_ALL=C $(CC) $(subst -Wno-,-W,$(1)) -Werror -c -o /dev/null /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c 2>&1`" in \
+ "error: unknown warning option"*$(VBOX_CLOSEPAR) echo "$(3)";; \
+ *$(VBOX_CLOSEPAR) echo "$(2)";; \
+ esac; \
+ else echo "$(3)"; fi; \
+ rm -f /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c; )
+
+#
+# Guess the module directory ASSUMING that this file is located in that directory.
+# Note! The special MAKEFILE_LIST variable was introduced in GNU make 3.80.
+#
+ifdef MAKEFILE_LIST
+ VBOX_MODULE_SRC_DIR := $(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+else
+ VBOX_MODULE_SRC_DIR := $(CURDIR)/
+endif
+
+
# debug - show guesses.
ifdef DEBUG
+ ifndef VBOX_KERN_QUIET
$(warning dbg: INSTALL_MOD_PATH = $(INSTALL_MOD_PATH))
$(warning dbg: INSTALL_MOD_DIR = $(INSTALL_MOD_DIR))
$(warning dbg: KERN_DIR = $(KERN_DIR))
$(warning dbg: KERN_INCL = $(KERN_INCL))
$(warning dbg: KERN_VERSION = $(KERN_VERSION))
$(warning dbg: MODULE_DIR = $(MODULE_DIR))
+$(warning dbg: KBUILD_EXTMOD = $(KBUILD_EXTMOD))
+$(warning dbg: VBOX_MODULE_SRC_DIR = $(VBOX_MODULE_SRC_DIR))
+ endif
endif
+
diff --git a/ubuntu/vbox/vboxsf/Makefile.include.footer b/ubuntu/vbox/vboxsf/Makefile.include.footer
deleted file mode 100644
index 7e04c3153eaa..000000000000
--- a/ubuntu/vbox/vboxsf/Makefile.include.footer
+++ /dev/null
@@ -1,117 +0,0 @@
-# $Id: Makefile.include.footer $
-## @file
-# VirtualBox Guest Additions kernel module Makefile, common parts.
-#
-# See Makefile.include.header for details of how to use this.
-#
-
-#
-# Copyright (C) 2006-2019 Oracle Corporation
-#
-# This file is part of VirtualBox Open Source Edition (OSE), as
-# available from http://www.virtualbox.org. This file is free software;
-# you can redistribute it and/or modify it under the terms of the GNU
-# General Public License (GPL) as published by the Free Software
-# Foundation, in version 2 as it comes in the "COPYING" file of the
-# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
-# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
-#
-
-# override is required by the Debian guys
-override MODULE = $(MOD_NAME)
-OBJS = $(MOD_OBJS)
-
-KBUILD_VERBOSE ?= 1
-LINUX_VERBOSE = $(if $(KBUILD_VERBOSE),1,)
-
-#
-# Compiler options
-#
-ifndef INCL
- INCL := $(addprefix -I,$(KERN_INCL) $(EXTRA_INCL))
- ifndef KBUILD_EXTMOD
- KBUILD_EXTMOD := $(shell pwd)
- endif
- INCL += $(MOD_INCL)
- export INCL
-endif
-KFLAGS := -D__KERNEL__ -DMODULE -DRT_WITHOUT_PRAGMA_ONCE $(MOD_DEFS)
-ifeq ($(BUILD_TYPE),debug)
-# The -Wno-array-bounds is because of a bug in gcc 4.something, see
-# https://sourceware.org/bugzilla/show_bug.cgi?id=10001
- KFLAGS += -DDEBUG -DDEBUG_$(subst $(subst _, ,_),_,$(USERNAME)) -DDEBUG_USERNAME=$(subst $(subst _, ,_),_,$(USERNAME))
- ifeq ($(shell expr $(KERN_VER) : '[23]\.'),0)
- KFLAGS += -Werror -Wall -Wno-array-bounds
- endif
-endif
-
-ifeq ($(KERN_VERSION), 24)
-#
-# 2.4
-#
-
-# Note: while 2.4 kernels could also do "proper" builds from kbuild, the make
-# script needed to support it was somewhat different from 2.6. Since this
-# script works and 2.4 is not a moving target we will not try do do things the
-# "proper" way.
-
-ifeq ($(BUILD_TARGET_ARCH),amd64)
- KFLAGS += -mcmodel=kernel
-endif
-
-CFLAGS := -O2 -DVBOX_LINUX_2_4 $(MOD_CFLAGS) $(INCL) $(KFLAGS) $(MOD_EXTRA) $(KDEBUG)
-MODULE_EXT := o
-
-# 2.4 Module linking
-$(MODULE).o: $(OBJS)
- $(LD) -o $@ -r $(OBJS)
-
-.PHONY: $(MODULE)
-all: $(MODULE)
-$(MODULE): $(MODULE).o
-
-install: $(MODULE)
- @mkdir -p $(MODULE_DIR); \
- install -m 0644 -o root -g root $(MODULE).$(MODULE_EXT) $(MODULE_DIR); \
- PATH="$(PATH):/bin:/sbin" depmod -a; sync
-
-clean:
- for f in $(sort $(dir $(OBJS))); do rm -f $$f/*.o $$f/.*.cmd $$f/.*.flags; done
- rm -rf .$(MOD_NAME)* .tmp_ver* $(MOD_NAME).* Modules.symvers modules.order
-
-else # ! $(KERN_VERSION), 24
-#
-# 2.6 and later
-#
-
-MODULE_EXT := ko
-
-$(MODULE)-y := $(OBJS)
-
-# build defs
-EXTRA_CFLAGS += $(MOD_CFLAGS) $(INCL) $(KFLAGS) $(MOD_EXTRA) $(KDEBUG)
-
-.PHONY: $(MODULE)
-all: $(MODULE)
-
-obj-m += $(MODULE).o
-
-JOBS := $(shell (getconf _NPROCESSORS_ONLN || grep -Ec '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
-ifeq ($(JOBS),0)
- override JOBS := 1
-endif
-
-# OL/UEK: disable module signing for external modules -- we don't have any private key
-$(MODULE):
- $(MAKE) V=$(LINUX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) $(if $(JOBS),-j$(JOBS),) modules
-
-install: $(MODULE)
- $(MAKE) V=$(LINUX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) modules_install
-
-modules_install: install
-
-clean:
- $(MAKE) V=$(LINUX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) clean
-
-.PHONY: $(MODULE) install modules_install clean
-endif
diff --git a/ubuntu/vbox/vboxsf/VBoxGuestR0LibGenericRequest.c b/ubuntu/vbox/vboxsf/VBoxGuestR0LibGenericRequest.c
new file mode 100644
index 000000000000..9391957b4b03
--- /dev/null
+++ b/ubuntu/vbox/vboxsf/VBoxGuestR0LibGenericRequest.c
@@ -0,0 +1,183 @@
+/* $Id: VBoxGuestR0LibGenericRequest.cpp $ */
+/** @file
+ * VBoxGuestLibR0 - Generic VMMDev request management.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "VBoxGuestR0LibInternal.h"
+#include <iprt/asm.h>
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/assert.h>
+#include <iprt/string.h>
+#include <VBox/err.h>
+
+
+DECLR0VBGL(int) VbglGR0Verify(const VMMDevRequestHeader *pReq, size_t cbReq)
+{
+ size_t cbReqExpected;
+
+ if (RT_UNLIKELY(!pReq || cbReq < sizeof(VMMDevRequestHeader)))
+ {
+ dprintf(("VbglGR0Verify: Invalid parameter: pReq = %p, cbReq = %zu\n", pReq, cbReq));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ if (RT_UNLIKELY(pReq->size > cbReq))
+ {
+ dprintf(("VbglGR0Verify: request size %u > buffer size %zu\n", pReq->size, cbReq));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /* The request size must correspond to the request type. */
+ cbReqExpected = vmmdevGetRequestSize(pReq->requestType);
+ if (RT_UNLIKELY(cbReq < cbReqExpected))
+ {
+ dprintf(("VbglGR0Verify: buffer size %zu < expected size %zu\n", cbReq, cbReqExpected));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ if (cbReqExpected == cbReq)
+ {
+ /*
+ * This is most likely a fixed size request, and in this case the
+         * request size must also be equal to the expected size.
+ */
+ if (RT_UNLIKELY(pReq->size != cbReqExpected))
+ {
+ dprintf(("VbglGR0Verify: request size %u != expected size %zu\n", pReq->size, cbReqExpected));
+ return VERR_INVALID_PARAMETER;
+ }
+
+ return VINF_SUCCESS;
+ }
+
+ /*
+ * This can be a variable size request. Check the request type and limit the size
+     * to VMMDEV_MAX_VMMDEVREQ_SIZE, which is the maximum size supported by the host.
+ *
+ * Note: Keep this list sorted for easier human lookup!
+ */
+ if ( pReq->requestType == VMMDevReq_ChangeMemBalloon
+ || pReq->requestType == VMMDevReq_GetDisplayChangeRequestMulti
+#ifdef VBOX_WITH_64_BITS_GUESTS
+ || pReq->requestType == VMMDevReq_HGCMCall64
+#endif
+ || pReq->requestType == VMMDevReq_HGCMCall32
+ || pReq->requestType == VMMDevReq_RegisterSharedModule
+ || pReq->requestType == VMMDevReq_ReportGuestUserState
+ || pReq->requestType == VMMDevReq_LogString
+ || pReq->requestType == VMMDevReq_SetPointerShape
+ || pReq->requestType == VMMDevReq_VideoSetVisibleRegion)
+ {
+ if (RT_UNLIKELY(cbReq > VMMDEV_MAX_VMMDEVREQ_SIZE))
+ {
+ dprintf(("VbglGR0Verify: VMMDevReq_LogString: buffer size %zu too big\n", cbReq));
+ return VERR_BUFFER_OVERFLOW; /** @todo is this error code ok? */
+ }
+ }
+ else
+ {
+ dprintf(("VbglGR0Verify: request size %u > buffer size %zu\n", pReq->size, cbReq));
+ return VERR_IO_BAD_LENGTH; /** @todo is this error code ok? */
+ }
+
+ return VINF_SUCCESS;
+}
+
+DECLR0VBGL(int) VbglR0GRAlloc(VMMDevRequestHeader **ppReq, size_t cbReq, VMMDevRequestType enmReqType)
+{
+ int rc = vbglR0Enter();
+ if (RT_SUCCESS(rc))
+ {
+ if ( ppReq
+ && cbReq >= sizeof(VMMDevRequestHeader)
+ && cbReq == (uint32_t)cbReq)
+ {
+ VMMDevRequestHeader *pReq = (VMMDevRequestHeader *)VbglR0PhysHeapAlloc((uint32_t)cbReq);
+ AssertMsgReturn(pReq, ("VbglR0GRAlloc: no memory (cbReq=%u)\n", cbReq), VERR_NO_MEMORY);
+ memset(pReq, 0xAA, cbReq);
+
+ pReq->size = (uint32_t)cbReq;
+ pReq->version = VMMDEV_REQUEST_HEADER_VERSION;
+ pReq->requestType = enmReqType;
+ pReq->rc = VERR_GENERAL_FAILURE;
+ pReq->reserved1 = 0;
+#ifdef VBGL_VBOXGUEST
+ pReq->fRequestor = VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV
+#else
+ pReq->fRequestor = VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV_OTHER
+#endif
+
+ | VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
+ *ppReq = pReq;
+ rc = VINF_SUCCESS;
+ }
+ else
+ {
+ dprintf(("VbglR0GRAlloc: Invalid parameter: ppReq=%p cbReq=%u\n", ppReq, cbReq));
+ rc = VERR_INVALID_PARAMETER;
+ }
+ }
+ return rc;
+}
+
+DECLR0VBGL(int) VbglR0GRPerform(VMMDevRequestHeader *pReq)
+{
+ int rc = vbglR0Enter();
+ if (RT_SUCCESS(rc))
+ {
+ if (pReq)
+ {
+ RTCCPHYS PhysAddr = VbglR0PhysHeapGetPhysAddr(pReq);
+ if ( PhysAddr != 0
+ && PhysAddr < _4G) /* Port IO is 32 bit. */
+ {
+ ASMOutU32(g_vbgldata.portVMMDev + VMMDEV_PORT_OFF_REQUEST, (uint32_t)PhysAddr);
+ /* Make the compiler aware that the host has changed memory. */
+ ASMCompilerBarrier();
+ rc = pReq->rc;
+ }
+ else
+ rc = VERR_VBGL_INVALID_ADDR;
+ }
+ else
+ rc = VERR_INVALID_PARAMETER;
+ }
+ return rc;
+}
+
+DECLR0VBGL(void) VbglR0GRFree(VMMDevRequestHeader *pReq)
+{
+ int rc = vbglR0Enter();
+ if (RT_SUCCESS(rc))
+ VbglR0PhysHeapFree(pReq);
+}
+
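
The three request functions above form a simple lifecycle; a minimal caller
sketch (it mirrors vbglR0QueryHostVersion in the next file):

    VMMDevReqHostVersion *pReq = NULL;
    int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq),
                           VMMDevReq_GetHostVersion);
    if (RT_SUCCESS(rc))
    {
        rc = VbglR0GRPerform(&pReq->header); /* hands the phys addr to the host */
        /* on success pReq->major etc. now hold the host's reply */
        VbglR0GRFree(&pReq->header);         /* always free, success or failure */
    }
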
diff --git a/ubuntu/vbox/vboxsf/VBoxGuestR0LibInit.c b/ubuntu/vbox/vboxsf/VBoxGuestR0LibInit.c
new file mode 100644
index 000000000000..3a758d701d81
--- /dev/null
+++ b/ubuntu/vbox/vboxsf/VBoxGuestR0LibInit.c
@@ -0,0 +1,333 @@
+/* $Id: VBoxGuestR0LibInit.cpp $ */
+/** @file
+ * VBoxGuestLibR0 - Library initialization.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "VBoxGuestR0LibInternal.h"
+
+#include <iprt/string.h>
+#include <iprt/assert.h>
+#include <iprt/semaphore.h>
+#include <VBox/err.h>
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** The global VBGL instance data. */
+VBGLDATA g_vbgldata;
+
+
+/**
+ * Used by vbglR0QueryDriverInfo and VbglInit to try to get the host feature mask
+ * and version information (g_vbgldata::hostVersion).
+ *
+ * This was first implemented by the host in 3.1 and we quietly ignore failures
+ * for that reason.
+ */
+static void vbglR0QueryHostVersion(void)
+{
+ VMMDevReqHostVersion *pReq;
+ int rc = VbglR0GRAlloc((VMMDevRequestHeader **) &pReq, sizeof (*pReq), VMMDevReq_GetHostVersion);
+ if (RT_SUCCESS(rc))
+ {
+ rc = VbglR0GRPerform(&pReq->header);
+ if (RT_SUCCESS(rc))
+ {
+ g_vbgldata.hostVersion = *pReq;
+ Log(("vbglR0QueryHostVersion: %u.%u.%ur%u %#x\n",
+ pReq->major, pReq->minor, pReq->build, pReq->revision, pReq->features));
+ }
+
+ VbglR0GRFree(&pReq->header);
+ }
+}
+
+
+#ifndef VBGL_VBOXGUEST
+/**
+ * The guest library uses lazy initialization for VMMDev port and memory,
+ * because these values are provided by the VBoxGuest driver and it might
+ * be loaded later than other drivers.
+ *
+ * The VbglEnter checks the current library status, tries to retrieve these
+ * values and fails if they are unavailable.
+ */
+static int vbglR0QueryDriverInfo(void)
+{
+# ifdef VBGLDATA_USE_FAST_MUTEX
+ int rc = RTSemFastMutexRequest(g_vbgldata.hMtxIdcSetup);
+# else
+ int rc = RTSemMutexRequest(g_vbgldata.hMtxIdcSetup, RT_INDEFINITE_WAIT);
+# endif
+ if (RT_SUCCESS(rc))
+ {
+ if (g_vbgldata.status == VbglStatusReady)
+ { /* likely */ }
+ else
+ {
+ rc = VbglR0IdcOpen(&g_vbgldata.IdcHandle,
+ VBGL_IOC_VERSION /*uReqVersion*/,
+ VBGL_IOC_VERSION & UINT32_C(0xffff0000) /*uMinVersion*/,
+ NULL /*puSessionVersion*/, NULL /*puDriverVersion*/, NULL /*puDriverRevision*/);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Try query the port info.
+ */
+ VBGLIOCGETVMMDEVIOINFO PortInfo;
+ RT_ZERO(PortInfo);
+ VBGLREQHDR_INIT(&PortInfo.Hdr, GET_VMMDEV_IO_INFO);
+ rc = VbglR0IdcCall(&g_vbgldata.IdcHandle, VBGL_IOCTL_GET_VMMDEV_IO_INFO, &PortInfo.Hdr, sizeof(PortInfo));
+ if (RT_SUCCESS(rc))
+ {
+ dprintf(("Port I/O = 0x%04x, MMIO = %p\n", PortInfo.u.Out.IoPort, PortInfo.u.Out.pvVmmDevMapping));
+
+ g_vbgldata.portVMMDev = PortInfo.u.Out.IoPort;
+ g_vbgldata.pVMMDevMemory = (VMMDevMemory *)PortInfo.u.Out.pvVmmDevMapping;
+ g_vbgldata.status = VbglStatusReady;
+
+ vbglR0QueryHostVersion();
+ }
+ }
+
+ dprintf(("vbglQueryDriverInfo rc = %Rrc\n", rc));
+ }
+
+# ifdef VBGLDATA_USE_FAST_MUTEX
+ RTSemFastMutexRelease(g_vbgldata.hMtxIdcSetup);
+# else
+ RTSemMutexRelease(g_vbgldata.hMtxIdcSetup);
+# endif
+ }
+ return rc;
+}
+#endif /* !VBGL_VBOXGUEST */
+
+/**
+ * Checks if VBGL has been initialized.
+ *
+ * In the client library, this will lazily complete the initialization.
+ *
+ * @return VINF_SUCCESS or VERR_VBGL_NOT_INITIALIZED.
+ */
+int vbglR0Enter(void)
+{
+ if (g_vbgldata.status == VbglStatusReady)
+ return VINF_SUCCESS;
+
+#ifndef VBGL_VBOXGUEST
+ if (g_vbgldata.status == VbglStatusInitializing)
+ {
+ vbglR0QueryDriverInfo();
+ if (g_vbgldata.status == VbglStatusReady)
+ return VINF_SUCCESS;
+ }
+#endif
+ return VERR_VBGL_NOT_INITIALIZED;
+}
+
+
+static int vbglR0InitCommon(void)
+{
+ int rc;
+
+ RT_ZERO(g_vbgldata);
+ g_vbgldata.status = VbglStatusInitializing;
+
+ rc = VbglR0PhysHeapInit();
+ if (RT_SUCCESS(rc))
+ {
+ dprintf(("vbglR0InitCommon: returns rc = %d\n", rc));
+ return rc;
+ }
+
+ LogRel(("vbglR0InitCommon: VbglR0PhysHeapInit failed: rc=%Rrc\n", rc));
+ g_vbgldata.status = VbglStatusNotInitialized;
+ return rc;
+}
+
+
+static void vbglR0TerminateCommon(void)
+{
+ VbglR0PhysHeapTerminate();
+ g_vbgldata.status = VbglStatusNotInitialized;
+}
+
+#ifdef VBGL_VBOXGUEST
+
+DECLR0VBGL(int) VbglR0InitPrimary(RTIOPORT portVMMDev, VMMDevMemory *pVMMDevMemory, uint32_t *pfFeatures)
+{
+ int rc;
+
+# ifdef RT_OS_WINDOWS /** @todo r=bird: this doesn't make sense. Is there something special going on on windows? */
+ dprintf(("vbglInit: starts g_vbgldata.status %d\n", g_vbgldata.status));
+
+ if ( g_vbgldata.status == VbglStatusInitializing
+ || g_vbgldata.status == VbglStatusReady)
+ {
+ /* Initialization is already in process. */
+ return VINF_SUCCESS;
+ }
+# else
+ dprintf(("vbglInit: starts\n"));
+# endif
+
+ rc = vbglR0InitCommon();
+ if (RT_SUCCESS(rc))
+ {
+ g_vbgldata.portVMMDev = portVMMDev;
+ g_vbgldata.pVMMDevMemory = pVMMDevMemory;
+ g_vbgldata.status = VbglStatusReady;
+
+ vbglR0QueryHostVersion();
+ *pfFeatures = g_vbgldata.hostVersion.features;
+ return VINF_SUCCESS;
+ }
+
+ g_vbgldata.status = VbglStatusNotInitialized;
+ return rc;
+}
+
+DECLR0VBGL(void) VbglR0TerminatePrimary(void)
+{
+ vbglR0TerminateCommon();
+}
+
+
+#else /* !VBGL_VBOXGUEST */
+
+DECLR0VBGL(int) VbglR0InitClient(void)
+{
+ int rc;
+
+ /** @todo r=bird: explain why we need to be doing this, please... */
+ if ( g_vbgldata.status == VbglStatusInitializing
+ || g_vbgldata.status == VbglStatusReady)
+ {
+ /* Initialization is already in process. */
+ return VINF_SUCCESS;
+ }
+
+ rc = vbglR0InitCommon();
+ if (RT_SUCCESS(rc))
+ {
+# ifdef VBGLDATA_USE_FAST_MUTEX
+ rc = RTSemFastMutexCreate(&g_vbgldata.hMtxIdcSetup);
+# else
+ rc = RTSemMutexCreate(&g_vbgldata.hMtxIdcSetup);
+# endif
+ if (RT_SUCCESS(rc))
+ {
+ /* Try to obtain VMMDev port via IOCTL to VBoxGuest main driver. */
+ vbglR0QueryDriverInfo();
+
+# ifdef VBOX_WITH_HGCM
+ rc = VbglR0HGCMInit();
+# endif
+ if (RT_SUCCESS(rc))
+ return VINF_SUCCESS;
+
+# ifdef VBGLDATA_USE_FAST_MUTEX
+ RTSemFastMutexDestroy(g_vbgldata.hMtxIdcSetup);
+ g_vbgldata.hMtxIdcSetup = NIL_RTSEMFASTMUTEX;
+# else
+ RTSemMutexDestroy(g_vbgldata.hMtxIdcSetup);
+ g_vbgldata.hMtxIdcSetup = NIL_RTSEMMUTEX;
+# endif
+ }
+ vbglR0TerminateCommon();
+ }
+
+ return rc;
+}
+
+DECLR0VBGL(void) VbglR0TerminateClient(void)
+{
+# ifdef VBOX_WITH_HGCM
+ VbglR0HGCMTerminate();
+# endif
+
+    /* The driver open could fail without preventing VbglInit from succeeding;
+     * close the driver only if it was actually opened. */
+ VbglR0IdcClose(&g_vbgldata.IdcHandle);
+# ifdef VBGLDATA_USE_FAST_MUTEX
+ RTSemFastMutexDestroy(g_vbgldata.hMtxIdcSetup);
+ g_vbgldata.hMtxIdcSetup = NIL_RTSEMFASTMUTEX;
+# else
+ RTSemMutexDestroy(g_vbgldata.hMtxIdcSetup);
+ g_vbgldata.hMtxIdcSetup = NIL_RTSEMMUTEX;
+# endif
+
+    /* Note: Do vbglR0TerminateCommon as the last step since it zeroes g_vbgldata;
+     * conceptually, doing vbglR0TerminateCommon last is correct,
+     * since this is the reverse of the order in which init is done. */
+ vbglR0TerminateCommon();
+}
+
+
+int VBOXCALL vbglR0QueryIdcHandle(PVBGLIDCHANDLE *ppIdcHandle)
+{
+ if (g_vbgldata.status == VbglStatusReady)
+ { /* likely */ }
+ else
+ {
+ vbglR0QueryDriverInfo();
+ if (g_vbgldata.status != VbglStatusReady)
+ {
+ *ppIdcHandle = NULL;
+ return VERR_TRY_AGAIN;
+ }
+ }
+
+ *ppIdcHandle = &g_vbgldata.IdcHandle;
+ return VINF_SUCCESS;
+}
+
+
+DECLR0VBGL(int) VbglR0QueryHostFeatures(uint32_t *pfHostFeatures)
+{
+ if (g_vbgldata.status == VbglStatusReady)
+ *pfHostFeatures = g_vbgldata.hostVersion.features;
+ else
+ {
+ int rc = vbglR0QueryDriverInfo();
+ if (g_vbgldata.status != VbglStatusReady)
+ return rc;
+ *pfHostFeatures = g_vbgldata.hostVersion.features;
+ }
+
+ return VINF_SUCCESS;
+}
+
+#endif /* !VBGL_VBOXGUEST */
+
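
A condensed sketch of how a client module such as vboxsf is expected to drive
this pair (hook names hypothetical):

    static int __init mySfInit(void)
    {
        int rc = VbglR0InitClient();  /* lazily binds to VBoxGuest via IDC */
        return RT_SUCCESS(rc) ? 0 : -EPROTO;
    }

    static void __exit mySfExit(void)
    {
        VbglR0TerminateClient();      /* closes the IDC connection, frees the heap */
    }
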
diff --git a/ubuntu/vbox/vboxsf/VBoxGuestR0LibPhysHeap.c b/ubuntu/vbox/vboxsf/VBoxGuestR0LibPhysHeap.c
new file mode 100644
index 000000000000..0cd11db02da0
--- /dev/null
+++ b/ubuntu/vbox/vboxsf/VBoxGuestR0LibPhysHeap.c
@@ -0,0 +1,664 @@
+/* $Id: VBoxGuestR0LibPhysHeap.cpp $ */
+/** @file
+ * VBoxGuestLibR0 - Physical memory heap.
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "VBoxGuestR0LibInternal.h"
+
+#include <iprt/assert.h>
+#include <iprt/semaphore.h>
+#include <iprt/alloc.h>
+
+/* The physical memory heap consists of a doubly linked list
+ * of chunks. Memory blocks are allocated inside these chunks
+ * and are members of the Allocated and Free doubly linked lists.
+ *
+ * When allocating a block, we search the Free list for a suitable
+ * free block. If there is no such block, a new chunk is allocated
+ * and the new block is taken from the new chunk as the only
+ * chunk-sized free block. An allocated block is removed from the
+ * Free list and goes to the Alloc list.
+ *
+ * When freeing a block, we check the pointer, remove the block
+ * from the Alloc list and move it to the Free list.
+ *
+ * For each chunk we maintain a counter of allocated blocks. If two
+ * (or more) entire chunks are free, they are immediately
+ * deallocated, so we always have at most one free chunk.
+ *
+ * When freeing blocks, two adjacent free blocks are always merged
+ * together. The current implementation merges blocks only when
+ * there is a block after the just-freed one.
+ */
+
+#define VBGL_PH_ASSERT Assert
+#define VBGL_PH_ASSERTMsg AssertMsg
+
+// #define DUMPHEAP
+
+#ifdef DUMPHEAP
+# define VBGL_PH_dprintf(a) RTAssertMsg2Weak a
+#else
+# define VBGL_PH_dprintf(a)
+#endif
+
+/* Heap block signature */
+#define VBGL_PH_BLOCKSIGNATURE (0xADDBBBBB)
+
+
+/* Heap chunk signature */
+#define VBGL_PH_CHUNKSIGNATURE (0xADDCCCCC)
+/* Heap chunk allocation unit */
+#define VBGL_PH_CHUNKSIZE (0x10000)
+
+/* Heap block bit flags */
+#define VBGL_PH_BF_ALLOCATED (0x1)
+
+struct _VBGLPHYSHEAPBLOCK
+{
+ uint32_t u32Signature;
+
+ /* Size of user data in the block. Does not include the block header. */
+ uint32_t cbDataSize;
+
+ uint32_t fu32Flags;
+
+ struct _VBGLPHYSHEAPBLOCK *pNext;
+ struct _VBGLPHYSHEAPBLOCK *pPrev;
+
+ struct _VBGLPHYSHEAPCHUNK *pChunk;
+};
+
+struct _VBGLPHYSHEAPCHUNK
+{
+ uint32_t u32Signature;
+
+ /* Size of the chunk. Includes the chunk header. */
+ uint32_t cbSize;
+
+ /* Physical address of the chunk */
+ uint32_t physAddr;
+
+ /* Number of allocated blocks in the chunk */
+ int32_t cAllocatedBlocks;
+
+ struct _VBGLPHYSHEAPCHUNK *pNext;
+ struct _VBGLPHYSHEAPCHUNK *pPrev;
+};
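+
+/* Resulting in-memory layout of a chunk (sketch):
+ *
+ *   [VBGLPHYSHEAPCHUNK hdr][VBGLPHYSHEAPBLOCK hdr][data ...][VBGLPHYSHEAPBLOCK hdr][data ...] ...
+ *
+ * The first block header starts right after the chunk header, and each
+ * block's user data starts right after its block header (see
+ * vbglPhysHeapBlock2Data / vbglPhysHeapData2Block below).
+ */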
+
+
+#ifndef DUMPHEAP
+#define dumpheap(a)
+#else
+void dumpheap (char *point)
+{
+ VBGL_PH_dprintf(("VBGL_PH dump at '%s'\n", point));
+
+ VBGL_PH_dprintf(("Chunks:\n"));
+
+ VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead;
+
+ while (pChunk)
+ {
+ VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, allocated = %8d, phys = %08X\n",
+ pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbSize, pChunk->cAllocatedBlocks, pChunk->physAddr));
+
+ pChunk = pChunk->pNext;
+ }
+
+ VBGL_PH_dprintf(("Allocated blocks:\n"));
+
+ VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pAllocBlocksHead;
+
+ while (pBlock)
+ {
+ VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, flags = %08X, pChunk = %p\n",
+ pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fu32Flags, pBlock->pChunk));
+
+ pBlock = pBlock->pNext;
+ }
+
+ VBGL_PH_dprintf(("Free blocks:\n"));
+
+ pBlock = g_vbgldata.pFreeBlocksHead;
+
+ while (pBlock)
+ {
+ VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, flags = %08X, pChunk = %p\n",
+ pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fu32Flags, pBlock->pChunk));
+
+ pBlock = pBlock->pNext;
+ }
+
+ VBGL_PH_dprintf(("VBGL_PH dump at '%s' done\n", point));
+}
+#endif
+
+
+DECLINLINE(void *) vbglPhysHeapBlock2Data (VBGLPHYSHEAPBLOCK *pBlock)
+{
+ return (void *)(pBlock? (char *)pBlock + sizeof (VBGLPHYSHEAPBLOCK): NULL);
+}
+
+DECLINLINE(VBGLPHYSHEAPBLOCK *) vbglPhysHeapData2Block (void *p)
+{
+ VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)(p? (char *)p - sizeof (VBGLPHYSHEAPBLOCK): NULL);
+
+ VBGL_PH_ASSERTMsg(pBlock == NULL || pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
+ ("pBlock->u32Signature = %08X\n", pBlock->u32Signature));
+
+ return pBlock;
+}
+
+DECLINLINE(int) vbglPhysHeapEnter (void)
+{
+ int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
+
+ VBGL_PH_ASSERTMsg(RT_SUCCESS(rc),
+ ("Failed to request heap mutex, rc = %Rrc\n", rc));
+
+ return rc;
+}
+
+DECLINLINE(void) vbglPhysHeapLeave (void)
+{
+ RTSemFastMutexRelease(g_vbgldata.mutexHeap);
+}
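+
+/* VbglR0PhysHeapAlloc() and VbglR0PhysHeapFree() serialize on this fast
+ * mutex; the list manipulation helpers below expect it to be held (or to
+ * run during single-threaded heap init/termination). */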
+
+
+static void vbglPhysHeapInitBlock (VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbDataSize)
+{
+ VBGL_PH_ASSERT(pBlock != NULL);
+ VBGL_PH_ASSERT(pChunk != NULL);
+
+ pBlock->u32Signature = VBGL_PH_BLOCKSIGNATURE;
+ pBlock->cbDataSize = cbDataSize;
+ pBlock->fu32Flags = 0;
+ pBlock->pNext = NULL;
+ pBlock->pPrev = NULL;
+ pBlock->pChunk = pChunk;
+}
+
+
+static void vbglPhysHeapInsertBlock (VBGLPHYSHEAPBLOCK *pInsertAfter, VBGLPHYSHEAPBLOCK *pBlock)
+{
+ VBGL_PH_ASSERTMsg(pBlock->pNext == NULL,
+ ("pBlock->pNext = %p\n", pBlock->pNext));
+ VBGL_PH_ASSERTMsg(pBlock->pPrev == NULL,
+ ("pBlock->pPrev = %p\n", pBlock->pPrev));
+
+ if (pInsertAfter)
+ {
+ pBlock->pNext = pInsertAfter->pNext;
+ pBlock->pPrev = pInsertAfter;
+
+ if (pInsertAfter->pNext)
+ {
+ pInsertAfter->pNext->pPrev = pBlock;
+ }
+
+ pInsertAfter->pNext = pBlock;
+ }
+ else
+ {
+ /* inserting to head of list */
+ pBlock->pPrev = NULL;
+
+ if (pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED)
+ {
+ pBlock->pNext = g_vbgldata.pAllocBlocksHead;
+
+ if (g_vbgldata.pAllocBlocksHead)
+ {
+ g_vbgldata.pAllocBlocksHead->pPrev = pBlock;
+ }
+
+ g_vbgldata.pAllocBlocksHead = pBlock;
+ }
+ else
+ {
+ pBlock->pNext = g_vbgldata.pFreeBlocksHead;
+
+ if (g_vbgldata.pFreeBlocksHead)
+ {
+ g_vbgldata.pFreeBlocksHead->pPrev = pBlock;
+ }
+
+ g_vbgldata.pFreeBlocksHead = pBlock;
+ }
+ }
+}
+
+static void vbglPhysHeapExcludeBlock (VBGLPHYSHEAPBLOCK *pBlock)
+{
+ if (pBlock->pNext)
+ {
+ pBlock->pNext->pPrev = pBlock->pPrev;
+ }
+ else
+ {
+        /* this is the tail of the list, but we do not maintain
+         * tails of block lists, so there is nothing to do.
+         */
+ ;
+ }
+
+ if (pBlock->pPrev)
+ {
+ pBlock->pPrev->pNext = pBlock->pNext;
+ }
+ else
+ {
+        /* this is the head of a list, so update the corresponding head pointer. */
+ if (pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED)
+ {
+ g_vbgldata.pAllocBlocksHead = pBlock->pNext;
+ }
+ else
+ {
+ g_vbgldata.pFreeBlocksHead = pBlock->pNext;
+ }
+ }
+
+ pBlock->pNext = NULL;
+ pBlock->pPrev = NULL;
+}
+
+static VBGLPHYSHEAPBLOCK *vbglPhysHeapChunkAlloc (uint32_t cbSize)
+{
+ RTCCPHYS physAddr;
+ VBGLPHYSHEAPCHUNK *pChunk;
+ VBGLPHYSHEAPBLOCK *pBlock;
+ VBGL_PH_dprintf(("Allocating new chunk of size %d\n", cbSize));
+
+ /* Compute chunk size to allocate */
+ if (cbSize < VBGL_PH_CHUNKSIZE)
+ {
+ /* Includes case of block size 0 during initialization */
+ cbSize = VBGL_PH_CHUNKSIZE;
+ }
+ else
+ {
+        /* Round up to the next multiple of the chunk size, which must be a power of 2 */
+ cbSize = (cbSize + (VBGL_PH_CHUNKSIZE - 1)) & ~(VBGL_PH_CHUNKSIZE - 1);
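+        /* E.g. with the 64KB chunk size a request of 0x12345 bytes yields
+           (0x12345 + 0xFFFF) & ~0xFFFF = 0x20000. */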
+ }
+
+ physAddr = 0;
+    /* This function allocates physically contiguous memory (below 4GB) according to the IPRT docs.
+     * An address below 4GB is required for the port I/O.
+     */
+ pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc (&physAddr, cbSize);
+
+ if (!pChunk)
+ {
+ LogRel(("vbglPhysHeapChunkAlloc: failed to alloc %u contiguous bytes.\n", cbSize));
+ return NULL;
+ }
+
+ AssertRelease(physAddr < _4G && physAddr + cbSize <= _4G);
+
+ pChunk->u32Signature = VBGL_PH_CHUNKSIGNATURE;
+ pChunk->cbSize = cbSize;
+ pChunk->physAddr = (uint32_t)physAddr;
+ pChunk->cAllocatedBlocks = 0;
+ pChunk->pNext = g_vbgldata.pChunkHead;
+ pChunk->pPrev = NULL;
+
+ /* Initialize the free block, which now occupies entire chunk. */
+ pBlock = (VBGLPHYSHEAPBLOCK *)((char *)pChunk + sizeof (VBGLPHYSHEAPCHUNK));
+
+ vbglPhysHeapInitBlock (pBlock, pChunk, cbSize - sizeof (VBGLPHYSHEAPCHUNK) - sizeof (VBGLPHYSHEAPBLOCK));
+
+ vbglPhysHeapInsertBlock (NULL, pBlock);
+
+    /* Insert the chunk at the head of the chunk list, fixing up the old head's pPrev. */
+    if (g_vbgldata.pChunkHead)
+        g_vbgldata.pChunkHead->pPrev = pChunk;
+    g_vbgldata.pChunkHead = pChunk;
+
+ VBGL_PH_dprintf(("Allocated chunk %p, block = %p size=%x\n", pChunk, pBlock, cbSize));
+
+ return pBlock;
+}
+
+
+static void vbglPhysHeapChunkDelete (VBGLPHYSHEAPCHUNK *pChunk)
+{
+ char *p;
+ VBGL_PH_ASSERT(pChunk != NULL);
+ VBGL_PH_ASSERTMsg(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
+ ("pChunk->u32Signature = %08X\n", pChunk->u32Signature));
+
+ VBGL_PH_dprintf(("Deleting chunk %p size %x\n", pChunk, pChunk->cbSize));
+
+ /* first scan the chunk and exclude all blocks from lists */
+
+ p = (char *)pChunk + sizeof (VBGLPHYSHEAPCHUNK);
+
+ while (p < (char *)pChunk + pChunk->cbSize)
+ {
+ VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)p;
+
+ p += pBlock->cbDataSize + sizeof (VBGLPHYSHEAPBLOCK);
+
+ vbglPhysHeapExcludeBlock (pBlock);
+ }
+
+ VBGL_PH_ASSERTMsg(p == (char *)pChunk + pChunk->cbSize,
+ ("p = %p, (char *)pChunk + pChunk->cbSize = %p, pChunk->cbSize = %08X\n",
+ p, (char *)pChunk + pChunk->cbSize, pChunk->cbSize));
+
+ /* Exclude chunk from the chunk list */
+ if (pChunk->pNext)
+ {
+ pChunk->pNext->pPrev = pChunk->pPrev;
+ }
+ else
+ {
+        /* this is the tail, and we do not maintain a tail pointer, so there is nothing to do */
+ ;
+ }
+
+ if (pChunk->pPrev)
+ {
+ pChunk->pPrev->pNext = pChunk->pNext;
+ }
+ else
+ {
+ /* the chunk was head */
+ g_vbgldata.pChunkHead = pChunk->pNext;
+ }
+
+ RTMemContFree (pChunk, pChunk->cbSize);
+}
+
+
+DECLR0VBGL(void *) VbglR0PhysHeapAlloc (uint32_t cbSize)
+{
+ VBGLPHYSHEAPBLOCK *pBlock, *pIter;
+ int rc = vbglPhysHeapEnter ();
+
+ if (RT_FAILURE(rc))
+ return NULL;
+
+ dumpheap ("pre alloc");
+
+ /*
+ * Search the free list. We do this in linear fashion as we don't expect
+ * there to be many blocks in the heap.
+ */
+
+ pBlock = NULL;
+ if (cbSize <= PAGE_SIZE / 4 * 3)
+ {
+ /* Smaller than 3/4 page: Prefer a free block that can keep the request within a single page,
+ so HGCM processing in VMMDev can use page locks instead of several reads and writes. */
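+        /* (With 4KB pages this path covers requests of up to 3072 bytes.) */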
+
+ VBGLPHYSHEAPBLOCK *pFallback = NULL;
+ for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
+ if (pIter->cbDataSize >= cbSize)
+ {
+ if (pIter->cbDataSize == cbSize)
+ {
+ if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
+ {
+ pBlock = pIter;
+ break;
+ }
+ pFallback = pIter;
+ }
+ else
+ {
+ if (!pFallback || pIter->cbDataSize < pFallback->cbDataSize)
+ pFallback = pIter;
+ if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
+ if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
+ pBlock = pIter;
+ }
+ }
+
+ if (!pBlock)
+ pBlock = pFallback;
+ }
+ else
+ {
+        /* Larger than 3/4 of a page: find the smallest match in the free list. */
+
+ for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
+ if (pIter->cbDataSize >= cbSize)
+ {
+ if (pIter->cbDataSize == cbSize)
+ {
+ /* Exact match - we're done! */
+ pBlock = pIter;
+ break;
+ }
+
+ /* Looking for a free block with nearest size. */
+ if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
+ pBlock = pIter;
+ }
+ }
+
+ if (!pBlock)
+ {
+ /* No free blocks, allocate a new chunk,
+ * the only free block of the chunk will
+ * be returned.
+ */
+ pBlock = vbglPhysHeapChunkAlloc (cbSize);
+ }
+
+ if (pBlock)
+ {
+ VBGL_PH_ASSERTMsg(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
+ ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->u32Signature));
+ VBGL_PH_ASSERTMsg((pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) == 0,
+ ("pBlock = %p, pBlock->fu32Flags = %08X\n", pBlock, pBlock->fu32Flags));
+
+ /* We have a free block, either found or allocated. */
+
+ if (pBlock->cbDataSize > 2*(cbSize + sizeof (VBGLPHYSHEAPBLOCK)))
+ {
+            /* The data will occupy less than half of the block;
+             * split off the tail end into a new free list entry.
+ */
+ pIter = (VBGLPHYSHEAPBLOCK *)((char *)pBlock + sizeof (VBGLPHYSHEAPBLOCK) + cbSize);
+
+            /* Init the new 'pIter' block; initialized blocks are always marked as free. */
+ vbglPhysHeapInitBlock (pIter, pBlock->pChunk, pBlock->cbDataSize - cbSize - sizeof (VBGLPHYSHEAPBLOCK));
+
+ pBlock->cbDataSize = cbSize;
+
+ /* Insert the new 'pIter' block after the 'pBlock' in the free list */
+ vbglPhysHeapInsertBlock (pBlock, pIter);
+ }
+
+ /* Exclude pBlock from free list */
+ vbglPhysHeapExcludeBlock (pBlock);
+
+ /* Mark as allocated */
+ pBlock->fu32Flags |= VBGL_PH_BF_ALLOCATED;
+
+ /* Insert to allocated list */
+ vbglPhysHeapInsertBlock (NULL, pBlock);
+
+ /* Adjust the chunk allocated blocks counter */
+ pBlock->pChunk->cAllocatedBlocks++;
+ }
+
+ dumpheap ("post alloc");
+
+ vbglPhysHeapLeave ();
+ VBGL_PH_dprintf(("VbglR0PhysHeapAlloc %x size %x\n", vbglPhysHeapBlock2Data (pBlock), pBlock->cbDataSize));
+
+ return vbglPhysHeapBlock2Data (pBlock);
+}
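+
+/* Typical usage sketch (illustrative only):
+ *
+ *     void *pv = VbglR0PhysHeapAlloc(cb);
+ *     if (pv)
+ *     {
+ *         uint32_t PhysAddr = VbglR0PhysHeapGetPhysAddr(pv);
+ *         ... hand PhysAddr to the host, then:
+ *         VbglR0PhysHeapFree(pv);
+ *     }
+ */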
+
+DECLR0VBGL(uint32_t) VbglR0PhysHeapGetPhysAddr (void *p)
+{
+ uint32_t physAddr = 0;
+ VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapData2Block (p);
+
+ if (pBlock)
+ {
+ VBGL_PH_ASSERTMsg((pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) != 0,
+ ("pBlock = %p, pBlock->fu32Flags = %08X\n", pBlock, pBlock->fu32Flags));
+
+ if (pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED)
+ physAddr = pBlock->pChunk->physAddr + (uint32_t)((uintptr_t)p - (uintptr_t)pBlock->pChunk);
+ }
+
+ return physAddr;
+}
+
+DECLR0VBGL(void) VbglR0PhysHeapFree(void *p)
+{
+ VBGLPHYSHEAPBLOCK *pBlock;
+ VBGLPHYSHEAPBLOCK *pNeighbour;
+
+ int rc = vbglPhysHeapEnter ();
+ if (RT_FAILURE(rc))
+ return;
+
+ dumpheap ("pre free");
+
+ pBlock = vbglPhysHeapData2Block (p);
+
+ if (!pBlock)
+ {
+ vbglPhysHeapLeave ();
+ return;
+ }
+
+ VBGL_PH_ASSERTMsg((pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) != 0,
+ ("pBlock = %p, pBlock->fu32Flags = %08X\n", pBlock, pBlock->fu32Flags));
+
+ /* Exclude from allocated list */
+ vbglPhysHeapExcludeBlock (pBlock);
+
+ dumpheap ("post exclude");
+
+ VBGL_PH_dprintf(("VbglR0PhysHeapFree %x size %x\n", p, pBlock->cbDataSize));
+
+ /* Mark as free */
+ pBlock->fu32Flags &= ~VBGL_PH_BF_ALLOCATED;
+
+ /* Insert to free list */
+ vbglPhysHeapInsertBlock (NULL, pBlock);
+
+ dumpheap ("post insert");
+
+ /* Adjust the chunk allocated blocks counter */
+ pBlock->pChunk->cAllocatedBlocks--;
+
+ VBGL_PH_ASSERT(pBlock->pChunk->cAllocatedBlocks >= 0);
+
+    /* Check if we can merge 2 free blocks. To simplify heap maintenance,
+     * we only look at the block after the just freed one.
+     * This does not prevent us from detecting entirely free chunks,
+     * and in most cases blocks are deallocated in reverse allocation
+     * order, in which case the merging works.
+     */
+
+ pNeighbour = (VBGLPHYSHEAPBLOCK *)((char *)p + pBlock->cbDataSize);
+
+ if ((char *)pNeighbour < (char *)pBlock->pChunk + pBlock->pChunk->cbSize
+ && (pNeighbour->fu32Flags & VBGL_PH_BF_ALLOCATED) == 0)
+ {
+ /* The next block is free as well. */
+
+ /* Adjust size of current memory block */
+ pBlock->cbDataSize += pNeighbour->cbDataSize + sizeof (VBGLPHYSHEAPBLOCK);
+
+ /* Exclude the next neighbour */
+ vbglPhysHeapExcludeBlock (pNeighbour);
+ }
+
+ dumpheap ("post merge");
+
+ /* now check if there are 2 or more free chunks */
+ if (pBlock->pChunk->cAllocatedBlocks == 0)
+ {
+ VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead;
+
+ uint32_t u32FreeChunks = 0;
+
+ while (pChunk)
+ {
+ if (pChunk->cAllocatedBlocks == 0)
+ {
+ u32FreeChunks++;
+ }
+
+ pChunk = pChunk->pNext;
+ }
+
+ if (u32FreeChunks > 1)
+ {
+ /* Delete current chunk, it will also exclude all free blocks
+ * remaining in the chunk from the free list, so the pBlock
+ * will also be invalid after this.
+ */
+ vbglPhysHeapChunkDelete (pBlock->pChunk);
+ }
+ }
+
+ dumpheap ("post free");
+
+ vbglPhysHeapLeave ();
+}
+
+DECLR0VBGL(int) VbglR0PhysHeapInit (void)
+{
+    /* Create the heap lock first, so that a mutex creation failure
+     * cannot go unnoticed. */
+    int rc = RTSemFastMutexCreate(&g_vbgldata.mutexHeap);
+    if (RT_SUCCESS(rc))
+    {
+        /* Allocate the first chunk of the heap. */
+        VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapChunkAlloc (0);
+        if (!pBlock)
+            rc = VERR_NO_MEMORY;
+    }
+
+    return rc;
+}
+
+DECLR0VBGL(void) VbglR0PhysHeapTerminate (void)
+{
+ while (g_vbgldata.pChunkHead)
+ {
+ vbglPhysHeapChunkDelete (g_vbgldata.pChunkHead);
+ }
+
+ RTSemFastMutexDestroy(g_vbgldata.mutexHeap);
+}
+
diff --git a/ubuntu/vbox/vboxsf/VBoxGuestR0LibSharedFolders.c b/ubuntu/vbox/vboxsf/VBoxGuestR0LibSharedFolders.c
index 13db0c1e476f..80164ab8d1ee 100644
--- a/ubuntu/vbox/vboxsf/VBoxGuestR0LibSharedFolders.c
+++ b/ubuntu/vbox/vboxsf/VBoxGuestR0LibSharedFolders.c
@@ -50,9 +50,6 @@
/*********************************************************************************************************************************
* Defined Constants And Macros *
*********************************************************************************************************************************/
-#define SHFL_CPARMS_SET_UTF8 0
-#define SHFL_CPARMS_SET_SYMLINKS 0
-
#define VBOX_INIT_CALL(a, b, c) \
LogFunc(("%s, idClient=%d\n", "SHFL_FN_" # b, (c)->idClient)); \
VBGL_HGCM_HDR_INIT(a, (c)->idClient, SHFL_FN_##b, SHFL_CPARMS_##b); \
@@ -65,10 +62,6 @@
-/** @todo We only need HGCM, not physical memory, so other guests should also
- * switch to calling vbglR0HGCMInit() and vbglR0HGCMTerminate() instead
- * of VbglR0SfInit() and VbglR0SfTerm(). */
-#ifndef RT_OS_LINUX
DECLVBGL(int) VbglR0SfInit(void)
{
return VbglR0InitClient();
@@ -78,7 +71,6 @@ DECLVBGL(void) VbglR0SfTerm(void)
{
VbglR0TerminateClient();
}
-#endif
DECLVBGL(int) VbglR0SfConnect(PVBGLSFCLIENT pClient)
{
@@ -105,6 +97,19 @@ DECLVBGL(void) VbglR0SfDisconnect(PVBGLSFCLIENT pClient)
return;
}
+#if !defined(RT_OS_LINUX)
+
+DECLVBGL(int) VbglR0SfSetUtf8(PVBGLSFCLIENT pClient)
+{
+ int rc;
+ VBGLIOCHGCMCALL callInfo;
+
+ VBOX_INIT_CALL(&callInfo, SET_UTF8, pClient);
+ rc = VbglR0HGCMCall(pClient->handle, &callInfo, sizeof(callInfo));
+/* Log(("VBOXSF: VbglR0SfSetUtf8: VbglR0HGCMCall rc = %#x, result = %#x\n", rc, data.callInfo.Hdr.rc)); */
+ return rc;
+}
+
/** @name Deprecated VBGL shared folder helpers.
*
* @deprecated These are all use the slow VbglR0HGCMCall interface, that
@@ -626,17 +631,6 @@ DECLVBGL(int) VbglR0SfLock(PVBGLSFCLIENT pClient, PVBGLSFMAP pMap, SHFLHANDLE hF
return rc;
}
-DECLVBGL(int) VbglR0SfSetUtf8(PVBGLSFCLIENT pClient)
-{
- int rc;
- VBGLIOCHGCMCALL callInfo;
-
- VBOX_INIT_CALL(&callInfo, SET_UTF8, pClient);
- rc = VbglR0HGCMCall(pClient->handle, &callInfo, sizeof(callInfo));
-/* Log(("VBOXSF: VbglR0SfSetUtf8: VbglR0HGCMCall rc = %#x, result = %#x\n", rc, data.callInfo.Hdr.rc)); */
- return rc;
-}
-
DECLVBGL(int) VbglR0SfReadLink(PVBGLSFCLIENT pClient, PVBGLSFMAP pMap, PSHFLSTRING pParsedPath, uint32_t cbBuffer, uint8_t *pBuffer)
{
int rc;
@@ -699,6 +693,7 @@ DECLVBGL(int) VbglR0SfSetSymlinks(PVBGLSFCLIENT pClient)
return rc;
}
+#endif /* !RT_OS_LINUX */
/** @} */
diff --git a/ubuntu/vbox/vboxsf/dirops.c b/ubuntu/vbox/vboxsf/dirops.c
index 9b14382f5e7e..7db680645a6b 100644
--- a/ubuntu/vbox/vboxsf/dirops.c
+++ b/ubuntu/vbox/vboxsf/dirops.c
@@ -28,70 +28,116 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
#include "vfsmod.h"
#include <iprt/err.h>
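+
+/* Fallback for kernels without d_in_lookup() (added in 4.7): an unhashed
+   dentry is the closest approximation on older kernels. */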
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
+# define d_in_lookup(a_pDirEntry) (d_unhashed(a_pDirEntry))
+#endif
+
+
+
/**
- * Open a directory. Read the complete content into a buffer.
+ * Open a directory (implements file_operations::open).
*
- * @param inode inode
- * @param file file
- * @returns 0 on success, Linux error code otherwise
+ * @returns 0 on success, negative errno otherwise.
+ * @param inode inode
+ * @param file file
*/
-static int sf_dir_open(struct inode *inode, struct file *file)
+static int vbsf_dir_open(struct inode *inode, struct file *file)
{
- int rc;
- int err;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
- struct sf_dir_info *sf_d;
- struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
- SHFLCREATEPARMS params;
-
- TRACE();
- BUG_ON(!sf_g);
- BUG_ON(!sf_i);
-
- if (file->private_data) {
- LogFunc(("sf_dir_open() called on already opened directory '%s'\n", sf_i->path->String.utf8));
- return 0;
- }
-
- sf_d = sf_dir_info_alloc();
- if (!sf_d) {
- LogRelFunc(("could not allocate directory info for '%s'\n",
- sf_i->path->String.utf8));
- return -ENOMEM;
- }
-
- RT_ZERO(params);
- params.Handle = SHFL_HANDLE_NIL;
- params.CreateFlags = 0
- | SHFL_CF_DIRECTORY
- | SHFL_CF_ACT_OPEN_IF_EXISTS
- | SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_READ;
-
- LogFunc(("sf_dir_open(): calling VbglR0SfCreate, folder %s, flags %#x\n", sf_i->path->String.utf8, params.CreateFlags));
- rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, ¶ms);
- if (RT_SUCCESS(rc)) {
- if (params.Result == SHFL_FILE_EXISTS) {
- err = sf_dir_read_all(sf_g, sf_i, sf_d, params.Handle);
- if (!err)
- file->private_data = sf_d;
- } else
- err = -ENOENT;
-
- rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle);
- if (RT_FAILURE(rc))
- LogFunc(("sf_dir_open(): VbglR0SfClose(%s) after err=%d failed rc=%Rrc\n", sf_i->path->String.utf8, err, rc));
- } else
- err = -EPERM;
-
- if (err)
- sf_dir_info_free(sf_d);
-
- return err;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
+ struct dentry *dentry = VBSF_GET_F_DENTRY(file);
+ struct vbsf_dir_info *sf_d;
+ int rc;
+
+ SFLOGFLOW(("vbsf_dir_open: inode=%p file=%p %s\n", inode, file, sf_i && sf_i->path ? sf_i->path->String.ach : NULL));
+ AssertReturn(pSuperInfo, -EINVAL);
+ AssertReturn(sf_i, -EINVAL);
+ AssertReturn(!file->private_data, 0);
+
+ /*
+ * Allocate and initialize our directory info structure.
+     * We delay buffer allocation until the directory is first read.
+ */
+ sf_d = kmalloc(sizeof(*sf_d), GFP_KERNEL);
+ if (sf_d) {
+ VBOXSFCREATEREQ *pReq;
+ RT_ZERO(*sf_d);
+ sf_d->u32Magic = VBSF_DIR_INFO_MAGIC;
+ sema_init(&sf_d->Lock, 1);
+
+ /*
+     * Try to open the directory.
+ */
+ pReq = (VBOXSFCREATEREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF(VBOXSFCREATEREQ, StrPath.String) + sf_i->path->u16Size);
+ if (pReq) {
+ memcpy(&pReq->StrPath, sf_i->path, SHFLSTRING_HEADER_SIZE + sf_i->path->u16Size);
+ RT_ZERO(pReq->CreateParms);
+ pReq->CreateParms.Handle = SHFL_HANDLE_NIL;
+ pReq->CreateParms.CreateFlags = SHFL_CF_DIRECTORY
+ | SHFL_CF_ACT_OPEN_IF_EXISTS
+ | SHFL_CF_ACT_FAIL_IF_NEW
+ | SHFL_CF_ACCESS_READ;
+
+ LogFunc(("calling VbglR0SfHostReqCreate on folder %s, flags %#x\n",
+ sf_i->path->String.utf8, pReq->CreateParms.CreateFlags));
+ rc = VbglR0SfHostReqCreate(pSuperInfo->map.root, pReq);
+ if (RT_SUCCESS(rc)) {
+ if (pReq->CreateParms.Result == SHFL_FILE_EXISTS) {
+ Assert(pReq->CreateParms.Handle != SHFL_HANDLE_NIL);
+
+ /*
+ * Update the inode info with fresh stats and increase the TTL for the
+ * dentry cache chain that got us here.
+ */
+ vbsf_update_inode(inode, sf_i, &pReq->CreateParms.Info, pSuperInfo,
+ true /*fLocked*/ /** @todo inode locking */, 0 /*fSetAttrs*/);
+ vbsf_dentry_chain_increase_ttl(dentry);
+
+ sf_d->Handle.hHost = pReq->CreateParms.Handle;
+ sf_d->Handle.cRefs = 1;
+ sf_d->Handle.fFlags = VBSF_HANDLE_F_READ | VBSF_HANDLE_F_DIR | VBSF_HANDLE_F_MAGIC;
+ vbsf_handle_append(sf_i, &sf_d->Handle);
+
+ file->private_data = sf_d;
+ VbglR0PhysHeapFree(pReq);
+ SFLOGFLOW(("vbsf_dir_open(%p,%p): returns 0; hHost=%#llx\n", inode, file, sf_d->Handle.hHost));
+ return 0;
+
+ }
+ Assert(pReq->CreateParms.Handle == SHFL_HANDLE_NIL);
+
+ /*
+ * Directory does not exist, so we probably got some invalid
+ * dir cache and inode info.
+ */
+ /** @todo do more to invalidate dentry and inode here. */
+ vbsf_dentry_invalidate_ttl(dentry);
+ sf_i->force_restat = true;
+ rc = -ENOENT;
+ } else
+ rc = -EPERM;
+ VbglR0PhysHeapFree(pReq);
+ } else {
+ LogRelMaxFunc(64, ("failed to allocate %zu bytes for '%s'\n",
+ RT_UOFFSETOF(VBOXSFCREATEREQ, StrPath.String) + sf_i->path->u16Size, sf_i->path->String.ach));
+ rc = -ENOMEM;
+ }
+ sf_d->u32Magic = VBSF_DIR_INFO_MAGIC_DEAD;
+ kfree(sf_d);
+ } else
+ rc = -ENOMEM;
+ SFLOGFLOW(("vbsf_dir_open(%p,%p): returns %d\n", inode, file, rc));
+ return rc;
}
+
/**
* This is called when reference count of [file] goes to zero. Notify
* the host that it can free whatever is associated with this directory
@@ -101,158 +147,168 @@ static int sf_dir_open(struct inode *inode, struct file *file)
* @param file file
* returns 0 on success, Linux error code otherwise
*/
-static int sf_dir_release(struct inode *inode, struct file *file)
+static int vbsf_dir_release(struct inode *inode, struct file *file)
{
- TRACE();
+ struct vbsf_dir_info *sf_d = (struct vbsf_dir_info *)file->private_data;
+
+ SFLOGFLOW(("vbsf_dir_release(%p,%p): sf_d=%p hHost=%#llx\n", inode, file, sf_d, sf_d ? sf_d->Handle.hHost : SHFL_HANDLE_NIL));
+
+ if (sf_d) {
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+
+ /* Invalidate the non-handle part. */
+ sf_d->u32Magic = VBSF_DIR_INFO_MAGIC_DEAD;
+ sf_d->cEntriesLeft = 0;
+ sf_d->cbValid = 0;
+ sf_d->pEntry = NULL;
+ sf_d->fNoMoreFiles = false;
+ if (sf_d->pBuf) {
+ kfree(sf_d->pBuf);
+ sf_d->pBuf = NULL;
+ }
- if (file->private_data)
- sf_dir_info_free(file->private_data);
+ /* Closes the handle and frees the structure when the last reference is released. */
+ vbsf_handle_release(&sf_d->Handle, pSuperInfo, "vbsf_dir_release");
+ }
- return 0;
+ return 0;
}
+
/**
- * Translate RTFMODE into DT_xxx (in conjunction to rtDirType())
- * @param fMode file mode
+ * Translate RTFMODE into DT_xxx (in conjunction with rtDirType()).
* returns d_type
+ * @param fMode file mode
*/
-static int sf_get_d_type(RTFMODE fMode)
+DECLINLINE(int) vbsf_get_d_type(RTFMODE fMode)
{
- int d_type;
- switch (fMode & RTFS_TYPE_MASK) {
- case RTFS_TYPE_FIFO:
- d_type = DT_FIFO;
- break;
- case RTFS_TYPE_DEV_CHAR:
- d_type = DT_CHR;
- break;
- case RTFS_TYPE_DIRECTORY:
- d_type = DT_DIR;
- break;
- case RTFS_TYPE_DEV_BLOCK:
- d_type = DT_BLK;
- break;
- case RTFS_TYPE_FILE:
- d_type = DT_REG;
- break;
- case RTFS_TYPE_SYMLINK:
- d_type = DT_LNK;
- break;
- case RTFS_TYPE_SOCKET:
- d_type = DT_SOCK;
- break;
- case RTFS_TYPE_WHITEOUT:
- d_type = DT_WHT;
- break;
- default:
- d_type = DT_UNKNOWN;
- break;
- }
- return d_type;
+ switch (fMode & RTFS_TYPE_MASK) {
+ case RTFS_TYPE_FIFO: return DT_FIFO;
+ case RTFS_TYPE_DEV_CHAR: return DT_CHR;
+ case RTFS_TYPE_DIRECTORY: return DT_DIR;
+ case RTFS_TYPE_DEV_BLOCK: return DT_BLK;
+ case RTFS_TYPE_FILE: return DT_REG;
+ case RTFS_TYPE_SYMLINK: return DT_LNK;
+ case RTFS_TYPE_SOCKET: return DT_SOCK;
+ case RTFS_TYPE_WHITEOUT: return DT_WHT;
+ }
+ return DT_UNKNOWN;
}
+
/**
- * Extract element ([dir]->f_pos) from the directory [dir] into [d_name].
+ * Refills the buffer with more entries.
*
- * @returns 0 for success, 1 for end reached, Linux error code otherwise.
+ * @returns 0 on success, negative errno on error.
*/
-static int sf_getdent(struct file *dir, char d_name[NAME_MAX], int *d_type)
+static int vbsf_dir_read_more(struct vbsf_dir_info *sf_d, struct vbsf_super_info *pSuperInfo, bool fRestart)
{
- loff_t cur;
- struct sf_glob_info *sf_g;
- struct sf_dir_info *sf_d;
- struct sf_inode_info *sf_i;
- struct inode *inode;
- struct list_head *pos, *list;
-
- TRACE();
-
- inode = GET_F_DENTRY(dir)->d_inode;
- sf_i = GET_INODE_INFO(inode);
- sf_g = GET_GLOB_INFO(inode->i_sb);
- sf_d = dir->private_data;
-
- BUG_ON(!sf_g);
- BUG_ON(!sf_d);
- BUG_ON(!sf_i);
-
- if (sf_i->force_reread) {
- int rc;
- int err;
- SHFLCREATEPARMS params;
-
- RT_ZERO(params);
- params.Handle = SHFL_HANDLE_NIL;
- params.CreateFlags = 0
- | SHFL_CF_DIRECTORY
- | SHFL_CF_ACT_OPEN_IF_EXISTS
- | SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_READ;
-
- LogFunc(("sf_getdent: calling VbglR0SfCreate, folder %s, flags %#x\n", sf_i->path->String.utf8, params.CreateFlags));
- rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path,
- ¶ms);
- if (RT_FAILURE(rc)) {
- LogFunc(("VbglR0SfCreate(%s) failed rc=%Rrc\n",
- sf_i->path->String.utf8, rc));
- return -EPERM;
- }
-
- if (params.Result != SHFL_FILE_EXISTS) {
- LogFunc(("directory %s does not exist\n",
- sf_i->path->String.utf8));
- sf_dir_info_free(sf_d);
- return -ENOENT;
- }
-
- sf_dir_info_empty(sf_d);
- err = sf_dir_read_all(sf_g, sf_i, sf_d, params.Handle);
- rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle);
- if (RT_FAILURE(rc))
- LogFunc(("VbglR0SfClose(%s) failed rc=%Rrc\n",
- sf_i->path->String.utf8, rc));
- if (err)
- return err;
-
- sf_i->force_reread = 0;
- }
-
- cur = 0;
- list = &sf_d->info_list;
- list_for_each(pos, list) {
- struct sf_dir_buf *b;
- SHFLDIRINFO *info;
- loff_t i;
-
- b = list_entry(pos, struct sf_dir_buf, head);
- if (dir->f_pos >= cur + b->cEntries) {
- cur += b->cEntries;
- continue;
- }
-
- for (i = 0, info = b->buf; i < dir->f_pos - cur; ++i) {
- size_t size;
-
- size =
- offsetof(SHFLDIRINFO,
- name.String) + info->name.u16Size;
- info = (SHFLDIRINFO *) ((uintptr_t) info + size);
- }
-
- *d_type = sf_get_d_type(info->Info.Attr.fMode);
-
- return sf_nlscpy(sf_g, d_name, NAME_MAX,
- info->name.String.utf8, info->name.u16Length);
- }
-
- return 1;
+ int rc;
+ VBOXSFLISTDIRREQ *pReq;
+
+ /*
+ * Don't call the host again if we've reached the end of the
+ * directory entries already.
+ */
+ if (sf_d->fNoMoreFiles) {
+ if (!fRestart) {
+ SFLOGFLOW(("vbsf_dir_read_more: no more files\n"));
+ return 0;
+ }
+ sf_d->fNoMoreFiles = false;
+ }
+
+ /*
+ * Make sure we've got some kind of buffers.
+ */
+ if (sf_d->pBuf) {
+ /* Likely, except for the first time. */
+ } else {
+ sf_d->pBuf = (PSHFLDIRINFO)kmalloc(pSuperInfo->cbDirBuf, GFP_KERNEL);
+ if (sf_d->pBuf)
+ sf_d->cbBuf = pSuperInfo->cbDirBuf;
+ else {
+ sf_d->pBuf = (PSHFLDIRINFO)kmalloc(_4K, GFP_KERNEL);
+ if (!sf_d->pBuf) {
+ LogRelMax(10, ("vbsf_dir_read_more: Failed to allocate buffer!\n"));
+ return -ENOMEM;
+ }
+ sf_d->cbBuf = _4K;
+ }
+ }
+
+ /*
+ * Allocate a request buffer.
+ */
+ pReq = (VBOXSFLISTDIRREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq) {
+ rc = VbglR0SfHostReqListDirContig2x(pSuperInfo->map.root, pReq, sf_d->Handle.hHost, NULL, NIL_RTGCPHYS64,
+ fRestart ? SHFL_LIST_RESTART : SHFL_LIST_NONE,
+ sf_d->pBuf, virt_to_phys(sf_d->pBuf), sf_d->cbBuf);
+ if (RT_SUCCESS(rc)) {
+ sf_d->pEntry = sf_d->pBuf;
+ sf_d->cbValid = pReq->Parms.cb32Buffer.u.value32;
+ sf_d->cEntriesLeft = pReq->Parms.c32Entries.u.value32;
+ sf_d->fNoMoreFiles = pReq->Parms.f32More.u.value32 == 0;
+ } else {
+ sf_d->pEntry = sf_d->pBuf;
+ sf_d->cbValid = 0;
+ sf_d->cEntriesLeft = 0;
+ if (rc == VERR_NO_MORE_FILES) {
+ sf_d->fNoMoreFiles = true;
+ rc = 0;
+ } else {
+ /* In theory we could end up here with a buffer overflow, but
+ with a 4KB minimum buffer size that's very unlikely with the
+ typical filename length of today's file systems (2019). */
+ LogRelMax(16, ("vbsf_dir_read_more: VbglR0SfHostReqListDirContig2x -> %Rrc\n", rc));
+ rc = -EPROTO;
+ }
+ }
+ VbglR0PhysHeapFree(pReq);
+ } else
+ rc = -ENOMEM;
+ SFLOGFLOW(("vbsf_dir_read_more: returns %d; cbValid=%#x cEntriesLeft=%#x fNoMoreFiles=%d\n",
+ rc, sf_d->cbValid, sf_d->cEntriesLeft, sf_d->fNoMoreFiles));
+ return rc;
}
+
+/**
+ * Helper function for when we need to convert the name, avoids wasting stack in
+ * the UTF-8 code path.
+ */
+DECL_NO_INLINE(static, bool) vbsf_dir_emit_nls(
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
+ struct dir_context *ctx,
+# else
+ void *opaque, filldir_t filldir, loff_t offPos,
+# endif
+ const char *pszSrcName, uint16_t cchSrcName, ino_t d_ino, int d_type,
+ struct vbsf_super_info *pSuperInfo)
+{
+ char szDstName[NAME_MAX];
+ int rc = vbsf_nlscpy(pSuperInfo, szDstName, sizeof(szDstName), pszSrcName, cchSrcName);
+ if (rc == 0) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
+ return dir_emit(ctx, szDstName, strlen(szDstName), d_ino, d_type);
+#else
+ return filldir(opaque, szDstName, strlen(szDstName), offPos, d_ino, d_type) == 0;
+#endif
+ }
+
+ /* Assuming this is a buffer overflow issue, just silently skip it. */
+ SFLOGFLOW(("vbsf_dir_emit_nls: vbsf_nlscopy failed with %d for '%s'\n", rc, pszSrcName));
+ return true;
+}
+
+
/**
* This is called when vfs wants to populate internal buffers with
* directory [dir]s contents. [opaque] is an argument to the
* [filldir]. [filldir] magically modifies it's argument - [opaque]
* and takes following additional arguments (which i in turn get from
- * the host via sf_getdent):
+ * the host via vbsf_getdent):
*
* name : name of the entry (i must also supply it's length huh?)
* type : type of the entry (FILE | DIR | etc) (i ellect to use DT_UNKNOWN)
@@ -266,96 +322,323 @@ static int sf_getdent(struct file *dir, char d_name[NAME_MAX], int *d_type)
* Extract elements from the directory listing (incrementing f_pos
* along the way) and feed them to [filldir] until:
*
- * a. there are no more entries (i.e. sf_getdent set done to 1)
+ * a. there are no more entries (i.e. vbsf_getdent set done to 1)
* b. failure to compute fake inode number
* c. filldir returns an error (see comment on that)
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
-static int sf_dir_iterate(struct file *dir, struct dir_context *ctx)
+static int vbsf_dir_iterate(struct file *dir, struct dir_context *ctx)
#else
-static int sf_dir_read(struct file *dir, void *opaque, filldir_t filldir)
+static int vbsf_dir_read(struct file *dir, void *opaque, filldir_t filldir)
#endif
{
- TRACE();
- for (;;) {
- int err;
- ino_t fake_ino;
- loff_t sanity;
- char d_name[NAME_MAX];
- int d_type = DT_UNKNOWN;
-
- err = sf_getdent(dir, d_name, &d_type);
- switch (err) {
- case 1:
- return 0;
-
- case 0:
- break;
-
- case -1:
- default:
- /* skip erroneous entry and proceed */
- LogFunc(("sf_getdent error %d\n", err));
- dir->f_pos += 1;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
- ctx->pos += 1;
+ loff_t offPos = ctx->pos;
+#else
+ loff_t offPos = dir->f_pos;
#endif
- continue;
- }
-
- /* d_name now contains a valid entry name */
-
+ struct vbsf_dir_info *sf_d = (struct vbsf_dir_info *)dir->private_data;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(VBSF_GET_F_DENTRY(dir)->d_sb);
+ int rc;
+
+ /*
+ * Lock the directory info structures.
+ */
+ if (RT_LIKELY(down_interruptible(&sf_d->Lock) == 0)) {
+ /* likely */
+ } else
+ return -ERESTARTSYS;
+
+ /*
+     * Any seek performed in the meantime?
+ */
+ if (offPos == sf_d->offPos) {
+ /* likely */
+ } else {
+        /* Restart the search if offPos is lower than the current buffer position. */
+ loff_t offCurEntry = sf_d->offPos;
+ if (offPos < offCurEntry) {
+ rc = vbsf_dir_read_more(sf_d, pSuperInfo, true /*fRestart*/);
+ if (rc == 0)
+ offCurEntry = 0;
+ else {
+ up(&sf_d->Lock);
+ return rc;
+ }
+ }
+
+ /* Skip ahead to offPos. */
+ while (offCurEntry < offPos) {
+ uint32_t cEntriesLeft = sf_d->cEntriesLeft;
+ if ((uint64_t)(offPos - offCurEntry) >= cEntriesLeft) {
+ /* Skip the current buffer and read the next: */
+ offCurEntry += cEntriesLeft;
+ sf_d->offPos = offCurEntry;
+ sf_d->cEntriesLeft = 0;
+ rc = vbsf_dir_read_more(sf_d, pSuperInfo, false /*fRestart*/);
+ if (rc != 0 || sf_d->cEntriesLeft == 0) {
+ up(&sf_d->Lock);
+ return rc;
+ }
+ } else {
+ do
+ {
+ PSHFLDIRINFO pEntry = sf_d->pEntry;
+ pEntry = (PSHFLDIRINFO)&pEntry->name.String.utf8[pEntry->name.u16Length];
+ AssertLogRelBreakStmt( cEntriesLeft == 1
+ || (uintptr_t)pEntry - (uintptr_t)sf_d->pBuf
+ <= sf_d->cbValid - RT_UOFFSETOF(SHFLDIRINFO, name.String),
+ sf_d->cEntriesLeft = 0);
+ sf_d->cEntriesLeft = --cEntriesLeft;
+ sf_d->offPos = ++offCurEntry;
+ } while (offPos < sf_d->offPos);
+ }
+ }
+ }
+
+ /*
+ * Handle '.' and '..' specially so we get the inode numbers right.
+ * We'll skip any '.' or '..' returned by the host (included in pos,
+ * however, to simplify the above skipping code).
+ */
+ if (offPos < 2) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
- sanity = ctx->pos + 0xbeef;
+ if (offPos == 0) {
+ if (dir_emit_dot(dir, ctx))
+ dir->f_pos = ctx->pos = sf_d->offPos = offPos = 1;
+ else {
+ up(&sf_d->Lock);
+ return 0;
+ }
+ }
+ if (offPos == 1) {
+ if (dir_emit_dotdot(dir, ctx))
+ dir->f_pos = ctx->pos = sf_d->offPos = offPos = 2;
+ else {
+ up(&sf_d->Lock);
+ return 0;
+ }
+ }
#else
- sanity = dir->f_pos + 0xbeef;
+ if (offPos == 0) {
+ rc = filldir(opaque, ".", 1, 0, VBSF_GET_F_DENTRY(dir)->d_inode->i_ino, DT_DIR);
+ if (!rc)
+ dir->f_pos = sf_d->offPos = offPos = 1;
+ else {
+ up(&sf_d->Lock);
+ return 0;
+ }
+ }
+ if (offPos == 1) {
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5)
+ rc = filldir(opaque, "..", 2, 1, parent_ino(VBSF_GET_F_DENTRY(dir)), DT_DIR);
+# else
+ rc = filldir(opaque, "..", 2, 1, VBSF_GET_F_DENTRY(dir)->d_parent->d_inode->i_ino, DT_DIR);
+# endif
+ if (!rc)
+ dir->f_pos = sf_d->offPos = offPos = 2;
+ else {
+ up(&sf_d->Lock);
+ return 0;
+ }
+ }
#endif
- fake_ino = sanity;
- if (sanity - fake_ino) {
- LogRelFunc(("can not compute ino\n"));
- return -EINVAL;
- }
+ }
+
+ /*
+ * Produce stuff.
+ */
+ Assert(offPos == sf_d->offPos);
+ for (;;) {
+ PSHFLDIRINFO pBuf;
+ PSHFLDIRINFO pEntry;
+
+ /*
+ * Do we need to read more?
+ */
+ uint32_t cbValid = sf_d->cbValid;
+ uint32_t cEntriesLeft = sf_d->cEntriesLeft;
+ if (!cEntriesLeft) {
+ rc = vbsf_dir_read_more(sf_d, pSuperInfo, false /*fRestart*/);
+ if (rc == 0) {
+ cEntriesLeft = sf_d->cEntriesLeft;
+ if (!cEntriesLeft) {
+ up(&sf_d->Lock);
+ return 0;
+ }
+ cbValid = sf_d->cbValid;
+ } else {
+ up(&sf_d->Lock);
+ return rc;
+ }
+ }
+
+ /*
+ * Feed entries to the caller.
+ */
+ pBuf = sf_d->pBuf;
+ pEntry = sf_d->pEntry;
+ do {
+ /*
+ * Validate the entry in case the host is messing with us.
+ * We're ASSUMING the host gives us a zero terminated string (UTF-8) here.
+ */
+ uintptr_t const offEntryInBuf = (uintptr_t)pEntry - (uintptr_t)pBuf;
+ uint16_t cbSrcName;
+ uint16_t cchSrcName;
+ AssertLogRelMsgBreak(offEntryInBuf + RT_UOFFSETOF(SHFLDIRINFO, name.String) <= cbValid,
+ ("%#llx + %#x vs %#x\n", offEntryInBuf, RT_UOFFSETOF(SHFLDIRINFO, name.String), cbValid));
+ cbSrcName = pEntry->name.u16Size;
+ cchSrcName = pEntry->name.u16Length;
+ AssertLogRelBreak(offEntryInBuf + RT_UOFFSETOF(SHFLDIRINFO, name.String) + cbSrcName <= cbValid);
+ AssertLogRelBreak(cchSrcName < cbSrcName);
+ AssertLogRelBreak(pEntry->name.String.ach[cchSrcName] == '\0');
+
+ /*
+             * Filter out '.' and '..' entries.
+ */
+ if ( cchSrcName > 2
+ || pEntry->name.String.ach[0] != '.'
+ || ( cchSrcName == 2
+ && pEntry->name.String.ach[1] != '.')) {
+ int const d_type = vbsf_get_d_type(pEntry->Info.Attr.fMode);
+ ino_t const d_ino = (ino_t)offPos + 0xbeef; /* very fake */
+ bool fContinue;
+ if (pSuperInfo->fNlsIsUtf8) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
- if (!dir_emit(ctx, d_name, strlen(d_name), fake_ino, d_type)) {
- LogFunc(("dir_emit failed\n"));
- return 0;
- }
+ fContinue = dir_emit(ctx, pEntry->name.String.ach, cchSrcName, d_ino, d_type);
#else
- err =
- filldir(opaque, d_name, strlen(d_name), dir->f_pos,
- fake_ino, d_type);
- if (err) {
- LogFunc(("filldir returned error %d\n", err));
- /* Rely on the fact that filldir returns error
- only when it runs out of space in opaque */
- return 0;
- }
+ fContinue = filldir(opaque, pEntry->name.String.ach, cchSrcName, offPos, d_ino, d_type) == 0;
#endif
-
- dir->f_pos += 1;
+ } else {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
- ctx->pos += 1;
+ fContinue = vbsf_dir_emit_nls(ctx, pEntry->name.String.ach, cchSrcName, d_ino, d_type, pSuperInfo);
+#else
+ fContinue = vbsf_dir_emit_nls(opaque, filldir, offPos, pEntry->name.String.ach, cchSrcName,
+ d_ino, d_type, pSuperInfo);
#endif
- }
-
- BUG();
+ }
+ if (fContinue) {
+ /* likely */
+ } else {
+ sf_d->cEntriesLeft = cEntriesLeft;
+ sf_d->pEntry = pEntry;
+ sf_d->offPos = offPos;
+ up(&sf_d->Lock);
+ return 0;
+ }
+ }
+
+ /*
+ * Advance to the next entry.
+ */
+ pEntry = (PSHFLDIRINFO)((uintptr_t)pEntry + RT_UOFFSETOF(SHFLDIRINFO, name.String) + cbSrcName);
+ offPos += 1;
+ dir->f_pos = offPos;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
+ ctx->pos = offPos;
+#endif
+ cEntriesLeft -= 1;
+ } while (cEntriesLeft > 0);
+
+ /* Done with all available entries. */
+ sf_d->offPos = offPos + cEntriesLeft;
+ sf_d->pEntry = pBuf;
+ sf_d->cEntriesLeft = 0;
+ }
}
-struct file_operations sf_dir_fops = {
- .open = sf_dir_open,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
- .iterate = sf_dir_iterate,
+
+/**
+ * Directory file operations.
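+ *
+ * Note: from 4.7 the VFS may call .iterate_shared with the inode locked
+ * only in shared mode, allowing concurrent readdirs on the same open
+ * directory; vbsf_dir_iterate() serializes these via vbsf_dir_info::Lock.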
+ */
+struct file_operations vbsf_dir_fops = {
+ .open = vbsf_dir_open,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+ .iterate_shared = vbsf_dir_iterate,
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
+ .iterate = vbsf_dir_iterate,
#else
- .readdir = sf_dir_read,
+ .readdir = vbsf_dir_read,
#endif
- .release = sf_dir_release,
- .read = generic_read_dir
+ .release = vbsf_dir_release,
+ .read = generic_read_dir,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
- , .llseek = generic_file_llseek
+ .llseek = generic_file_llseek
#endif
};
-/* iops */
+
+
+/*********************************************************************************************************************************
+* Directory Inode Operations *
+*********************************************************************************************************************************/
+
+/**
+ * Worker for vbsf_inode_lookup(), vbsf_create_worker() and
+ * vbsf_inode_instantiate().
+ */
+static struct inode *vbsf_create_inode(struct inode *parent, struct dentry *dentry, PSHFLSTRING path,
+ PSHFLFSOBJINFO pObjInfo, struct vbsf_super_info *pSuperInfo, bool fInstantiate)
+{
+ /*
+ * Allocate memory for our additional inode info and create an inode.
+ */
+ struct vbsf_inode_info *sf_new_i = (struct vbsf_inode_info *)kmalloc(sizeof(*sf_new_i), GFP_KERNEL);
+ if (sf_new_i) {
+ ino_t iNodeNo = iunique(parent->i_sb, 16);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
+ struct inode *pInode = iget_locked(parent->i_sb, iNodeNo);
+#else
+ struct inode *pInode = iget(parent->i_sb, iNodeNo);
+#endif
+ if (pInode) {
+ /*
+ * Initialize the two structures.
+ */
+#ifdef VBOX_STRICT
+ sf_new_i->u32Magic = SF_INODE_INFO_MAGIC;
+#endif
+ sf_new_i->path = path;
+ sf_new_i->force_restat = false;
+ sf_new_i->ts_up_to_date = jiffies;
+ RTListInit(&sf_new_i->HandleList);
+ sf_new_i->handle = SHFL_HANDLE_NIL;
+
+ VBSF_SET_INODE_INFO(pInode, sf_new_i);
+ vbsf_init_inode(pInode, sf_new_i, pObjInfo, pSuperInfo);
+
+ /*
+ * Before we unlock the new inode, we may need to call d_instantiate.
+ */
+ if (fInstantiate)
+ d_instantiate(dentry, pInode);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
+ unlock_new_inode(pInode);
+#endif
+ return pInode;
+
+ }
+ LogFunc(("iget failed\n"));
+ kfree(sf_new_i);
+ } else
+ LogRelFunc(("could not allocate memory for new inode info\n"));
+ return NULL;
+}
+
+
+/** Helper for vbsf_create_worker() and vbsf_inode_lookup() that wraps
+ * d_add() and setting d_op. */
+DECLINLINE(void) vbsf_d_add_inode(struct dentry *dentry, struct inode *pNewInode)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+ Assert(dentry->d_op == &vbsf_dentry_ops); /* (taken from the superblock) */
+#else
+ dentry->d_op = &vbsf_dentry_ops;
+#endif
+ d_add(dentry, pNewInode);
+}
+
/**
* This is called when vfs failed to locate dentry in the cache. The
@@ -366,253 +649,387 @@ struct file_operations sf_dir_fops = {
* the entry via other means. NULL(or "positive" pointer) ought to be
* returned in case of success and "negative" pointer on error
*/
-static struct dentry *sf_lookup(struct inode *parent, struct dentry *dentry
+static struct dentry *vbsf_inode_lookup(struct inode *parent, struct dentry *dentry
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
- , unsigned int flags
+ , unsigned int flags
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
- , struct nameidata *nd
+ , struct nameidata *nd
#endif
- )
+ )
{
- int err;
- struct sf_inode_info *sf_i, *sf_new_i;
- struct sf_glob_info *sf_g;
- SHFLSTRING *path;
- struct inode *inode;
- ino_t ino;
- SHFLFSOBJINFO fsinfo;
-
- TRACE();
- sf_g = GET_GLOB_INFO(parent->i_sb);
- sf_i = GET_INODE_INFO(parent);
-
- BUG_ON(!sf_g);
- BUG_ON(!sf_i);
-
- err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path);
- if (err)
- goto fail0;
-
- err = sf_stat(__func__, sf_g, path, &fsinfo, 1);
- if (err) {
- if (err == -ENOENT) {
- /* -ENOENT: add NULL inode to dentry so it later can be
- created via call to create/mkdir/open */
- kfree(path);
- inode = NULL;
- } else
- goto fail1;
- } else {
- sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL);
- if (!sf_new_i) {
- LogRelFunc(("could not allocate memory for new inode info\n"));
- err = -ENOMEM;
- goto fail1;
- }
- sf_new_i->handle = SHFL_HANDLE_NIL;
- sf_new_i->force_reread = 0;
-
- ino = iunique(parent->i_sb, 1);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
- inode = iget_locked(parent->i_sb, ino);
-#else
- inode = iget(parent->i_sb, ino);
-#endif
- if (!inode) {
- LogFunc(("iget failed\n"));
- err = -ENOMEM; /* XXX: ??? */
- goto fail2;
- }
-
- SET_INODE_INFO(inode, sf_new_i);
- sf_init_inode(sf_g, inode, &fsinfo);
- sf_new_i->path = path;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(parent->i_sb);
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(parent);
+ SHFLSTRING *path;
+ struct dentry *dret;
+ int rc;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
- unlock_new_inode(inode);
-#endif
- }
-
- sf_i->force_restat = 0;
- dentry->d_time = jiffies;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
- d_set_d_op(dentry, &sf_dentry_ops);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+ SFLOGFLOW(("vbsf_inode_lookup: parent=%p dentry=%p flags=%#x\n", parent, dentry, flags));
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ SFLOGFLOW(("vbsf_inode_lookup: parent=%p dentry=%p nd=%p{.flags=%#x}\n", parent, dentry, nd, nd ? nd->flags : 0));
#else
- dentry->d_op = &sf_dentry_ops;
+ SFLOGFLOW(("vbsf_inode_lookup: parent=%p dentry=%p\n", parent, dentry));
#endif
- d_add(dentry, inode);
- return NULL;
-
- fail2:
- kfree(sf_new_i);
-
- fail1:
- kfree(path);
- fail0:
- return ERR_PTR(err);
+ Assert(pSuperInfo);
+ Assert(sf_i && sf_i->u32Magic == SF_INODE_INFO_MAGIC);
+
+ /*
+ * Build the path. We'll associate the path with dret's inode on success.
+ */
+ rc = vbsf_path_from_dentry(pSuperInfo, sf_i, dentry, &path, __func__);
+ if (rc == 0) {
+ /*
+ * Do a lookup on the host side.
+ */
+ VBOXSFCREATEREQ *pReq = (VBOXSFCREATEREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq) + path->u16Size);
+ if (pReq) {
+ struct inode *pInode = NULL;
+
+ RT_ZERO(*pReq);
+ memcpy(&pReq->StrPath, path, SHFLSTRING_HEADER_SIZE + path->u16Size);
+ pReq->CreateParms.Handle = SHFL_HANDLE_NIL;
+ pReq->CreateParms.CreateFlags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW;
+
+ SFLOG2(("vbsf_inode_lookup: Calling VbglR0SfHostReqCreate on %s\n", path->String.utf8));
+ rc = VbglR0SfHostReqCreate(pSuperInfo->map.root, pReq);
+ if (RT_SUCCESS(rc)) {
+ if (pReq->CreateParms.Result == SHFL_FILE_EXISTS) {
+ /*
+ * Create an inode for the result. Since this also confirms
+ * the existence of all parent dentries, we increase their TTL.
+ */
+ pInode = vbsf_create_inode(parent, dentry, path, &pReq->CreateParms.Info, pSuperInfo, false /*fInstantiate*/);
+                    if (pInode) {
+ path = NULL; /* given to the inode */
+ dret = dentry;
+ } else
+ dret = (struct dentry *)ERR_PTR(-ENOMEM);
+ vbsf_dentry_chain_increase_parent_ttl(dentry);
+ } else if ( pReq->CreateParms.Result == SHFL_FILE_NOT_FOUND
+                           || pReq->CreateParms.Result == SHFL_PATH_NOT_FOUND /*this probably shouldn't happen*/) {
+ dret = dentry;
+ } else {
+ AssertMsgFailed(("%d\n", pReq->CreateParms.Result));
+ dret = (struct dentry *)ERR_PTR(-EPROTO);
+ }
+ } else if (rc == VERR_INVALID_NAME) {
+ SFLOGFLOW(("vbsf_inode_lookup: VERR_INVALID_NAME\n"));
+ dret = dentry; /* this can happen for names like 'foo*' on a Windows host */
+ } else if (rc == VERR_FILENAME_TOO_LONG) {
+ SFLOG(("vbsf_inode_lookup: VbglR0SfHostReqCreate failed on %s: VERR_FILENAME_TOO_LONG\n", path->String.utf8));
+ dret = (struct dentry *)ERR_PTR(-ENAMETOOLONG);
+ } else {
+ SFLOG(("vbsf_inode_lookup: VbglR0SfHostReqCreate failed on %s: %Rrc\n", path->String.utf8, rc));
+ dret = (struct dentry *)ERR_PTR(-EPROTO);
+ }
+ VbglR0PhysHeapFree(pReq);
+
+ /*
+ * When dret is set to dentry we got something to insert,
+ * though it may be negative (pInode == NULL).
+ */
+ if (dret == dentry) {
+ vbsf_dentry_set_update_jiffies(dentry, jiffies);
+ vbsf_d_add_inode(dentry, pInode);
+ dret = NULL;
+ }
+ } else {
+ SFLOGFLOW(("vbsf_inode_lookup: -ENOMEM (phys heap)\n"));
+ dret = (struct dentry *)ERR_PTR(-ENOMEM);
+ }
+ if (path)
+ kfree(path);
+ } else {
+ SFLOG(("vbsf_inode_lookup: vbsf_path_from_dentry failed: %d\n", rc));
+ dret = (struct dentry *)ERR_PTR(rc);
+ }
+ return dret;
}
+
/**
- * This should allocate memory for sf_inode_info, compute a unique inode
+ * This should allocate memory for vbsf_inode_info, compute a unique inode
* number, get an inode from vfs, initialize inode info, instantiate
* dentry.
*
* @param parent inode entry of the directory
* @param dentry directory cache entry
- * @param path path name
+ * @param path path name. Consumed on success.
* @param info file information
* @param handle handle
* @returns 0 on success, Linux error code otherwise
*/
-static int sf_instantiate(struct inode *parent, struct dentry *dentry,
- SHFLSTRING * path, PSHFLFSOBJINFO info,
- SHFLHANDLE handle)
+static int vbsf_inode_instantiate(struct inode *parent, struct dentry *dentry, PSHFLSTRING path,
+ PSHFLFSOBJINFO info, SHFLHANDLE handle)
{
- int err;
- ino_t ino;
- struct inode *inode;
- struct sf_inode_info *sf_new_i;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb);
-
- TRACE();
- BUG_ON(!sf_g);
-
- sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL);
- if (!sf_new_i) {
- LogRelFunc(("could not allocate inode info.\n"));
- err = -ENOMEM;
- goto fail0;
- }
-
- ino = iunique(parent->i_sb, 1);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
- inode = iget_locked(parent->i_sb, ino);
-#else
- inode = iget(parent->i_sb, ino);
-#endif
- if (!inode) {
- LogFunc(("iget failed\n"));
- err = -ENOMEM;
- goto fail1;
- }
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(parent->i_sb);
+ struct inode *pInode = vbsf_create_inode(parent, dentry, path, info, pSuperInfo, true /*fInstantiate*/);
+ if (pInode) {
+ /* Store this handle if we leave the handle open. */
+ struct vbsf_inode_info *sf_new_i = VBSF_GET_INODE_INFO(pInode);
+ sf_new_i->handle = handle;
+ return 0;
+ }
+ return -ENOMEM;
+}
- sf_init_inode(sf_g, inode, info);
- sf_new_i->path = path;
- SET_INODE_INFO(inode, sf_new_i);
- sf_new_i->force_restat = 1;
- sf_new_i->force_reread = 0;
- d_instantiate(dentry, inode);
+/**
+ * Create a new regular file / directory.
+ *
+ * @param parent inode of the directory
+ * @param dentry directory cache entry
+ * @param mode file mode
+ * @param fCreateFlags SHFL_CF_XXX.
+ * @param fStashHandle Whether the resulting handle should be stashed in
+ * the inode for a subsequent open call.
+ * @param fDoLookup Whether we're doing a lookup and need to d_add the
+ * inode we create to dentry.
+ * @param phHostFile Where to return the handle to the create file/dir.
+ * @param pfCreated Where to indicate whether the file/dir was created
+ * or not. Optional.
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int vbsf_create_worker(struct inode *parent, struct dentry *dentry, umode_t mode, uint32_t fCreateFlags,
+ bool fStashHandle, bool fDoLookup, SHFLHANDLE *phHostFile, bool *pfCreated)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
- unlock_new_inode(inode);
+{
+#ifdef SFLOG_ENABLED
+ const char * const pszPrefix = S_ISDIR(mode) ? "vbsf_create_worker/dir:" : "vbsf_create_worker/file:";
#endif
-
- /* Store this handle if we leave the handle open. */
- sf_new_i->handle = handle;
- return 0;
-
- fail1:
- kfree(sf_new_i);
-
- fail0:
- return err;
-
+ struct vbsf_inode_info *sf_parent_i = VBSF_GET_INODE_INFO(parent);
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(parent->i_sb);
+ PSHFLSTRING path;
+ int rc;
+
+ AssertReturn(sf_parent_i, -EINVAL);
+ AssertReturn(pSuperInfo, -EINVAL);
+
+ /*
+ * Build a path. We'll donate this to the inode on success.
+ */
+ rc = vbsf_path_from_dentry(pSuperInfo, sf_parent_i, dentry, &path, __func__);
+ if (rc == 0) {
+ /*
+ * Allocate, initialize and issue the SHFL_CREATE request.
+ */
+ /** @todo combine with vbsf_path_from_dentry? */
+ union CreateAuxReq
+ {
+ VBOXSFCREATEREQ Create;
+ VBOXSFCLOSEREQ Close;
+ } *pReq = (union CreateAuxReq *)VbglR0PhysHeapAlloc(RT_UOFFSETOF(VBOXSFCREATEREQ, StrPath.String) + path->u16Size);
+ if (pReq) {
+ memcpy(&pReq->Create.StrPath, path, SHFLSTRING_HEADER_SIZE + path->u16Size);
+ RT_ZERO(pReq->Create.CreateParms);
+ pReq->Create.CreateParms.Handle = SHFL_HANDLE_NIL;
+ pReq->Create.CreateParms.CreateFlags = fCreateFlags;
+ pReq->Create.CreateParms.Info.Attr.fMode = (S_ISDIR(mode) ? RTFS_TYPE_DIRECTORY : RTFS_TYPE_FILE)
+ | sf_access_permissions_to_vbox(mode);
+ pReq->Create.CreateParms.Info.Attr.enmAdditional = RTFSOBJATTRADD_NOTHING;
+
+ SFLOGFLOW(("%s calling VbglR0SfHostReqCreate(%s, %#x)\n", pszPrefix, path->String.ach, pReq->Create.CreateParms.CreateFlags));
+ rc = VbglR0SfHostReqCreate(pSuperInfo->map.root, &pReq->Create);
+ if (RT_SUCCESS(rc)) {
+ SFLOGFLOW(("%s VbglR0SfHostReqCreate returned %Rrc Result=%d Handle=%#llx\n",
+ pszPrefix, rc, pReq->Create.CreateParms.Result, pReq->Create.CreateParms.Handle));
+
+ /*
+ * Work the dentry cache and inode restatting.
+ */
+ if ( pReq->Create.CreateParms.Result == SHFL_FILE_CREATED
+ || pReq->Create.CreateParms.Result == SHFL_FILE_REPLACED) {
+ vbsf_dentry_chain_increase_parent_ttl(dentry);
+ sf_parent_i->force_restat = 1;
+ } else if ( pReq->Create.CreateParms.Result == SHFL_FILE_EXISTS
+ || pReq->Create.CreateParms.Result == SHFL_FILE_NOT_FOUND)
+ vbsf_dentry_chain_increase_parent_ttl(dentry);
+
+ /*
+ * If we got a handle back, we're good. Create an inode for it and return.
+ */
+ if (pReq->Create.CreateParms.Handle != SHFL_HANDLE_NIL) {
+ struct inode *pNewInode = vbsf_create_inode(parent, dentry, path, &pReq->Create.CreateParms.Info, pSuperInfo,
+ !fDoLookup /*fInstantiate*/);
+ if (pNewInode) {
+ struct vbsf_inode_info *sf_new_i = VBSF_GET_INODE_INFO(pNewInode);
+ if (phHostFile) {
+ *phHostFile = pReq->Create.CreateParms.Handle;
+ pReq->Create.CreateParms.Handle = SHFL_HANDLE_NIL;
+ } else if (fStashHandle) {
+ sf_new_i->handle = pReq->Create.CreateParms.Handle;
+ pReq->Create.CreateParms.Handle = SHFL_HANDLE_NIL;
+ }
+ if (fDoLookup)
+ vbsf_d_add_inode(dentry, pNewInode);
+ path = NULL;
+ } else {
+ SFLOGFLOW(("%s vbsf_create_inode failed: -ENOMEM (path %s)\n", pszPrefix, rc, path->String.ach));
+ rc = -ENOMEM;
+ }
+ } else if (pReq->Create.CreateParms.Result == SHFL_FILE_EXISTS) {
+ /*
+ * For atomic_open (at least), we should create an inode and
+ * convert the dentry from a negative to a positive one.
+ */
+ SFLOGFLOW(("%s SHFL_FILE_EXISTS for %s\n", pszPrefix, sf_parent_i->path->String.ach));
+ if (fDoLookup) {
+ struct inode *pNewInode = vbsf_create_inode(parent, dentry, path, &pReq->Create.CreateParms.Info,
+ pSuperInfo, false /*fInstantiate*/);
+ if (pNewInode)
+ vbsf_d_add_inode(dentry, pNewInode);
+ path = NULL;
+ }
+ rc = -EEXIST;
+ } else if (pReq->Create.CreateParms.Result == SHFL_FILE_NOT_FOUND) {
+ SFLOGFLOW(("%s SHFL_FILE_NOT_FOUND for %s\n", pszPrefix, sf_parent_i->path->String.ach));
+ rc = -ENOENT;
+ } else if (pReq->Create.CreateParms.Result == SHFL_PATH_NOT_FOUND) {
+ SFLOGFLOW(("%s SHFL_PATH_NOT_FOUND for %s\n", pszPrefix, sf_parent_i->path->String.ach));
+ rc = -ENOENT;
+ } else {
+ AssertMsgFailed(("result=%d creating '%s'\n", pReq->Create.CreateParms.Result, sf_parent_i->path->String.ach));
+ rc = -EPERM;
+ }
+ } else {
+ int const vrc = rc;
+ rc = -RTErrConvertToErrno(vrc);
+ SFLOGFLOW(("%s SHFL_FN_CREATE(%s) failed vrc=%Rrc rc=%d\n", pszPrefix, path->String.ach, vrc, rc));
+ }
+
+ /* Cleanups. */
+ if (pReq->Create.CreateParms.Handle != SHFL_HANDLE_NIL) {
+ AssertCompile(RTASSERT_OFFSET_OF(VBOXSFCREATEREQ, CreateParms.Handle) > sizeof(VBOXSFCLOSEREQ)); /* no aliasing issues */
+ int rc2 = VbglR0SfHostReqClose(pSuperInfo->map.root, &pReq->Close, pReq->Create.CreateParms.Handle);
+ if (RT_FAILURE(rc2))
+ SFLOGFLOW(("%s VbglR0SfHostReqCloseSimple failed rc=%Rrc\n", pszPrefix, rc2));
+ }
+ VbglR0PhysHeapFree(pReq);
+ } else
+ rc = -ENOMEM;
+ if (path)
+ kfree(path);
+ }
+ return rc;
}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
/**
- * Create a new regular file / directory.
+ * More atomic way of handling creation.
*
- * @param parent inode of the directory
- * @param dentry directory cache entry
- * @param mode file mode
- * @param fDirectory true if directory, false otherwise
- * @returns 0 on success, Linux error code otherwise
+ * Older kernels would first do a lookup that created the file, followed by
+ * an open call. We've got this horrid vbsf_inode_info::handle member because
+ * of that approach. This call combines the lookup and the open.
*/
-static int sf_create_aux(struct inode *parent, struct dentry *dentry,
- umode_t mode, int fDirectory)
+static int vbsf_inode_atomic_open(struct inode *pDirInode, struct dentry *dentry, struct file *file, unsigned fOpen,
+ umode_t fMode
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
+ , int *opened
+# endif
+ )
{
- int rc, err;
- SHFLCREATEPARMS params;
- SHFLSTRING *path;
- struct sf_inode_info *sf_i = GET_INODE_INFO(parent);
- struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb);
-
- TRACE();
- BUG_ON(!sf_i);
- BUG_ON(!sf_g);
-
- err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path);
- if (err)
- goto fail0;
-
- RT_ZERO(params);
- params.Handle = SHFL_HANDLE_NIL;
- params.CreateFlags = 0
- | SHFL_CF_ACT_CREATE_IF_NEW
- | SHFL_CF_ACT_FAIL_IF_EXISTS
- | SHFL_CF_ACCESS_READWRITE | (fDirectory ? SHFL_CF_DIRECTORY : 0);
- params.Info.Attr.fMode = 0
- | (fDirectory ? RTFS_TYPE_DIRECTORY : RTFS_TYPE_FILE)
- | (mode & S_IRWXUGO);
- params.Info.Attr.enmAdditional = RTFSOBJATTRADD_NOTHING;
-
- LogFunc(("sf_create_aux: calling VbglR0SfCreate, folder %s, flags %#x\n", path->String.utf8, params.CreateFlags));
- rc = VbglR0SfCreate(&client_handle, &sf_g->map, path, ¶ms);
- if (RT_FAILURE(rc)) {
- if (rc == VERR_WRITE_PROTECT) {
- err = -EROFS;
- goto fail1;
- }
- err = -EPROTO;
- LogFunc(("(%d): VbglR0SfCreate(%s) failed rc=%Rrc\n",
- fDirectory, sf_i->path->String.utf8, rc));
- goto fail1;
- }
-
- if (params.Result != SHFL_FILE_CREATED) {
- err = -EPERM;
- LogFunc(("(%d): could not create file %s result=%d\n",
- fDirectory, sf_i->path->String.utf8, params.Result));
- goto fail1;
- }
-
- err = sf_instantiate(parent, dentry, path, ¶ms.Info,
- fDirectory ? SHFL_HANDLE_NIL : params.Handle);
- if (err) {
- LogFunc(("(%d): could not instantiate dentry for %s err=%d\n",
- fDirectory, sf_i->path->String.utf8, err));
- goto fail2;
- }
-
- /*
- * Don't close this handle right now. We assume that the same file is
- * opened with sf_reg_open() and later closed with sf_reg_close(). Save
- * the handle in between. Does not apply to directories. True?
- */
- if (fDirectory) {
- rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle);
- if (RT_FAILURE(rc))
- LogFunc(("(%d): VbglR0SfClose failed rc=%Rrc\n",
- fDirectory, rc));
- }
-
- sf_i->force_restat = 1;
- return 0;
-
- fail2:
- rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle);
- if (RT_FAILURE(rc))
- LogFunc(("(%d): VbglR0SfClose failed rc=%Rrc\n", fDirectory,
- rc));
-
- fail1:
- kfree(path);
-
- fail0:
- return err;
+ SFLOGFLOW(("vbsf_inode_atomic_open: pDirInode=%p dentry=%p file=%p fOpen=%#x, fMode=%#x\n", pDirInode, dentry, file, fOpen, fMode));
+ int rc;
+
+ /* Code assumes negative dentry. */
+ Assert(dentry->d_inode == NULL);
+
+ /** @todo see if we can do this for non-create calls too, as it may save us a
+ * host call to revalidate the dentry. (Can't see anyone else doing
+ * this, so playing it safe for now.) */
+ if (fOpen & O_CREAT) {
+ /*
+ * Prepare our file info structure.
+ */
+ struct vbsf_reg_info *sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
+ if (sf_r) {
+ bool fCreated = false;
+ uint32_t fCreateFlags;
+
+ RTListInit(&sf_r->Handle.Entry);
+ sf_r->Handle.cRefs = 1;
+ sf_r->Handle.fFlags = !(fOpen & O_DIRECTORY)
+ ? VBSF_HANDLE_F_FILE | VBSF_HANDLE_F_MAGIC
+ : VBSF_HANDLE_F_DIR | VBSF_HANDLE_F_MAGIC;
+ sf_r->Handle.hHost = SHFL_HANDLE_NIL;
+
+ /*
+ * Try create it.
+ */
+ /* vbsf_create_worker uses the type from fMode, so match it up to O_DIRECTORY. */
+ AssertMsg(!(fMode & S_IFMT) || (fMode & S_IFMT) == (fOpen & O_DIRECTORY ? S_IFDIR : S_IFREG), ("0%o\n", fMode));
+ if (!(fOpen & O_DIRECTORY))
+ fMode = (fMode & ~S_IFMT) | S_IFREG;
+ else
+ fMode = (fMode & ~S_IFMT) | S_IFDIR;
+
+ fCreateFlags = vbsf_linux_oflags_to_vbox(fOpen, &sf_r->Handle.fFlags, __FUNCTION__);
+
+ rc = vbsf_create_worker(pDirInode, dentry, fMode, fCreateFlags, false /*fStashHandle*/, true /*fDoLookup*/,
+ &sf_r->Handle.hHost, &fCreated);
+ if (rc == 0) {
+ struct inode *inode = dentry->d_inode;
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
+
+ /*
+ * Set FMODE_CREATED according to the action taken by SHFL_CREATE
+ * and call finish_open() to do the remaining open() work.
+ */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
+ if (fCreated)
+ file->f_mode |= FMODE_CREATED;
+ rc = finish_open(file, dentry, generic_file_open);
+# else
+ if (fCreated)
+ *opened |= FILE_CREATED;
+ rc = finish_open(file, dentry, generic_file_open, opened);
+# endif
+ if (rc == 0) {
+ /*
+ * Now that the file is fully opened, associate sf_r with it
+ * and link the handle to the inode.
+ */
+ vbsf_handle_append(sf_i, &sf_r->Handle);
+ file->private_data = sf_r;
+ SFLOGFLOW(("vbsf_inode_atomic_open: create succeeded; hHost=%#llx path='%s'\n",
+ rc, sf_r->Handle.hHost, sf_i->path->String.ach));
+ sf_r = NULL; /* don't free it */
+ } else {
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(pDirInode->i_sb);
+ SFLOGFLOW(("vbsf_inode_atomic_open: finish_open failed: %d (path='%s'\n", rc, sf_i->path->String.ach));
+ VbglR0SfHostReqCloseSimple(pSuperInfo->map.root, sf_r->Handle.hHost);
+ sf_r->Handle.hHost = SHFL_HANDLE_NIL;
+ }
+ } else
+ SFLOGFLOW(("vbsf_inode_atomic_open: vbsf_create_worker failed: %d\n", rc));
+ if (sf_r)
+ kfree(sf_r);
+ } else {
+ LogRelMaxFunc(64, ("could not allocate reg info\n"));
+ rc = -ENOMEM;
+ }
+ }
+ /*
+ * Not creating anything.
+ * Do we need to do a lookup or should we just fail?
+ */
+ else if (d_in_lookup(dentry)) {
+ struct dentry *pResult = vbsf_inode_lookup(pDirInode, dentry, 0 /*fFlags*/);
+ if (!IS_ERR(pResult))
+ rc = finish_no_open(file, pResult);
+ else
+ rc = PTR_ERR(pResult);
+ SFLOGFLOW(("vbsf_inode_atomic_open: open -> %d (%p)\n", rc, pResult));
+ } else {
+ SFLOGFLOW(("vbsf_inode_atomic_open: open -> -ENOENT\n"));
+ rc = -ENOENT;
+ }
+ return rc;
}
+#endif /* 3.16.0 */
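+
+/* A minimal sketch (not part of this patch) of how the VFS reaches the
+ * function above: open(2) with O_CREAT on a name whose dentry is negative
+ * goes through dir->i_op->atomic_open instead of separate ->lookup and
+ * ->create calls:
+ *
+ *      fd = open("/mnt/shared/newfile", O_CREAT | O_RDWR, 0644);
+ *      -> vbsf_inode_atomic_open(dir, dentry, file, O_CREAT|O_RDWR, fMode)
+ *         -> vbsf_create_worker()            // one SHFL_FN_CREATE host call
+ *         -> finish_open(file, dentry, ...)  // completes the struct file
+ */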
+
/**
* Create a new regular file.
@@ -624,22 +1041,34 @@ static int sf_create_aux(struct inode *parent, struct dentry *dentry,
* @returns 0 on success, Linux error code otherwise
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(DOXYGEN_RUNNING)
-static int sf_create(struct inode *parent, struct dentry *dentry, umode_t mode,
- bool excl)
+static int vbsf_inode_create(struct inode *parent, struct dentry *dentry, umode_t mode, bool excl)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
-static int sf_create(struct inode *parent, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-static int sf_create(struct inode *parent, struct dentry *dentry, int mode,
- struct nameidata *nd)
+static int vbsf_inode_create(struct inode *parent, struct dentry *dentry, umode_t mode, struct nameidata *nd)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 75)
+static int vbsf_inode_create(struct inode *parent, struct dentry *dentry, int mode, struct nameidata *nd)
#else
-static int sf_create(struct inode *parent, struct dentry *dentry, int mode)
+static int vbsf_inode_create(struct inode *parent, struct dentry *dentry, int mode)
#endif
{
- TRACE();
- return sf_create_aux(parent, dentry, mode, 0);
+ uint32_t fCreateFlags = SHFL_CF_ACT_CREATE_IF_NEW
+ | SHFL_CF_ACT_FAIL_IF_EXISTS
+ | SHFL_CF_ACCESS_READWRITE;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 75)
+ /* Clear the RD flag if write-only access requested. Otherwise assume we
+ need write access to create stuff. */
+ if (!(nd->intent.open.flags & 1)) {
+ fCreateFlags &= ~SHFL_CF_ACCESS_READWRITE;
+ fCreateFlags |= SHFL_CF_ACCESS_WRITE;
+ }
+ /* (file since 2.6.15) */
+#endif
+ TRACE();
+ AssertMsg(!(mode & S_IFMT) || (mode & S_IFMT) == S_IFREG, ("0%o\n", mode));
+ return vbsf_create_worker(parent, dentry, (mode & ~S_IFMT) | S_IFREG, fCreateFlags,
+ true /*fStashHandle*/, false /*fDoLookup*/, NULL /*phHandle*/, NULL /*fCreated*/);
}
+
/**
* Create a new directory.
*
@@ -649,15 +1078,22 @@ static int sf_create(struct inode *parent, struct dentry *dentry, int mode)
* @returns 0 on success, Linux error code otherwise
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
-static int sf_mkdir(struct inode *parent, struct dentry *dentry, umode_t mode)
+static int vbsf_inode_mkdir(struct inode *parent, struct dentry *dentry, umode_t mode)
#else
-static int sf_mkdir(struct inode *parent, struct dentry *dentry, int mode)
+static int vbsf_inode_mkdir(struct inode *parent, struct dentry *dentry, int mode)
#endif
{
- TRACE();
- return sf_create_aux(parent, dentry, mode, 1);
+ TRACE();
+ AssertMsg(!(mode & S_IFMT) || (mode & S_IFMT) == S_IFDIR, ("0%o\n", mode));
+ return vbsf_create_worker(parent, dentry, (mode & ~S_IFMT) | S_IFDIR,
+ SHFL_CF_ACT_CREATE_IF_NEW
+ | SHFL_CF_ACT_FAIL_IF_EXISTS
+ | SHFL_CF_ACCESS_READWRITE
+ | SHFL_CF_DIRECTORY,
+ false /*fStashHandle*/, false /*fDoLookup*/, NULL /*phHandle*/, NULL /*fCreated*/);
}
+
/**
* Remove a regular file / directory.
*
@@ -666,47 +1102,55 @@ static int sf_mkdir(struct inode *parent, struct dentry *dentry, int mode)
* @param fDirectory true if directory, false otherwise
* @returns 0 on success, Linux error code otherwise
*/
-static int sf_unlink_aux(struct inode *parent, struct dentry *dentry,
- int fDirectory)
+static int vbsf_unlink_worker(struct inode *parent, struct dentry *dentry, int fDirectory)
{
- int rc, err;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb);
- struct sf_inode_info *sf_i = GET_INODE_INFO(parent);
- SHFLSTRING *path;
- uint32_t fFlags;
-
- TRACE();
- BUG_ON(!sf_g);
-
- err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path);
- if (err)
- goto fail0;
-
- fFlags = fDirectory ? SHFL_REMOVE_DIR : SHFL_REMOVE_FILE;
- if (dentry->d_inode && ((dentry->d_inode->i_mode & S_IFLNK) == S_IFLNK))
- fFlags |= SHFL_REMOVE_SYMLINK;
- rc = VbglR0SfRemove(&client_handle, &sf_g->map, path, fFlags);
- if (RT_FAILURE(rc)) {
- LogFunc(("(%d): VbglR0SfRemove(%s) failed rc=%Rrc\n",
- fDirectory, path->String.utf8, rc));
- err = -RTErrConvertToErrno(rc);
- goto fail1;
- }
-
- /* directory access/change time changed */
- sf_i->force_restat = 1;
- /* directory content changed */
- sf_i->force_reread = 1;
-
- err = 0;
-
- fail1:
- kfree(path);
-
- fail0:
- return err;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(parent->i_sb);
+ struct vbsf_inode_info *sf_parent_i = VBSF_GET_INODE_INFO(parent);
+ SHFLSTRING *path;
+ int rc;
+
+ TRACE();
+
+ rc = vbsf_path_from_dentry(pSuperInfo, sf_parent_i, dentry, &path, __func__);
+ if (!rc) {
+ VBOXSFREMOVEREQ *pReq = (VBOXSFREMOVEREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF(VBOXSFREMOVEREQ, StrPath.String)
+ + path->u16Size);
+ if (pReq) {
+ memcpy(&pReq->StrPath, path, SHFLSTRING_HEADER_SIZE + path->u16Size);
+ uint32_t fFlags = fDirectory ? SHFL_REMOVE_DIR : SHFL_REMOVE_FILE;
+ if (dentry->d_inode && ((dentry->d_inode->i_mode & S_IFLNK) == S_IFLNK))
+ fFlags |= SHFL_REMOVE_SYMLINK;
+
+ rc = VbglR0SfHostReqRemove(pSuperInfo->map.root, pReq, fFlags);
+
+ if (dentry->d_inode) {
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(dentry->d_inode);
+ sf_i->force_restat = true;
+ }
+
+ if (RT_SUCCESS(rc)) {
+ sf_parent_i->force_restat = true; /* directory access/change time changed */
+ rc = 0;
+ } else if (rc == VERR_FILE_NOT_FOUND || rc == VERR_PATH_NOT_FOUND) {
+ /* Probably deleted on the host while the guest had it cached, so don't complain: */
+ LogFunc(("(%d): VbglR0SfRemove(%s) failed rc=%Rrc; calling d_drop on %p\n",
+ fDirectory, path->String.ach, rc, dentry));
+ sf_parent_i->force_restat = true;
+ d_drop(dentry);
+ rc = 0;
+ } else {
+ LogFunc(("(%d): VbglR0SfRemove(%s) failed rc=%Rrc\n", fDirectory, path->String.ach, rc));
+ rc = -RTErrConvertToErrno(rc);
+ }
+ VbglR0PhysHeapFree(pReq);
+ } else
+ rc = -ENOMEM;
+ kfree(path);
+ }
+ return rc;
}
+
/**
* Remove a regular file.
*
@@ -714,12 +1158,13 @@ static int sf_unlink_aux(struct inode *parent, struct dentry *dentry,
* @param dentry directory cache entry
* @returns 0 on success, Linux error code otherwise
*/
-static int sf_unlink(struct inode *parent, struct dentry *dentry)
+static int vbsf_inode_unlink(struct inode *parent, struct dentry *dentry)
{
- TRACE();
- return sf_unlink_aux(parent, dentry, 0);
+ TRACE();
+ return vbsf_unlink_worker(parent, dentry, false /*fDirectory*/);
}
+
/**
* Remove a directory.
*
@@ -727,12 +1172,13 @@ static int sf_unlink(struct inode *parent, struct dentry *dentry)
* @param dentry directory cache entry
* @returns 0 on success, Linux error code otherwise
*/
-static int sf_rmdir(struct inode *parent, struct dentry *dentry)
+static int vbsf_inode_rmdir(struct inode *parent, struct dentry *dentry)
{
- TRACE();
- return sf_unlink_aux(parent, dentry, 1);
+ TRACE();
+ return vbsf_unlink_worker(parent, dentry, true /*fDirectory*/);
}
+
/**
* Rename a regular file / directory.
*
@@ -743,151 +1189,206 @@ static int sf_rmdir(struct inode *parent, struct dentry *dentry)
* @param flags flags
* @returns 0 on success, Linux error code otherwise
*/
-static int sf_rename(struct inode *old_parent, struct dentry *old_dentry,
- struct inode *new_parent, struct dentry *new_dentry
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
- , unsigned flags
-#endif
- )
+static int vbsf_inode_rename(struct inode *old_parent, struct dentry *old_dentry,
+ struct inode *new_parent, struct dentry *new_dentry, unsigned flags)
{
- int err = 0, rc = VINF_SUCCESS;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(old_parent->i_sb);
+ /*
+ * Deal with flags.
+ */
+ int rc;
+ uint32_t fRename = (old_dentry->d_inode->i_mode & S_IFDIR ? SHFL_RENAME_DIR : SHFL_RENAME_FILE)
+ | SHFL_RENAME_REPLACE_IF_EXISTS;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ if (!(flags & ~RENAME_NOREPLACE)) {
+ if (flags & RENAME_NOREPLACE)
+ fRename &= ~SHFL_RENAME_REPLACE_IF_EXISTS;
+#endif
+ /*
+ * Check that they are on the same mount.
+ */
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(old_parent->i_sb);
+ if (pSuperInfo == VBSF_GET_SUPER_INFO(new_parent->i_sb)) {
+ /*
+ * Build the new path.
+ */
+ struct vbsf_inode_info *sf_new_parent_i = VBSF_GET_INODE_INFO(new_parent);
+ PSHFLSTRING pNewPath;
+ rc = vbsf_path_from_dentry(pSuperInfo, sf_new_parent_i, new_dentry, &pNewPath, __func__);
+ if (rc == 0) {
+ /*
+ * Create and issue the rename request.
+ */
+ VBOXSFRENAMEWITHSRCBUFREQ *pReq;
+ pReq = (VBOXSFRENAMEWITHSRCBUFREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF(VBOXSFRENAMEWITHSRCBUFREQ, StrDstPath.String)
+ + pNewPath->u16Size);
+ if (pReq) {
+ struct vbsf_inode_info *sf_file_i = VBSF_GET_INODE_INFO(old_dentry->d_inode);
+ PSHFLSTRING pOldPath = sf_file_i->path;
+
+ memcpy(&pReq->StrDstPath, pNewPath, SHFLSTRING_HEADER_SIZE + pNewPath->u16Size);
+ rc = VbglR0SfHostReqRenameWithSrcContig(pSuperInfo->map.root, pReq, pOldPath, virt_to_phys(pOldPath), fRename);
+ VbglR0PhysHeapFree(pReq);
+ if (RT_SUCCESS(rc)) {
+ /*
+ * On success we replace the path in the inode and trigger
+ * restatting of both parent directories.
+ */
+ struct vbsf_inode_info *sf_old_parent_i = VBSF_GET_INODE_INFO(old_parent);
+ SFLOGFLOW(("vbsf_inode_rename: %s -> %s (%#x)\n", pOldPath->String.ach, pNewPath->String.ach, fRename));
+
+ sf_file_i->path = pNewPath;
+ kfree(pOldPath);
+ pNewPath = NULL;
+
+ sf_new_parent_i->force_restat = 1;
+ sf_old_parent_i->force_restat = 1;
+
+ vbsf_dentry_chain_increase_parent_ttl(old_dentry);
+ vbsf_dentry_chain_increase_parent_ttl(new_dentry);
+
+ rc = 0;
+ } else {
+ SFLOGFLOW(("vbsf_inode_rename: VbglR0SfHostReqRenameWithSrcContig(%s,%s,%#x) failed -> %d\n",
+ pOldPath->String.ach, pNewPath->String.ach, fRename, rc));
+ if (rc == VERR_IS_A_DIRECTORY || rc == VERR_IS_A_FILE)
+ vbsf_dentry_invalidate_ttl(old_dentry);
+ rc = -RTErrConvertToErrno(rc);
+ }
+ } else {
+ SFLOGFLOW(("vbsf_inode_rename: failed to allocate request (%#x bytes)\n",
+ RT_UOFFSETOF(VBOXSFRENAMEWITHSRCBUFREQ, StrDstPath.String) + pNewPath->u16Size));
+ rc = -ENOMEM;
+ }
+ if (pNewPath)
+ kfree(pNewPath);
+ } else
+ SFLOGFLOW(("vbsf_inode_rename: vbsf_path_from_dentry failed: %d\n", rc));
+ } else {
+ SFLOGFLOW(("vbsf_inode_rename: rename with different roots (%#x vs %#x)\n",
+ pSuperInfo->map.root, VBSF_GET_SUPER_INFO(new_parent->i_sb)->map.root));
+ rc = -EXDEV;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ } else {
+ SFLOGFLOW(("vbsf_inode_rename: Unsupported flags: %#x\n", flags));
+ rc = -EINVAL;
+ }
+#else
+ RT_NOREF(flags);
+#endif
+ return rc;
+}
- TRACE();
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
- if (flags) {
- LogFunc(("rename with flags=%x\n", flags));
- return -EINVAL;
- }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+/**
+ * The traditional rename interface without any flags.
+ */
+static int vbsf_inode_rename_no_flags(struct inode *old_parent, struct dentry *old_dentry,
+ struct inode *new_parent, struct dentry *new_dentry)
+{
+ return vbsf_inode_rename(old_parent, old_dentry, new_parent, new_dentry, 0);
+}
#endif
- if (sf_g != GET_GLOB_INFO(new_parent->i_sb)) {
- LogFunc(("rename with different roots\n"));
- err = -EINVAL;
- } else {
- struct sf_inode_info *sf_old_i = GET_INODE_INFO(old_parent);
- struct sf_inode_info *sf_new_i = GET_INODE_INFO(new_parent);
- /* As we save the relative path inside the inode structure, we need to change
- this if the rename is successful. */
- struct sf_inode_info *sf_file_i =
- GET_INODE_INFO(old_dentry->d_inode);
- SHFLSTRING *old_path;
- SHFLSTRING *new_path;
-
- BUG_ON(!sf_old_i);
- BUG_ON(!sf_new_i);
- BUG_ON(!sf_file_i);
-
- old_path = sf_file_i->path;
- err = sf_path_from_dentry(__func__, sf_g, sf_new_i,
- new_dentry, &new_path);
- if (err)
- LogFunc(("failed to create new path\n"));
- else {
- int fDir =
- ((old_dentry->d_inode->i_mode & S_IFDIR) != 0);
-
- rc = VbglR0SfRename(&client_handle, &sf_g->map,
- old_path, new_path,
- fDir ? 0 : SHFL_RENAME_FILE |
- SHFL_RENAME_REPLACE_IF_EXISTS);
- if (RT_SUCCESS(rc)) {
- kfree(old_path);
- sf_new_i->force_restat = 1;
- sf_old_i->force_restat = 1; /* XXX: needed? */
- /* Set the new relative path in the inode. */
- sf_file_i->path = new_path;
- } else {
- LogFunc(("VbglR0SfRename failed rc=%Rrc\n",
- rc));
- err = -RTErrConvertToErrno(rc);
- kfree(new_path);
- }
- }
- }
- return err;
-}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-static int sf_symlink(struct inode *parent, struct dentry *dentry,
- const char *symname)
+/**
+ * Create a symbolic link.
+ */
+static int vbsf_inode_symlink(struct inode *parent, struct dentry *dentry, const char *target)
{
- int err;
- int rc;
- struct sf_inode_info *sf_i;
- struct sf_glob_info *sf_g;
- SHFLSTRING *path, *ssymname;
- SHFLFSOBJINFO info;
- int symname_len = strlen(symname) + 1;
-
- TRACE();
- sf_g = GET_GLOB_INFO(parent->i_sb);
- sf_i = GET_INODE_INFO(parent);
-
- BUG_ON(!sf_g);
- BUG_ON(!sf_i);
-
- err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path);
- if (err)
- goto fail0;
-
- ssymname =
- kmalloc(offsetof(SHFLSTRING, String.utf8) + symname_len,
- GFP_KERNEL);
- if (!ssymname) {
- LogRelFunc(("kmalloc failed, caller=sf_symlink\n"));
- err = -ENOMEM;
- goto fail1;
- }
-
- ssymname->u16Length = symname_len - 1;
- ssymname->u16Size = symname_len;
- memcpy(ssymname->String.utf8, symname, symname_len);
-
- rc = VbglR0SfSymlink(&client_handle, &sf_g->map, path, ssymname, &info);
- kfree(ssymname);
-
- if (RT_FAILURE(rc)) {
- if (rc == VERR_WRITE_PROTECT) {
- err = -EROFS;
- goto fail1;
- }
- LogFunc(("VbglR0SfSymlink(%s) failed rc=%Rrc\n",
- sf_i->path->String.utf8, rc));
- err = -EPROTO;
- goto fail1;
- }
-
- err = sf_instantiate(parent, dentry, path, &info, SHFL_HANDLE_NIL);
- if (err) {
- LogFunc(("could not instantiate dentry for %s err=%d\n",
- sf_i->path->String.utf8, err));
- goto fail1;
- }
-
- sf_i->force_restat = 1;
- return 0;
-
- fail1:
- kfree(path);
- fail0:
- return err;
+ /*
+ * Turn the target into a string (contiguous physical memory).
+ */
+ /** @todo we can save a kmalloc here if we switch to embedding the target rather
+ * than the symlink path into the request. Will require more NLS helpers. */
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(parent->i_sb);
+ PSHFLSTRING pTarget = NULL;
+ int rc = vbsf_nls_to_shflstring(pSuperInfo, target, &pTarget);
+ if (rc == 0) {
+ /*
+ * Create a full path for the symlink name.
+ */
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(parent);
+ PSHFLSTRING pPath = NULL;
+ rc = vbsf_path_from_dentry(pSuperInfo, sf_i, dentry, &pPath, __func__);
+ if (rc == 0) {
+ /*
+ * Create the request and issue it.
+ */
+ uint32_t const cbReq = RT_UOFFSETOF(VBOXSFCREATESYMLINKREQ, StrSymlinkPath.String) + pPath->u16Size;
+ VBOXSFCREATESYMLINKREQ *pReq = (VBOXSFCREATESYMLINKREQ *)VbglR0PhysHeapAlloc(cbReq);
+ if (pReq) {
+ RT_ZERO(*pReq);
+ memcpy(&pReq->StrSymlinkPath, pPath, SHFLSTRING_HEADER_SIZE + pPath->u16Size);
+
+ rc = VbglR0SfHostReqCreateSymlinkContig(pSuperInfo->map.root, pTarget, virt_to_phys(pTarget), pReq);
+ if (RT_SUCCESS(rc)) {
+ sf_i->force_restat = 1;
+
+ /*
+ * Instantiate a new inode for the symlink.
+ */
+ rc = vbsf_inode_instantiate(parent, dentry, pPath, &pReq->ObjInfo, SHFL_HANDLE_NIL);
+ if (rc == 0) {
+ SFLOGFLOW(("vbsf_inode_symlink: Successfully created '%s' -> '%s'\n", pPath->String.ach, pTarget->String.ach));
+ pPath = NULL; /* consumed by inode */
+ vbsf_dentry_chain_increase_ttl(dentry);
+ } else {
+ SFLOGFLOW(("vbsf_inode_symlink: Failed to create inode for '%s': %d\n", pPath->String.ach, rc));
+ vbsf_dentry_chain_increase_parent_ttl(dentry);
+ vbsf_dentry_invalidate_ttl(dentry);
+ }
+ } else {
+ int const vrc = rc;
+ if (vrc == VERR_WRITE_PROTECT)
+ rc = -EPERM; /* EPERM: symlink creation not supported, per the Linux symlink(2)
+ manpage as of 2017-09-15. VERR_WRITE_PROTECT is what the host returns
+ when "VBoxInternal2/SharedFoldersEnableSymlinksCreate/<share>" is not 1. */
+ else
+ rc = -RTErrConvertToErrno(vrc);
+ SFLOGFLOW(("vbsf_inode_symlink: VbglR0SfHostReqCreateSymlinkContig failed for '%s' -> '%s': %Rrc (-> %d)\n",
+ pPath->String.ach, pTarget->String.ach, vrc, rc));
+ }
+ VbglR0PhysHeapFree(pReq);
+ } else {
+ SFLOGFLOW(("vbsf_inode_symlink: failed to allocate %u phys heap for the request!\n", cbReq));
+ rc = -ENOMEM;
+ }
+ if (pPath)
+ kfree(pPath);
+ }
+ kfree(pTarget);
+ }
+ return rc;
}
-#endif /* LINUX_VERSION_CODE >= 2.6.0 */
-
-struct inode_operations sf_dir_iops = {
- .lookup = sf_lookup,
- .create = sf_create,
- .mkdir = sf_mkdir,
- .rmdir = sf_rmdir,
- .unlink = sf_unlink,
- .rename = sf_rename,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
- .revalidate = sf_inode_revalidate
+
+
+/**
+ * Directory inode operations.
+ */
+struct inode_operations vbsf_dir_iops = {
+ .lookup = vbsf_inode_lookup,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ .atomic_open = vbsf_inode_atomic_open,
+#endif
+ .create = vbsf_inode_create,
+ .symlink = vbsf_inode_symlink,
+ .mkdir = vbsf_inode_mkdir,
+ .rmdir = vbsf_inode_rmdir,
+ .unlink = vbsf_inode_unlink,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ .rename = vbsf_inode_rename,
#else
- .getattr = sf_getattr,
- .setattr = sf_setattr,
- .symlink = sf_symlink
+ .rename = vbsf_inode_rename_no_flags,
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ .rename2 = vbsf_inode_rename,
+# endif
#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 18)
+ .getattr = vbsf_inode_getattr,
+#else
+ .revalidate = vbsf_inode_revalidate,
+#endif
+ .setattr = vbsf_inode_setattr,
};
+
diff --git a/ubuntu/vbox/vboxsf/include/VBox/VBoxGuest.h b/ubuntu/vbox/vboxsf/include/VBox/VBoxGuest.h
index 0b9d79f68840..e89e0984d8ea 100644
--- a/ubuntu/vbox/vboxsf/include/VBox/VBoxGuest.h
+++ b/ubuntu/vbox/vboxsf/include/VBox/VBoxGuest.h
@@ -422,7 +422,7 @@ AssertCompileSize(VBGLIOCIDCHGCMFASTCALL, /* 24 + 4 + 1 + 3 + 2*8 + 4 = 0x34 (52
\
(a_pCall)->header.header.size = (a_cbReq) - sizeof(VBGLIOCIDCHGCMFASTCALL); \
(a_pCall)->header.header.version = VBGLREQHDR_VERSION; \
- (a_pCall)->header.header.requestType= (ARCH_BITS == 32 ? VMMDevReq_HGCMCall32 : VMMDevReq_HGCMCall64); \
+ (a_pCall)->header.header.requestType= (ARCH_BITS == 64 ? VMMDevReq_HGCMCall64 : VMMDevReq_HGCMCall32); \
(a_pCall)->header.header.rc = VERR_INTERNAL_ERROR; \
(a_pCall)->header.header.reserved1 = 0; \
(a_pCall)->header.header.fRequestor = VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV_OTHER \
diff --git a/ubuntu/vbox/vboxsf/include/VBox/VBoxGuestLibSharedFoldersInline.h b/ubuntu/vbox/vboxsf/include/VBox/VBoxGuestLibSharedFoldersInline.h
new file mode 100644
index 000000000000..9555cc53a9e3
--- /dev/null
+++ b/ubuntu/vbox/vboxsf/include/VBox/VBoxGuestLibSharedFoldersInline.h
@@ -0,0 +1,1517 @@
+/* $Id: VBoxGuestLibSharedFoldersInline.h $ */
+/** @file
+ * VBoxGuestLib - Shared Folders Host Request Helpers (ring-0).
+ */
+
+/*
+ * Copyright (C) 2006-2019 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef VBOX_INCLUDED_VBoxGuestLibSharedFoldersInline_h
+#define VBOX_INCLUDED_VBoxGuestLibSharedFoldersInline_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/types.h>
+#include <iprt/assert.h>
+#include <VBox/VBoxGuest.h>
+#include <VBox/VBoxGuestLib.h>
+#include <VBox/VBoxGuestLibSharedFolders.h>
+#include <VBox/VMMDev.h>
+#include <VBox/shflsvc.h>
+#include <iprt/err.h>
+
+
+/** @defgroup grp_vboxguest_lib_r0_sf_inline Shared Folders Host Request Helpers
+ * @ingroup grp_vboxguest_lib_r0
+ *
+ * @note Using inline functions to avoid wasting precious ring-0 stack space on
+ * passing parameters that end up in the structure @a pReq points to. It
+ * is also safe to assume this is faster. That is worth a few bytes of
+ * extra code section in the resulting shared folders driver.
+ *
+ * @note This currently requires a C++ compiler or a C compiler capable of
+ * mixing code and variables (i.e. C99).
+ *
+ * @{
+ */
+
+/** VMMDEV_HVF_XXX (set during init). */
+extern uint32_t g_fHostFeatures;
+extern VBGLSFCLIENT g_SfClient; /**< Move this into the parameters? */
+
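+/* General calling pattern for the request helpers in this file (a sketch
+ * only; VBOXSFXXXREQ / VbglR0SfHostReqXxx are placeholders for any of the
+ * request structures and helpers defined below):
+ *
+ *      VBOXSFXXXREQ *pReq = (VBOXSFXXXREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ *      if (pReq)
+ *      {
+ *          int vrc = VbglR0SfHostReqXxx(idRoot, pReq, ...);
+ *          // on success, read results out of pReq->Parms and trailing buffers
+ *          VbglR0PhysHeapFree(pReq);
+ *      }
+ *      else
+ *          vrc = VERR_NO_MEMORY;
+ *
+ * The request must come from the physical heap so its physical address can be
+ * handed to the host; the XxxSimple variants below bundle the allocation.
+ */
+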
+/** Request structure for VbglR0SfHostReqQueryFeatures. */
+typedef struct VBOXSFQUERYFEATURES
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmQueryFeatures Parms;
+} VBOXSFQUERYFEATURES;
+
+/**
+ * SHFL_FN_QUERY_FEATURES request.
+ */
+DECLINLINE(int) VbglR0SfHostReqQueryFeatures(VBOXSFQUERYFEATURES *pReq)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_QUERY_FEATURES, SHFL_CPARMS_QUERY_FEATURES, sizeof(*pReq));
+
+ pReq->Parms.f64Features.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.f64Features.u.value64 = 0;
+
+ pReq->Parms.u32LastFunction.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.u32LastFunction.u.value32 = 0;
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, sizeof(*pReq));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+
+ /*
+ * Provide fallback values based on g_fHostFeatures to simplify
+ * compatibility with older hosts and avoid duplicating this logic.
+ */
+ if (RT_FAILURE(vrc))
+ {
+ pReq->Parms.f64Features.u.value64 = 0;
+ pReq->Parms.u32LastFunction.u.value32 = g_fHostFeatures & VMMDEV_HVF_HGCM_NO_BOUNCE_PAGE_LIST
+ ? SHFL_FN_SET_FILE_SIZE : SHFL_FN_SET_SYMLINKS;
+ if (vrc == VERR_NOT_SUPPORTED)
+ vrc = VINF_NOT_SUPPORTED;
+ }
+ return vrc;
+}
+
+/**
+ * SHFL_FN_QUERY_FEATURES request, simplified version.
+ */
+DECLINLINE(int) VbglR0SfHostReqQueryFeaturesSimple(uint64_t *pfFeatures, uint32_t *puLastFunction)
+{
+ VBOXSFQUERYFEATURES *pReq = (VBOXSFQUERYFEATURES *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq)
+ {
+ int rc = VbglR0SfHostReqQueryFeatures(pReq);
+ if (pfFeatures)
+ *pfFeatures = pReq->Parms.f64Features.u.value64;
+ if (puLastFunction)
+ *puLastFunction = pReq->Parms.u32LastFunction.u.value32;
+
+ VbglR0PhysHeapFree(pReq);
+ return rc;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/** Request structure for VbglR0SfHostReqSetUtf8 and VbglR0SfHostReqSetSymlink. */
+typedef struct VBOXSFNOPARMS
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ /* no parameters */
+} VBOXSFNOPARMS;
+
+/**
+ * Worker for request without any parameters.
+ */
+DECLINLINE(int) VbglR0SfHostReqNoParms(VBOXSFNOPARMS *pReq, uint32_t uFunction, uint32_t cParms)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ uFunction, cParms, sizeof(*pReq));
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, sizeof(*pReq));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+/**
+ * Worker for request without any parameters, simplified.
+ */
+DECLINLINE(int) VbglR0SfHostReqNoParmsSimple(uint32_t uFunction, uint32_t cParms)
+{
+ VBOXSFNOPARMS *pReq = (VBOXSFNOPARMS *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq)
+ {
+ int vrc = VbglR0SfHostReqNoParms(pReq, uFunction, cParms);
+ VbglR0PhysHeapFree(pReq);
+ return vrc;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * SHFL_F_SET_UTF8 request.
+ */
+DECLINLINE(int) VbglR0SfHostReqSetUtf8(VBOXSFNOPARMS *pReq)
+{
+ return VbglR0SfHostReqNoParms(pReq, SHFL_FN_SET_UTF8, SHFL_CPARMS_SET_UTF8);
+}
+
+/**
+ * SHFL_F_SET_UTF8 request, simplified version.
+ */
+DECLINLINE(int) VbglR0SfHostReqSetUtf8Simple(void)
+{
+ return VbglR0SfHostReqNoParmsSimple(SHFL_FN_SET_UTF8, SHFL_CPARMS_SET_UTF8);
+}
+
+
+/**
+ * SHFL_F_SET_SYMLINKS request.
+ */
+DECLINLINE(int) VbglR0SfHostReqSetSymlinks(VBOXSFNOPARMS *pReq)
+{
+ return VbglR0SfHostReqNoParms(pReq, SHFL_FN_SET_SYMLINKS, SHFL_CPARMS_SET_SYMLINKS);
+}
+
+/**
+ * SHFL_F_SET_SYMLINKS request, simplified version.
+ */
+DECLINLINE(int) VbglR0SfHostReqSetSymlinksSimple(void)
+{
+ return VbglR0SfHostReqNoParmsSimple(SHFL_FN_SET_SYMLINKS, SHFL_CPARMS_SET_SYMLINKS);
+}
+
+
+/** Request structure for VbglR0SfHostReqMapFolderWithBuf. */
+typedef struct VBOXSFMAPFOLDERWITHBUFREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmMapFolder Parms;
+ HGCMPageListInfo PgLst;
+} VBOXSFMAPFOLDERWITHBUFREQ;
+
+
+/**
+ * SHFL_FN_MAP_FOLDER request.
+ */
+DECLINLINE(int) VbglR0SfHostReqMapFolderWithContig(VBOXSFMAPFOLDERWITHBUFREQ *pReq, PSHFLSTRING pStrName, RTGCPHYS64 PhysStrName,
+ RTUTF16 wcDelimiter, bool fCaseSensitive)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_MAP_FOLDER, SHFL_CPARMS_MAP_FOLDER, sizeof(*pReq));
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = SHFL_ROOT_NIL;
+
+ pReq->Parms.uc32Delimiter.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.uc32Delimiter.u.value32 = wcDelimiter;
+
+ pReq->Parms.fCaseSensitive.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.fCaseSensitive.u.value32 = fCaseSensitive;
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST)
+ {
+ pReq->Parms.pStrName.type = VMMDevHGCMParmType_PageList;
+ pReq->Parms.pStrName.u.PageList.size = SHFLSTRING_HEADER_SIZE + pStrName->u16Size;
+ pReq->Parms.pStrName.u.PageList.offset = RT_UOFFSETOF(VBOXSFMAPFOLDERWITHBUFREQ, PgLst) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->PgLst.flags = VBOX_HGCM_F_PARM_DIRECTION_BOTH;
+ pReq->PgLst.offFirstPage = (uint16_t)PhysStrName & (uint16_t)(PAGE_OFFSET_MASK);
+ pReq->PgLst.aPages[0] = PhysStrName & ~(RTGCPHYS64)PAGE_OFFSET_MASK;
+ pReq->PgLst.cPages = 1;
+ }
+ else
+ {
+ pReq->Parms.pStrName.type = VMMDevHGCMParmType_LinAddr_In;
+ pReq->Parms.pStrName.u.LinAddr.cb = SHFLSTRING_HEADER_SIZE + pStrName->u16Size;
+ pReq->Parms.pStrName.u.LinAddr.uAddr = (uintptr_t)pStrName;
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, sizeof(*pReq));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+/**
+ * SHFL_FN_MAP_FOLDER request.
+ */
+DECLINLINE(int) VbglR0SfHostReqMapFolderWithContigSimple(PSHFLSTRING pStrName, RTGCPHYS64 PhysStrName,
+ RTUTF16 wcDelimiter, bool fCaseSensitive, SHFLROOT *pidRoot)
+{
+ VBOXSFMAPFOLDERWITHBUFREQ *pReq = (VBOXSFMAPFOLDERWITHBUFREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq)
+ {
+ int rc = VbglR0SfHostReqMapFolderWithContig(pReq, pStrName, PhysStrName, wcDelimiter, fCaseSensitive);
+ *pidRoot = RT_SUCCESS(rc) ? pReq->Parms.id32Root.u.value32 : SHFL_ROOT_NIL;
+ VbglR0PhysHeapFree(pReq);
+ return rc;
+ }
+ *pidRoot = SHFL_ROOT_NIL;
+ return VERR_NO_MEMORY;
+}
+
+
+/**
+ * SHFL_FN_MAP_FOLDER request.
+ */
+DECLINLINE(int) VbglR0SfHostReqMapFolderWithBuf(VBOXSFMAPFOLDERWITHBUFREQ *pReq, PSHFLSTRING pStrName,
+ RTUTF16 wcDelimiter, bool fCaseSensitive)
+{
+ return VbglR0SfHostReqMapFolderWithContig(pReq, pStrName, VbglR0PhysHeapGetPhysAddr(pStrName), wcDelimiter, fCaseSensitive);
+}
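+
+/* Mount-time usage sketch (an assumption for illustration; the actual call
+ * lives in the vboxsf superblock setup code, not in this header):
+ *
+ *      SHFLROOT idRoot = SHFL_ROOT_NIL;
+ *      int vrc = VbglR0SfHostReqMapFolderWithContigSimple(pShareName, virt_to_phys(pShareName),
+ *                                                         '/', true /*fCaseSensitive*/, &idRoot);
+ *      // idRoot is then stored (cf. pSuperInfo->map.root) and passed to
+ *      // every other request helper for this share.
+ */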
+
+
+
+/** Request structure for VbglR0SfHostReqUnmapFolderSimple. */
+typedef struct VBOXSFUNMAPFOLDERREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmUnmapFolder Parms;
+} VBOXSFUNMAPFOLDERREQ;
+
+
+/**
+ * SHFL_FN_UNMAP_FOLDER request.
+ */
+DECLINLINE(int) VbglR0SfHostReqUnmapFolderSimple(uint32_t idRoot)
+{
+ VBOXSFUNMAPFOLDERREQ *pReq = (VBOXSFUNMAPFOLDERREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq)
+ {
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_UNMAP_FOLDER, SHFL_CPARMS_UNMAP_FOLDER, sizeof(*pReq));
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, sizeof(*pReq));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+
+ VbglR0PhysHeapFree(pReq);
+ return vrc;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/** Request structure for VbglR0SfHostReqCreate. */
+typedef struct VBOXSFCREATEREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmCreate Parms;
+ SHFLCREATEPARMS CreateParms;
+ SHFLSTRING StrPath;
+} VBOXSFCREATEREQ;
+
+/**
+ * SHFL_FN_CREATE request.
+ */
+DECLINLINE(int) VbglR0SfHostReqCreate(SHFLROOT idRoot, VBOXSFCREATEREQ *pReq)
+{
+ uint32_t const cbReq = g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS
+ ? RT_UOFFSETOF(VBOXSFCREATEREQ, StrPath.String) + pReq->StrPath.u16Size
+ : RT_UOFFSETOF(VBOXSFCREATEREQ, CreateParms);
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_CREATE, SHFL_CPARMS_CREATE, cbReq);
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pStrPath.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pStrPath.u.Embedded.cbData = SHFLSTRING_HEADER_SIZE + pReq->StrPath.u16Size;
+ pReq->Parms.pStrPath.u.Embedded.offData = RT_UOFFSETOF(VBOXSFCREATEREQ, StrPath) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pStrPath.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+
+ pReq->Parms.pCreateParms.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pCreateParms.u.Embedded.cbData = sizeof(pReq->CreateParms);
+ pReq->Parms.pCreateParms.u.Embedded.offData = RT_UOFFSETOF(VBOXSFCREATEREQ, CreateParms) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pCreateParms.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_BOTH;
+ }
+ else
+ {
+ pReq->Parms.pStrPath.type = VMMDevHGCMParmType_LinAddr_In;
+ pReq->Parms.pStrPath.u.LinAddr.cb = SHFLSTRING_HEADER_SIZE + pReq->StrPath.u16Size;
+ pReq->Parms.pStrPath.u.LinAddr.uAddr = (uintptr_t)&pReq->StrPath;
+
+ pReq->Parms.pCreateParms.type = VMMDevHGCMParmType_LinAddr;
+ pReq->Parms.pCreateParms.u.LinAddr.cb = sizeof(pReq->CreateParms);
+ pReq->Parms.pCreateParms.u.LinAddr.uAddr = (uintptr_t)&pReq->CreateParms;
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, cbReq);
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
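+
+/* Allocation sketch for a create request with a trailing path string; this
+ * mirrors what vbsf_create_worker() in dirops.c does (pPath is a prepared
+ * SHFLSTRING from vbsf_path_from_dentry):
+ *
+ *      VBOXSFCREATEREQ *pReq = (VBOXSFCREATEREQ *)VbglR0PhysHeapAlloc(
+ *                      RT_UOFFSETOF(VBOXSFCREATEREQ, StrPath.String) + pPath->u16Size);
+ *      if (pReq)
+ *      {
+ *          memcpy(&pReq->StrPath, pPath, SHFLSTRING_HEADER_SIZE + pPath->u16Size);
+ *          RT_ZERO(pReq->CreateParms);
+ *          pReq->CreateParms.Handle      = SHFL_HANDLE_NIL;
+ *          pReq->CreateParms.CreateFlags = SHFL_CF_ACT_CREATE_IF_NEW | SHFL_CF_ACCESS_READWRITE;
+ *          int vrc = VbglR0SfHostReqCreate(pSuperInfo->map.root, pReq);
+ *          // check vrc, then pReq->CreateParms.Result and .Handle
+ *          VbglR0PhysHeapFree(pReq);
+ *      }
+ */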
+
+
+/** Request structure for VbglR0SfHostReqClose. */
+typedef struct VBOXSFCLOSEREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmClose Parms;
+} VBOXSFCLOSEREQ;
+
+/**
+ * SHFL_FN_CLOSE request.
+ */
+DECLINLINE(int) VbglR0SfHostReqClose(SHFLROOT idRoot, VBOXSFCLOSEREQ *pReq, uint64_t hHostFile)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_CLOSE, SHFL_CPARMS_CLOSE, sizeof(*pReq));
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, sizeof(*pReq));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+/**
+ * SHFL_FN_CLOSE request, allocate request buffer.
+ */
+DECLINLINE(int) VbglR0SfHostReqCloseSimple(SHFLROOT idRoot, uint64_t hHostFile)
+{
+ VBOXSFCLOSEREQ *pReq = (VBOXSFCLOSEREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq)
+ {
+ int vrc = VbglR0SfHostReqClose(idRoot, pReq, hHostFile);
+ VbglR0PhysHeapFree(pReq);
+ return vrc;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/** Request structure for VbglR0SfHostReqQueryVolInfo. */
+typedef struct VBOXSFVOLINFOREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmInformation Parms;
+ SHFLVOLINFO VolInfo;
+} VBOXSFVOLINFOREQ;
+
+/**
+ * SHFL_FN_INFORMATION[SHFL_INFO_VOLUME | SHFL_INFO_GET] request.
+ */
+DECLINLINE(int) VbglR0SfHostReqQueryVolInfo(SHFLROOT idRoot, VBOXSFVOLINFOREQ *pReq, uint64_t hHostFile)
+{
+ uint32_t const cbReq = g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS
+ ? sizeof(*pReq) : RT_UOFFSETOF(VBOXSFVOLINFOREQ, VolInfo);
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_INFORMATION, SHFL_CPARMS_INFORMATION, cbReq);
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.f32Flags.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.f32Flags.u.value32 = SHFL_INFO_VOLUME | SHFL_INFO_GET;
+
+ pReq->Parms.cb32.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32.u.value32 = sizeof(pReq->VolInfo);
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pInfo.u.Embedded.cbData = sizeof(pReq->VolInfo);
+ pReq->Parms.pInfo.u.Embedded.offData = RT_UOFFSETOF(VBOXSFVOLINFOREQ, VolInfo) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pInfo.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+ }
+ else
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_LinAddr_Out;
+ pReq->Parms.pInfo.u.LinAddr.cb = sizeof(pReq->VolInfo);
+ pReq->Parms.pInfo.u.LinAddr.uAddr = (uintptr_t)&pReq->VolInfo;
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, cbReq);
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+
+/** Request structure for VbglR0SfHostReqSetObjInfo & VbglR0SfHostReqQueryObjInfo. */
+typedef struct VBOXSFOBJINFOREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmInformation Parms;
+ SHFLFSOBJINFO ObjInfo;
+} VBOXSFOBJINFOREQ;
+
+/**
+ * SHFL_FN_INFORMATION[SHFL_INFO_GET | SHFL_INFO_FILE] request.
+ */
+DECLINLINE(int) VbglR0SfHostReqQueryObjInfo(SHFLROOT idRoot, VBOXSFOBJINFOREQ *pReq, uint64_t hHostFile)
+{
+ uint32_t const cbReq = g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS
+ ? sizeof(*pReq) : RT_UOFFSETOF(VBOXSFOBJINFOREQ, ObjInfo);
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_INFORMATION, SHFL_CPARMS_INFORMATION, cbReq);
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.f32Flags.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.f32Flags.u.value32 = SHFL_INFO_GET | SHFL_INFO_FILE;
+
+ pReq->Parms.cb32.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32.u.value32 = sizeof(pReq->ObjInfo);
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pInfo.u.Embedded.cbData = sizeof(pReq->ObjInfo);
+ pReq->Parms.pInfo.u.Embedded.offData = RT_UOFFSETOF(VBOXSFOBJINFOREQ, ObjInfo) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pInfo.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+ }
+ else
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_LinAddr_Out;
+ pReq->Parms.pInfo.u.LinAddr.cb = sizeof(pReq->ObjInfo);
+ pReq->Parms.pInfo.u.LinAddr.uAddr = (uintptr_t)&pReq->ObjInfo;
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, cbReq);
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+
+/**
+ * SHFL_FN_INFORMATION[SHFL_INFO_SET | SHFL_INFO_FILE] request.
+ */
+DECLINLINE(int) VbglR0SfHostReqSetObjInfo(SHFLROOT idRoot, VBOXSFOBJINFOREQ *pReq, uint64_t hHostFile)
+{
+ uint32_t const cbReq = g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS
+ ? sizeof(*pReq) : RT_UOFFSETOF(VBOXSFOBJINFOREQ, ObjInfo);
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_INFORMATION, SHFL_CPARMS_INFORMATION, cbReq);
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.f32Flags.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.f32Flags.u.value32 = SHFL_INFO_SET | SHFL_INFO_FILE;
+
+ pReq->Parms.cb32.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32.u.value32 = sizeof(pReq->ObjInfo);
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pInfo.u.Embedded.cbData = sizeof(pReq->ObjInfo);
+ pReq->Parms.pInfo.u.Embedded.offData = RT_UOFFSETOF(VBOXSFOBJINFOREQ, ObjInfo) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pInfo.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_BOTH;
+ }
+ else
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_LinAddr;
+ pReq->Parms.pInfo.u.LinAddr.cb = sizeof(pReq->ObjInfo);
+ pReq->Parms.pInfo.u.LinAddr.uAddr = (uintptr_t)&pReq->ObjInfo;
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, cbReq);
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+
+/**
+ * SHFL_FN_INFORMATION[SHFL_INFO_SET | SHFL_INFO_SIZE] request.
+ */
+DECLINLINE(int) VbglR0SfHostReqSetFileSizeOld(SHFLROOT idRoot, VBOXSFOBJINFOREQ *pReq, uint64_t hHostFile)
+{
+ uint32_t const cbReq = g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS
+ ? sizeof(*pReq) : RT_UOFFSETOF(VBOXSFOBJINFOREQ, ObjInfo);
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_INFORMATION, SHFL_CPARMS_INFORMATION, cbReq);
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.f32Flags.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.f32Flags.u.value32 = SHFL_INFO_SET | SHFL_INFO_SIZE;
+
+ pReq->Parms.cb32.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32.u.value32 = sizeof(pReq->ObjInfo);
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pInfo.u.Embedded.cbData = sizeof(pReq->ObjInfo);
+ pReq->Parms.pInfo.u.Embedded.offData = RT_UOFFSETOF(VBOXSFOBJINFOREQ, ObjInfo) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pInfo.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_BOTH;
+ }
+ else
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_LinAddr;
+ pReq->Parms.pInfo.u.LinAddr.cb = sizeof(pReq->ObjInfo);
+ pReq->Parms.pInfo.u.LinAddr.uAddr = (uintptr_t)&pReq->ObjInfo;
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, cbReq);
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+
+/** Request structure for VbglR0SfHostReqSetObjInfo. */
+typedef struct VBOXSFOBJINFOWITHBUFREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmInformation Parms;
+ HGCMPageListInfo PgLst;
+} VBOXSFOBJINFOWITHBUFREQ;
+
+/**
+ * SHFL_FN_INFORMATION[SHFL_INFO_SET | SHFL_INFO_FILE] request, with separate
+ * buffer (on the physical heap).
+ */
+DECLINLINE(int) VbglR0SfHostReqSetObjInfoWithBuf(SHFLROOT idRoot, VBOXSFOBJINFOWITHBUFREQ *pReq, uint64_t hHostFile,
+ PSHFLFSOBJINFO pObjInfo, uint32_t offObjInfoInAlloc)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_INFORMATION, SHFL_CPARMS_INFORMATION, sizeof(*pReq));
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.f32Flags.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.f32Flags.u.value32 = SHFL_INFO_SET | SHFL_INFO_FILE;
+
+ pReq->Parms.cb32.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32.u.value32 = sizeof(*pObjInfo);
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST)
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_ContiguousPageList;
+ pReq->Parms.pInfo.u.PageList.size = sizeof(*pObjInfo);
+ pReq->Parms.pInfo.u.PageList.offset = RT_UOFFSETOF(VBOXSFOBJINFOREQ, ObjInfo) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->PgLst.flags = VBOX_HGCM_F_PARM_DIRECTION_BOTH;
+ pReq->PgLst.aPages[0] = VbglR0PhysHeapGetPhysAddr((uint8_t *)pObjInfo - offObjInfoInAlloc) + offObjInfoInAlloc;
+ pReq->PgLst.offFirstPage = (uint16_t)(pReq->PgLst.aPages[0] & PAGE_OFFSET_MASK);
+ pReq->PgLst.aPages[0] &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
+ pReq->PgLst.cPages = 1;
+ }
+ else
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_LinAddr;
+ pReq->Parms.pInfo.u.LinAddr.cb = sizeof(*pObjInfo);
+ pReq->Parms.pInfo.u.LinAddr.uAddr = (uintptr_t)pObjInfo;
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, sizeof(*pReq));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
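+
+/* Note on offObjInfoInAlloc: pObjInfo is expected to live inside a phys heap
+ * allocation (possibly embedded in some other request structure), and this
+ * parameter gives its offset from the start of that allocation so the
+ * function can compute its physical address. A hypothetical caller sketch:
+ *
+ *      VBOXSFOBJINFOREQ *pInfoReq = ...;   // some other phys heap block
+ *      vrc = VbglR0SfHostReqSetObjInfoWithBuf(idRoot, pReq, hHost, &pInfoReq->ObjInfo,
+ *                                             RT_UOFFSETOF(VBOXSFOBJINFOREQ, ObjInfo));
+ */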
+
+
+/** Request structure for VbglR0SfHostReqRemove. */
+typedef struct VBOXSFREMOVEREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmRemove Parms;
+ SHFLSTRING StrPath;
+} VBOXSFREMOVEREQ;
+
+/**
+ * SHFL_FN_REMOVE request.
+ */
+DECLINLINE(int) VbglR0SfHostReqRemove(SHFLROOT idRoot, VBOXSFREMOVEREQ *pReq, uint32_t fFlags)
+{
+ uint32_t const cbReq = RT_UOFFSETOF(VBOXSFREMOVEREQ, StrPath.String)
+ + (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS ? pReq->StrPath.u16Size : 0);
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_REMOVE, SHFL_CPARMS_REMOVE, cbReq);
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pStrPath.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pStrPath.u.Embedded.cbData = SHFLSTRING_HEADER_SIZE + pReq->StrPath.u16Size;
+ pReq->Parms.pStrPath.u.Embedded.offData = RT_UOFFSETOF(VBOXSFREMOVEREQ, StrPath) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pStrPath.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ }
+ else
+ {
+ pReq->Parms.pStrPath.type = VMMDevHGCMParmType_LinAddr_In;
+ pReq->Parms.pStrPath.u.LinAddr.cb = SHFLSTRING_HEADER_SIZE + pReq->StrPath.u16Size;
+ pReq->Parms.pStrPath.u.LinAddr.uAddr = (uintptr_t)&pReq->StrPath;
+ }
+
+ pReq->Parms.f32Flags.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.f32Flags.u.value32 = fFlags;
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, cbReq);
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
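+
+/* Caller sketch, matching what vbsf_unlink_worker() in dirops.c does:
+ *
+ *      VBOXSFREMOVEREQ *pReq = (VBOXSFREMOVEREQ *)VbglR0PhysHeapAlloc(
+ *                      RT_UOFFSETOF(VBOXSFREMOVEREQ, StrPath.String) + path->u16Size);
+ *      memcpy(&pReq->StrPath, path, SHFLSTRING_HEADER_SIZE + path->u16Size);
+ *      vrc = VbglR0SfHostReqRemove(pSuperInfo->map.root, pReq, SHFL_REMOVE_FILE);
+ *      VbglR0PhysHeapFree(pReq);
+ */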
+
+
+/** Request structure for VbglR0SfHostReqRenameWithSrcContig and
+ * VbglR0SfHostReqRenameWithSrcBuf. */
+typedef struct VBOXSFRENAMEWITHSRCBUFREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmRename Parms;
+ HGCMPageListInfo PgLst;
+ SHFLSTRING StrDstPath;
+} VBOXSFRENAMEWITHSRCBUFREQ;
+
+
+/**
+ * SHFL_FN_RENAME request.
+ */
+DECLINLINE(int) VbglR0SfHostReqRenameWithSrcContig(SHFLROOT idRoot, VBOXSFRENAMEWITHSRCBUFREQ *pReq,
+ PSHFLSTRING pSrcStr, RTGCPHYS64 PhysSrcStr, uint32_t fFlags)
+{
+ uint32_t const cbReq = RT_UOFFSETOF(VBOXSFRENAMEWITHSRCBUFREQ, StrDstPath.String)
+ + (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS ? pReq->StrDstPath.u16Size : 0);
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_RENAME, SHFL_CPARMS_RENAME, cbReq);
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST)
+ {
+ pReq->Parms.pStrSrcPath.type = VMMDevHGCMParmType_ContiguousPageList;
+ pReq->Parms.pStrSrcPath.u.PageList.size = SHFLSTRING_HEADER_SIZE + pSrcStr->u16Size;
+ pReq->Parms.pStrSrcPath.u.PageList.offset = RT_UOFFSETOF(VBOXSFRENAMEWITHSRCBUFREQ, PgLst)
+ - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->PgLst.flags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ pReq->PgLst.offFirstPage = (uint16_t)PhysSrcStr & (uint16_t)(PAGE_OFFSET_MASK);
+ pReq->PgLst.aPages[0] = PhysSrcStr & ~(RTGCPHYS64)PAGE_OFFSET_MASK;
+ pReq->PgLst.cPages = 1;
+ }
+ else
+ {
+ pReq->Parms.pStrSrcPath.type = VMMDevHGCMParmType_LinAddr_In;
+ pReq->Parms.pStrSrcPath.u.LinAddr.cb = SHFLSTRING_HEADER_SIZE + pSrcStr->u16Size;
+ pReq->Parms.pStrSrcPath.u.LinAddr.uAddr = (uintptr_t)pSrcStr;
+ }
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pStrDstPath.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pStrDstPath.u.Embedded.cbData = SHFLSTRING_HEADER_SIZE + pReq->StrDstPath.u16Size;
+ pReq->Parms.pStrDstPath.u.Embedded.offData = RT_UOFFSETOF(VBOXSFRENAMEWITHSRCBUFREQ, StrDstPath)
+ - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pStrDstPath.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ }
+ else
+ {
+ pReq->Parms.pStrDstPath.type = VMMDevHGCMParmType_LinAddr_In;
+ pReq->Parms.pStrDstPath.u.LinAddr.cb = SHFLSTRING_HEADER_SIZE + pReq->StrDstPath.u16Size;
+ pReq->Parms.pStrDstPath.u.LinAddr.uAddr = (uintptr_t)&pReq->StrDstPath;
+ }
+
+ pReq->Parms.f32Flags.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.f32Flags.u.value32 = fFlags;
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, cbReq);
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
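+
+/* Caller sketch from vbsf_inode_rename() in dirops.c: the destination path is
+ * embedded in the request, while the source path is passed as a separate,
+ * physically contiguous buffer (the inode's cached SHFLSTRING):
+ *
+ *      pReq = (VBOXSFRENAMEWITHSRCBUFREQ *)VbglR0PhysHeapAlloc(
+ *                      RT_UOFFSETOF(VBOXSFRENAMEWITHSRCBUFREQ, StrDstPath.String) + pNewPath->u16Size);
+ *      memcpy(&pReq->StrDstPath, pNewPath, SHFLSTRING_HEADER_SIZE + pNewPath->u16Size);
+ *      vrc = VbglR0SfHostReqRenameWithSrcContig(pSuperInfo->map.root, pReq,
+ *                                               pOldPath, virt_to_phys(pOldPath), fRename);
+ */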
+
+
+/**
+ * SHFL_FN_RENAME request.
+ */
+DECLINLINE(int) VbglR0SfHostReqRenameWithSrcBuf(SHFLROOT idRoot, VBOXSFRENAMEWITHSRCBUFREQ *pReq,
+ PSHFLSTRING pSrcStr, uint32_t fFlags)
+{
+ return VbglR0SfHostReqRenameWithSrcContig(idRoot, pReq, pSrcStr, VbglR0PhysHeapGetPhysAddr(pSrcStr), fFlags);
+}
+
+
+/** Request structure for VbglR0SfHostReqFlush. */
+typedef struct VBOXSFFLUSHREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmFlush Parms;
+} VBOXSFFLUSHREQ;
+
+/**
+ * SHFL_FN_FLUSH request.
+ */
+DECLINLINE(int) VbglR0SfHostReqFlush(SHFLROOT idRoot, VBOXSFFLUSHREQ *pReq, uint64_t hHostFile)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_FLUSH, SHFL_CPARMS_FLUSH, sizeof(*pReq));
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, sizeof(*pReq));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+/**
+ * SHFL_FN_FLUSH request, allocate request buffer.
+ */
+DECLINLINE(int) VbglR0SfHostReqFlushSimple(SHFLROOT idRoot, uint64_t hHostFile)
+{
+ VBOXSFFLUSHREQ *pReq = (VBOXSFFLUSHREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq)
+ {
+ int vrc = VbglR0SfHostReqFlush(idRoot, pReq, hHostFile);
+ VbglR0PhysHeapFree(pReq);
+ return vrc;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/** Request structure for VbglR0SfHostReqSetFileSize. */
+typedef struct VBOXSFSETFILESIZEREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmSetFileSize Parms;
+} VBOXSFSETFILESIZEREQ;
+
+/**
+ * SHFL_FN_SET_FILE_SIZE request.
+ */
+DECLINLINE(int) VbglR0SfHostReqSetFileSize(SHFLROOT idRoot, VBOXSFSETFILESIZEREQ *pReq, uint64_t hHostFile, uint64_t cbNewSize)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_SET_FILE_SIZE, SHFL_CPARMS_SET_FILE_SIZE, sizeof(*pReq));
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.cb64NewSize.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.cb64NewSize.u.value64 = cbNewSize;
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, sizeof(*pReq));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+/**
+ * SHFL_FN_SET_FILE_SIZE request, allocate request buffer.
+ */
+DECLINLINE(int) VbglR0SfHostReqSetFileSizeSimple(SHFLROOT idRoot, uint64_t hHostFile, uint64_t cbNewSize)
+{
+ VBOXSFSETFILESIZEREQ *pReq = (VBOXSFSETFILESIZEREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq)
+ {
+ int vrc = VbglR0SfHostReqSetFileSize(idRoot, pReq, hHostFile, cbNewSize);
+ VbglR0PhysHeapFree(pReq);
+ return vrc;
+ }
+ return VERR_NO_MEMORY;
+}
+
+
+/** Request structure for VbglR0SfHostReqReadEmbedded. */
+typedef struct VBOXSFREADEMBEDDEDREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmRead Parms;
+ uint8_t abData[RT_FLEXIBLE_ARRAY];
+} VBOXSFREADEMBEDDEDREQ;
+
+/**
+ * SHFL_FN_READ request using embedded data buffer.
+ */
+DECLINLINE(int) VbglR0SfHostReqReadEmbedded(SHFLROOT idRoot, VBOXSFREADEMBEDDEDREQ *pReq, uint64_t hHostFile,
+ uint64_t offRead, uint32_t cbToRead)
+{
+ uint32_t const cbReq = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0])
+ + (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS ? cbToRead : 0);
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_READ, SHFL_CPARMS_READ, cbReq);
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.off64Read.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.off64Read.u.value64 = offRead;
+
+ pReq->Parms.cb32Read.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32Read.u.value32 = cbToRead;
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pBuf.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pBuf.u.Embedded.cbData = cbToRead;
+ pReq->Parms.pBuf.u.Embedded.offData = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pBuf.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+ }
+ else
+ {
+ pReq->Parms.pBuf.type = VMMDevHGCMParmType_LinAddr_Out;
+ pReq->Parms.pBuf.u.LinAddr.cb = cbToRead;
+ pReq->Parms.pBuf.u.LinAddr.uAddr = (uintptr_t)&pReq->abData[0];
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, cbReq);
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
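+
+/* Usage sketch (an assumption about the read path; see regops.c for the real
+ * caller): the data comes back in pReq->abData[], with the actual byte count
+ * returned in the cb32Read parameter:
+ *
+ *      VBOXSFREADEMBEDDEDREQ *pReq = (VBOXSFREADEMBEDDEDREQ *)VbglR0PhysHeapAlloc(
+ *                      RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) + cbChunk);
+ *      vrc = VbglR0SfHostReqReadEmbedded(idRoot, pReq, hHost, offFile, cbChunk);
+ *      if (RT_SUCCESS(vrc))
+ *          memcpy(pvDst, pReq->abData, RT_MIN(cbChunk, pReq->Parms.cb32Read.u.value32));
+ *      VbglR0PhysHeapFree(pReq);
+ */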
+
+
+/** Request structure for vboxSfOs2HostReqRead & VbglR0SfHostReqReadContig. */
+typedef struct VBOXSFREADPGLSTREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmRead Parms;
+ HGCMPageListInfo PgLst;
+} VBOXSFREADPGLSTREQ;
+
+/**
+ * SHFL_FN_READ request using page list for data buffer (caller populated).
+ */
+DECLINLINE(int) VbglR0SfHostReqReadPgLst(SHFLROOT idRoot, VBOXSFREADPGLSTREQ *pReq, uint64_t hHostFile,
+ uint64_t offRead, uint32_t cbToRead, uint32_t cPages)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_READ, SHFL_CPARMS_READ,
+ RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cPages]));
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.off64Read.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.off64Read.u.value64 = offRead;
+
+ pReq->Parms.cb32Read.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32Read.u.value32 = cbToRead;
+
+ pReq->Parms.pBuf.type = g_fHostFeatures & VMMDEV_HVF_HGCM_NO_BOUNCE_PAGE_LIST
+ ? VMMDevHGCMParmType_NoBouncePageList : VMMDevHGCMParmType_PageList;
+ pReq->Parms.pBuf.u.PageList.size = cbToRead;
+ pReq->Parms.pBuf.u.PageList.offset = RT_UOFFSETOF(VBOXSFREADPGLSTREQ, PgLst) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->PgLst.flags = VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+ pReq->PgLst.cPages = (uint16_t)cPages;
+ AssertReturn(cPages <= UINT16_MAX, VERR_OUT_OF_RANGE);
+ /* caller sets offset */
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr,
+ RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cPages]));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
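+
+/* The page list is populated by the caller (note the "caller sets offset"
+ * comment above). A sketch for a buffer spanning cPages pages, assuming the
+ * caller has the physical page addresses in paPages[]:
+ *
+ *      pReq->PgLst.offFirstPage = (uint16_t)(offBuf & PAGE_OFFSET_MASK);
+ *      for (i = 0; i < cPages; i++)
+ *          pReq->PgLst.aPages[i] = paPages[i]; // page-aligned physical addresses
+ *      vrc = VbglR0SfHostReqReadPgLst(idRoot, pReq, hHost, offRead, cbToRead, cPages);
+ */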
+
+
+/**
+ * SHFL_FN_READ request using a physically contiguous buffer.
+ */
+DECLINLINE(int) VbglR0SfHostReqReadContig(SHFLROOT idRoot, VBOXSFREADPGLSTREQ *pReq, uint64_t hHostFile,
+ uint64_t offRead, uint32_t cbToRead, void *pvBuffer, RTGCPHYS64 PhysBuffer)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_READ, SHFL_CPARMS_READ, RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[1]));
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.off64Read.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.off64Read.u.value64 = offRead;
+
+ pReq->Parms.cb32Read.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32Read.u.value32 = cbToRead;
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST)
+ {
+ pReq->Parms.pBuf.type = VMMDevHGCMParmType_ContiguousPageList;
+ pReq->Parms.pBuf.u.PageList.size = cbToRead;
+ pReq->Parms.pBuf.u.PageList.offset = RT_UOFFSETOF(VBOXSFREADPGLSTREQ, PgLst) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->PgLst.flags = VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+ pReq->PgLst.offFirstPage = (uint16_t)(PhysBuffer & PAGE_OFFSET_MASK);
+ pReq->PgLst.cPages = 1;
+ pReq->PgLst.aPages[0] = PhysBuffer & ~(RTGCPHYS64)PAGE_OFFSET_MASK;
+ }
+ else
+ {
+ pReq->Parms.pBuf.type = VMMDevHGCMParmType_LinAddr_Out;
+ pReq->Parms.pBuf.u.LinAddr.cb = cbToRead;
+ pReq->Parms.pBuf.u.LinAddr.uAddr = (uintptr_t)pvBuffer;
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[1]));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+
+
+/** Request structure for VbglR0SfHostReqWriteEmbedded. */
+typedef struct VBOXSFWRITEEMBEDDEDREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmWrite Parms;
+ uint8_t abData[RT_FLEXIBLE_ARRAY];
+} VBOXSFWRITEEMBEDDEDREQ;
+
+/**
+ * SHFL_FN_WRITE request using embedded data buffer.
+ */
+DECLINLINE(int) VbglR0SfHostReqWriteEmbedded(SHFLROOT idRoot, VBOXSFWRITEEMBEDDEDREQ *pReq, uint64_t hHostFile,
+ uint64_t offWrite, uint32_t cbToWrite)
+{
+ uint32_t const cbReq = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0])
+ + (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS ? cbToWrite : 0);
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_WRITE, SHFL_CPARMS_WRITE, cbReq);
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.off64Write.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.off64Write.u.value64 = offWrite;
+
+ pReq->Parms.cb32Write.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32Write.u.value32 = cbToWrite;
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pBuf.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pBuf.u.Embedded.cbData = cbToWrite;
+ pReq->Parms.pBuf.u.Embedded.offData = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pBuf.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ }
+ else
+ {
+ pReq->Parms.pBuf.type = VMMDevHGCMParmType_LinAddr_In;
+ pReq->Parms.pBuf.u.LinAddr.cb = cbToWrite;
+ pReq->Parms.pBuf.u.LinAddr.uAddr = (uintptr_t)&pReq->abData[0];
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, cbReq);
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
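A corresponding hypothetical call site for the embedded write (pvSrc and cbWritten are illustrative): the payload has to be copied into abData before the call, and the allocation must include room for it so the LinAddr fallback works too:

    VBOXSFWRITEEMBEDDEDREQ *pReq;
    pReq = (VBOXSFWRITEEMBEDDEDREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) + cbToWrite);
    if (!pReq)
        return VERR_NO_MEMORY;
    memcpy(pReq->abData, pvSrc, cbToWrite);                 /* payload travels inside the request */
    int vrc = VbglR0SfHostReqWriteEmbedded(idRoot, pReq, hHostFile, offWrite, cbToWrite);
    if (RT_SUCCESS(vrc))
        cbWritten = pReq->Parms.cb32Write.u.value32;        /* in/out: bytes actually written */
    VbglR0PhysHeapFree(pReq);
    return vrc;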
+
+/** Request structure for vboxSfOs2HostReqWrite and VbglR0SfHostReqWriteContig. */
+typedef struct VBOXSFWRITEPGLSTREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmWrite Parms;
+ HGCMPageListInfo PgLst;
+} VBOXSFWRITEPGLSTREQ;
+
+/**
+ * SHFL_FN_WRITE request using page list for data buffer (caller populated).
+ */
+DECLINLINE(int) VbglR0SfHostReqWritePgLst(SHFLROOT idRoot, VBOXSFWRITEPGLSTREQ *pReq, uint64_t hHostFile,
+ uint64_t offWrite, uint32_t cbToWrite, uint32_t cPages)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_WRITE, SHFL_CPARMS_WRITE,
+ RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cPages]));
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.off64Write.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.off64Write.u.value64 = offWrite;
+
+ pReq->Parms.cb32Write.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32Write.u.value32 = cbToWrite;
+
+ pReq->Parms.pBuf.type = g_fHostFeatures & VMMDEV_HVF_HGCM_NO_BOUNCE_PAGE_LIST
+ ? VMMDevHGCMParmType_NoBouncePageList : VMMDevHGCMParmType_PageList;
+ pReq->Parms.pBuf.u.PageList.size = cbToWrite;
+ pReq->Parms.pBuf.u.PageList.offset = RT_UOFFSETOF(VBOXSFWRITEPGLSTREQ, PgLst) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->PgLst.flags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ pReq->PgLst.cPages = (uint16_t)cPages;
+ AssertReturn(cPages <= UINT16_MAX, VERR_OUT_OF_RANGE);
+ /* caller sets offset */
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr,
+ RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cPages]));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+
+/**
+ * SHFL_FN_WRITE request using a physically contiguous buffer.
+ */
+DECLINLINE(int) VbglR0SfHostReqWriteContig(SHFLROOT idRoot, VBOXSFWRITEPGLSTREQ *pReq, uint64_t hHostFile,
+ uint64_t offWrite, uint32_t cbToWrite, void const *pvBuffer, RTGCPHYS64 PhysBuffer)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_WRITE, SHFL_CPARMS_WRITE, RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[1]));
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostFile;
+
+ pReq->Parms.off64Write.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.off64Write.u.value64 = offWrite;
+
+ pReq->Parms.cb32Write.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32Write.u.value32 = cbToWrite;
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST)
+ {
+ pReq->Parms.pBuf.type = VMMDevHGCMParmType_ContiguousPageList;
+ pReq->Parms.pBuf.u.PageList.size = cbToWrite;
+ pReq->Parms.pBuf.u.PageList.offset = RT_UOFFSETOF(VBOXSFWRITEPGLSTREQ, PgLst) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->PgLst.flags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ pReq->PgLst.offFirstPage = (uint16_t)(PhysBuffer & PAGE_OFFSET_MASK);
+ pReq->PgLst.cPages = 1;
+ pReq->PgLst.aPages[0] = PhysBuffer & ~(RTGCPHYS64)PAGE_OFFSET_MASK;
+ }
+ else
+ {
+ pReq->Parms.pBuf.type = VMMDevHGCMParmType_LinAddr_In;
+ pReq->Parms.pBuf.u.LinAddr.cb = cbToWrite;
+ pReq->Parms.pBuf.u.LinAddr.uAddr = (uintptr_t)pvBuffer;
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[1]));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+
+/** Request structure for VbglR0SfHostReqCopyFilePart. */
+typedef struct VBOXSFCOPYFILEPARTREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmCopyFilePart Parms;
+} VBOXSFCOPYFILEPARTREQ;
+
+/**
+ * SHFL_FN_COPY_FILE_PART request.
+ */
+DECLINLINE(int) VbglR0SfHostReqCopyFilePart(SHFLROOT idRootSrc, SHFLHANDLE hHostFileSrc, uint64_t offSrc,
+ SHFLROOT idRootDst, SHFLHANDLE hHostFileDst, uint64_t offDst,
+ uint64_t cbToCopy, uint32_t fFlags, VBOXSFCOPYFILEPARTREQ *pReq)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_COPY_FILE_PART, SHFL_CPARMS_COPY_FILE_PART, sizeof(*pReq));
+
+ pReq->Parms.id32RootSrc.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32RootSrc.u.value32 = idRootSrc;
+
+ pReq->Parms.u64HandleSrc.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64HandleSrc.u.value64 = hHostFileSrc;
+
+ pReq->Parms.off64Src.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.off64Src.u.value64 = offSrc;
+
+ pReq->Parms.id32RootDst.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32RootDst.u.value32 = idRootDst;
+
+ pReq->Parms.u64HandleDst.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64HandleDst.u.value64 = hHostFileDst;
+
+ pReq->Parms.off64Dst.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.off64Dst.u.value64 = offDst;
+
+ pReq->Parms.cb64ToCopy.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.cb64ToCopy.u.value64 = cbToCopy;
+
+ pReq->Parms.f32Flags.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.f32Flags.u.value32 = fFlags;
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, sizeof(*pReq));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+
+
+/** Request structure for VbglR0SfHostReqListDirContig2x() and
+ * VbglR0SfHostReqListDir(). */
+typedef struct VBOXSFLISTDIRREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmList Parms;
+ HGCMPageListInfo StrPgLst;
+ HGCMPageListInfo BufPgLst;
+} VBOXSFLISTDIRREQ;
+
+/**
+ * SHFL_FN_LIST request with separate string buffer and buffers for entries,
+ * both physically contiguous allocations.
+ */
+DECLINLINE(int) VbglR0SfHostReqListDirContig2x(SHFLROOT idRoot, VBOXSFLISTDIRREQ *pReq, uint64_t hHostDir,
+ PSHFLSTRING pFilter, RTGCPHYS64 PhysFilter, uint32_t fFlags,
+ PSHFLDIRINFO pBuffer, RTGCPHYS64 PhysBuffer, uint32_t cbBuffer)
+{
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_LIST, SHFL_CPARMS_LIST, sizeof(*pReq));
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ pReq->Parms.u64Handle.type = VMMDevHGCMParmType_64bit;
+ pReq->Parms.u64Handle.u.value64 = hHostDir;
+
+ pReq->Parms.f32Flags.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.f32Flags.u.value32 = fFlags;
+
+ pReq->Parms.cb32Buffer.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.cb32Buffer.u.value32 = cbBuffer;
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST)
+ {
+ pReq->Parms.pStrFilter.type = VMMDevHGCMParmType_ContiguousPageList;
+ pReq->Parms.pStrFilter.u.PageList.offset = RT_UOFFSETOF(VBOXSFLISTDIRREQ, StrPgLst) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->StrPgLst.flags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ pReq->StrPgLst.cPages = 1;
+ if (pFilter)
+ {
+ pReq->Parms.pStrFilter.u.PageList.size = SHFLSTRING_HEADER_SIZE + pFilter->u16Size;
+ uint32_t const offFirstPage = (uint32_t)PhysFilter & PAGE_OFFSET_MASK;
+ pReq->StrPgLst.offFirstPage = (uint16_t)offFirstPage;
+ pReq->StrPgLst.aPages[0] = PhysFilter - offFirstPage;
+ }
+ else
+ {
+ pReq->Parms.pStrFilter.u.PageList.size = 0;
+ pReq->StrPgLst.offFirstPage = 0;
+ pReq->StrPgLst.aPages[0] = NIL_RTGCPHYS64;
+ }
+
+ pReq->Parms.pBuffer.type = VMMDevHGCMParmType_ContiguousPageList;
+ pReq->Parms.pBuffer.u.PageList.offset = RT_UOFFSETOF(VBOXSFLISTDIRREQ, BufPgLst) - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pBuffer.u.PageList.size = cbBuffer;
+ pReq->BufPgLst.flags = VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+ pReq->BufPgLst.cPages = 1;
+ uint32_t const offFirstPage = (uint32_t)PhysBuffer & PAGE_OFFSET_MASK;
+ pReq->BufPgLst.offFirstPage = (uint16_t)offFirstPage;
+ pReq->BufPgLst.aPages[0] = PhysBuffer - offFirstPage;
+ }
+ else
+ {
+ pReq->Parms.pStrFilter.type = VMMDevHGCMParmType_LinAddr_In;
+ pReq->Parms.pStrFilter.u.LinAddr.cb = pFilter ? SHFLSTRING_HEADER_SIZE + pFilter->u16Size : 0;
+ pReq->Parms.pStrFilter.u.LinAddr.uAddr = (uintptr_t)pFilter;
+
+ pReq->Parms.pBuffer.type = VMMDevHGCMParmType_LinAddr_Out;
+ pReq->Parms.pBuffer.u.LinAddr.cb = cbBuffer;
+ pReq->Parms.pBuffer.u.LinAddr.uAddr = (uintptr_t)pBuffer;
+ }
+
+ pReq->Parms.f32More.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.f32More.u.value32 = 0;
+
+ pReq->Parms.c32Entries.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.c32Entries.u.value32 = 0;
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, sizeof(*pReq));
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+/**
+ * SHFL_FN_LIST request with separate string buffer and buffers for entries,
+ * both allocated on the physical heap.
+ */
+DECLINLINE(int) VbglR0SfHostReqListDir(SHFLROOT idRoot, VBOXSFLISTDIRREQ *pReq, uint64_t hHostDir,
+ PSHFLSTRING pFilter, uint32_t fFlags, PSHFLDIRINFO pBuffer, uint32_t cbBuffer)
+{
+ return VbglR0SfHostReqListDirContig2x(idRoot,
+ pReq,
+ hHostDir,
+ pFilter,
+ pFilter ? VbglR0PhysHeapGetPhysAddr(pFilter) : NIL_RTGCPHYS64,
+ fFlags,
+ pBuffer,
+ VbglR0PhysHeapGetPhysAddr(pBuffer),
+ cbBuffer);
+}
+
+
+/** Request structure for VbglR0SfHostReqReadLink. */
+typedef struct VBOXSFREADLINKREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmReadLink Parms;
+ HGCMPageListInfo PgLst;
+ SHFLSTRING StrPath;
+} VBOXSFREADLINKREQ;
+
+/**
+ * SHFL_FN_READLINK request.
+ *
+ * @note Buffer contains UTF-8 characters on success, regardless of the
+ * UTF-8/UTF-16 setting of the connection.
+ */
+DECLINLINE(int) VbglR0SfHostReqReadLinkContig(SHFLROOT idRoot, void *pvBuffer, RTGCPHYS64 PhysBuffer, uint32_t cbBuffer,
+ VBOXSFREADLINKREQ *pReq)
+{
+ uint32_t const cbReq = g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS
+ ? RT_UOFFSETOF(VBOXSFREADLINKREQ, StrPath.String) + pReq->StrPath.u16Size
+ : cbBuffer <= PAGE_SIZE - (PhysBuffer & PAGE_OFFSET_MASK)
+ || (g_fHostFeatures & VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST)
+ ? RT_UOFFSETOF(VBOXSFREADLINKREQ, StrPath.String)
+ : RT_UOFFSETOF(VBOXSFREADLINKREQ, PgLst);
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_READLINK, SHFL_CPARMS_READLINK, cbReq);
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pStrPath.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pStrPath.u.Embedded.cbData = SHFLSTRING_HEADER_SIZE + pReq->StrPath.u16Size;
+ pReq->Parms.pStrPath.u.Embedded.offData = RT_UOFFSETOF(VBOXSFREADLINKREQ, StrPath)
+ - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pStrPath.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ }
+ else
+ {
+ pReq->Parms.pStrPath.type = VMMDevHGCMParmType_LinAddr_In;
+ pReq->Parms.pStrPath.u.LinAddr.cb = SHFLSTRING_HEADER_SIZE + pReq->StrPath.u16Size;
+ pReq->Parms.pStrPath.u.LinAddr.uAddr = (uintptr_t)&pReq->StrPath;
+ }
+
+ if ( cbBuffer <= PAGE_SIZE - (PhysBuffer & PAGE_OFFSET_MASK)
+ || (g_fHostFeatures & VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST))
+ {
+ pReq->Parms.pBuffer.type = cbBuffer <= PAGE_SIZE - (PhysBuffer & PAGE_OFFSET_MASK)
+ ? VMMDevHGCMParmType_PageList
+ : VMMDevHGCMParmType_ContiguousPageList;
+ pReq->Parms.pBuffer.u.PageList.size = cbBuffer;
+ pReq->Parms.pBuffer.u.PageList.offset = RT_UOFFSETOF(VBOXSFREADLINKREQ, PgLst)
+ - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->PgLst.flags = VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+ pReq->PgLst.offFirstPage = (uint16_t)PhysBuffer & (uint16_t)(PAGE_OFFSET_MASK);
+ pReq->PgLst.aPages[0] = PhysBuffer & ~(RTGCPHYS64)PAGE_OFFSET_MASK;
+ pReq->PgLst.cPages = 1;
+ }
+ else
+ {
+ pReq->Parms.pBuffer.type = VMMDevHGCMParmType_LinAddr_Out;
+ pReq->Parms.pBuffer.u.LinAddr.cb = cbBuffer;
+ pReq->Parms.pBuffer.u.LinAddr.uAddr = (uintptr_t)pvBuffer;
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, cbReq);
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+/**
+ * SHFL_FN_READLINK request, simplified version.
+ *
+ * @note Buffer contains UTF-8 characters on success, regardless of the
+ * UTF-8/UTF-16 setting of the connection.
+ */
+DECLINLINE(int) VbglR0SfHostReqReadLinkContigSimple(SHFLROOT idRoot, const char *pszPath, size_t cchPath, void *pvBuf,
+ RTGCPHYS64 PhysBuffer, uint32_t cbBuffer)
+{
+ if (cchPath < _64K - 1)
+ {
+ VBOXSFREADLINKREQ *pReq = (VBOXSFREADLINKREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF(VBOXSFREADLINKREQ, StrPath.String)
+ + SHFLSTRING_HEADER_SIZE + (uint32_t)cchPath);
+ if (pReq)
+ {
+ pReq->StrPath.u16Length = (uint16_t)cchPath;
+ pReq->StrPath.u16Size = (uint16_t)cchPath + 1;
+ memcpy(pReq->StrPath.String.ach, pszPath, cchPath);
+ pReq->StrPath.String.ach[cchPath] = '\0';
+
+ {
+ int vrc = VbglR0SfHostReqReadLinkContig(idRoot, pvBuf, PhysBuffer, cbBuffer, pReq);
+ VbglR0PhysHeapFree(pReq);
+ return vrc;
+ }
+ }
+ return VERR_NO_MEMORY;
+ }
+ return VERR_FILENAME_TOO_LONG;
+}
+
+
+/** Request structure for VbglR0SfHostReqCreateSymlink. */
+typedef struct VBOXSFCREATESYMLINKREQ
+{
+ VBGLIOCIDCHGCMFASTCALL Hdr;
+ VMMDevHGCMCall Call;
+ VBoxSFParmCreateSymlink Parms;
+ HGCMPageListInfo PgLstTarget;
+ SHFLFSOBJINFO ObjInfo;
+ SHFLSTRING StrSymlinkPath;
+} VBOXSFCREATESYMLINKREQ;
+
+/**
+ * SHFL_FN_SYMLINK request.
+ *
+ * Caller fills in the symlink string and supplies a physically contiguous
+ * target string.
+ */
+DECLINLINE(int) VbglR0SfHostReqCreateSymlinkContig(SHFLROOT idRoot, PCSHFLSTRING pStrTarget, RTGCPHYS64 PhysTarget,
+ VBOXSFCREATESYMLINKREQ *pReq)
+{
+ uint32_t const cbTarget = SHFLSTRING_HEADER_SIZE + pStrTarget->u16Size;
+ uint32_t const cbReq = g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS
+ ? RT_UOFFSETOF(VBOXSFCREATESYMLINKREQ, StrSymlinkPath.String) + pReq->StrSymlinkPath.u16Size
+ : RT_UOFFSETOF(VBOXSFCREATESYMLINKREQ, ObjInfo) /*simplified*/;
+ VBGLIOCIDCHGCMFASTCALL_INIT(&pReq->Hdr, VbglR0PhysHeapGetPhysAddr(pReq), &pReq->Call, g_SfClient.idClient,
+ SHFL_FN_SYMLINK, SHFL_CPARMS_SYMLINK, cbReq);
+
+ pReq->Parms.id32Root.type = VMMDevHGCMParmType_32bit;
+ pReq->Parms.id32Root.u.value32 = idRoot;
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pStrSymlink.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pStrSymlink.u.Embedded.cbData = SHFLSTRING_HEADER_SIZE + pReq->StrSymlinkPath.u16Size;
+ pReq->Parms.pStrSymlink.u.Embedded.offData = RT_UOFFSETOF(VBOXSFCREATESYMLINKREQ, StrSymlinkPath)
+ - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pStrSymlink.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ }
+ else
+ {
+ pReq->Parms.pStrSymlink.type = VMMDevHGCMParmType_LinAddr_In;
+ pReq->Parms.pStrSymlink.u.LinAddr.cb = SHFLSTRING_HEADER_SIZE + pReq->StrSymlinkPath.u16Size;
+ pReq->Parms.pStrSymlink.u.LinAddr.uAddr = (uintptr_t)&pReq->StrSymlinkPath;
+ }
+
+ if ( cbTarget <= PAGE_SIZE - (PhysTarget & PAGE_OFFSET_MASK)
+ || (g_fHostFeatures & VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST))
+ {
+ pReq->Parms.pStrTarget.type = cbTarget <= PAGE_SIZE - (PhysTarget & PAGE_OFFSET_MASK)
+ ? VMMDevHGCMParmType_PageList
+ : VMMDevHGCMParmType_ContiguousPageList;
+ pReq->Parms.pStrTarget.u.PageList.size = cbTarget;
+ pReq->Parms.pStrTarget.u.PageList.offset = RT_UOFFSETOF(VBOXSFCREATESYMLINKREQ, PgLstTarget)
+ - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->PgLstTarget.flags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+ pReq->PgLstTarget.offFirstPage = (uint16_t)PhysTarget & (uint16_t)(PAGE_OFFSET_MASK);
+ pReq->PgLstTarget.aPages[0] = PhysTarget & ~(RTGCPHYS64)PAGE_OFFSET_MASK;
+ pReq->PgLstTarget.cPages = 1;
+ }
+ else
+ {
+ pReq->Parms.pStrTarget.type = VMMDevHGCMParmType_LinAddr_In;
+ pReq->Parms.pStrTarget.u.LinAddr.cb = cbTarget;
+ pReq->Parms.pStrTarget.u.LinAddr.uAddr = (uintptr_t)pStrTarget;
+ }
+
+ if (g_fHostFeatures & VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS)
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_Embedded;
+ pReq->Parms.pInfo.u.Embedded.cbData = sizeof(pReq->ObjInfo);
+ pReq->Parms.pInfo.u.Embedded.offData = RT_UOFFSETOF(VBOXSFCREATESYMLINKREQ, ObjInfo)
+ - sizeof(VBGLIOCIDCHGCMFASTCALL);
+ pReq->Parms.pInfo.u.Embedded.fFlags = VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+ }
+ else
+ {
+ pReq->Parms.pInfo.type = VMMDevHGCMParmType_LinAddr_Out;
+ pReq->Parms.pInfo.u.LinAddr.cb = sizeof(pReq->ObjInfo);
+ pReq->Parms.pInfo.u.LinAddr.uAddr = (uintptr_t)&pReq->ObjInfo;
+ }
+
+ int vrc = VbglR0HGCMFastCall(g_SfClient.handle, &pReq->Hdr, cbReq);
+ if (RT_SUCCESS(vrc))
+ vrc = pReq->Call.header.result;
+ return vrc;
+}
+
+/** @} */
+
+#endif /* !VBOX_INCLUDED_VBoxGuestLibSharedFoldersInline_h */
+
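To tie the SHFL_FN_COPY_FILE_PART pieces together, a hypothetical guest-side call (cbCopied is an illustrative name; fFlags is reserved and must be zero per the shflsvc.h changes below):

    VBOXSFCOPYFILEPARTREQ *pReq = (VBOXSFCOPYFILEPARTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
    if (!pReq)
        return VERR_NO_MEMORY;
    int vrc = VbglR0SfHostReqCopyFilePart(idRootSrc, hHostFileSrc, offSrc,
                                          idRootDst, hHostFileDst, offDst,
                                          cbToCopy, 0 /*fFlags*/, pReq);
    if (RT_SUCCESS(vrc))
        cbCopied = pReq->Parms.cb64ToCopy.u.value64;        /* in/out: bytes actually copied */
    VbglR0PhysHeapFree(pReq);
    return vrc;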
diff --git a/ubuntu/vbox/vboxsf/include/VBox/VMMDev.h b/ubuntu/vbox/vboxsf/include/VBox/VMMDev.h
index 3212e9941a7f..5d5c42861d65 100644
--- a/ubuntu/vbox/vboxsf/include/VBox/VMMDev.h
+++ b/ubuntu/vbox/vboxsf/include/VBox/VMMDev.h
@@ -162,12 +162,17 @@ typedef enum VMMDevRequestType
#ifdef VBOX_WITH_HGCM
VMMDevReq_HGCMConnect = 60,
VMMDevReq_HGCMDisconnect = 61,
-#ifdef VBOX_WITH_64_BITS_GUESTS
VMMDevReq_HGCMCall32 = 62,
VMMDevReq_HGCMCall64 = 63,
-#else
- VMMDevReq_HGCMCall = 62,
-#endif /* VBOX_WITH_64_BITS_GUESTS */
+# ifdef IN_GUEST
+# if ARCH_BITS == 64
+ VMMDevReq_HGCMCall = VMMDevReq_HGCMCall64,
+# elif ARCH_BITS == 32 || ARCH_BITS == 16
+ VMMDevReq_HGCMCall = VMMDevReq_HGCMCall32,
+# else
+# error "Unsupported ARCH_BITS"
+# endif
+# endif
VMMDevReq_HGCMCancel = 64,
VMMDevReq_HGCMCancel2 = 65,
#endif
@@ -198,28 +203,6 @@ typedef enum VMMDevRequestType
VMMDevReq_SizeHack = 0x7fffffff
} VMMDevRequestType;
-#ifdef VBOX_WITH_64_BITS_GUESTS
-/*
- * Constants and structures are redefined for the guest.
- *
- * Host code MUST always use either *32 or *64 variant explicitely.
- * Host source code will use VBOX_HGCM_HOST_CODE define to catch undefined
- * data types and constants.
- *
- * This redefinition means that the new additions builds will use
- * the *64 or *32 variants depending on the current architecture bit count (ARCH_BITS).
- */
-# ifndef VBOX_HGCM_HOST_CODE
-# if ARCH_BITS == 64
-# define VMMDevReq_HGCMCall VMMDevReq_HGCMCall64
-# elif ARCH_BITS == 32 || ARCH_BITS == 16
-# define VMMDevReq_HGCMCall VMMDevReq_HGCMCall32
-# else
-# error "Unsupported ARCH_BITS"
-# endif
-# endif /* !VBOX_HGCM_HOST_CODE */
-#endif /* VBOX_WITH_64_BITS_GUESTS */
-
/** Version of VMMDevRequestHeader structure. */
#define VMMDEV_REQUEST_HEADER_VERSION (0x10001)
@@ -296,9 +279,8 @@ AssertCompileSize(VMMDevRequestHeader, 24);
/** Requestor process belongs to user on the physical console, but cannot
* ascertain that it is associated with that login. */
#define VMMDEV_REQUESTOR_CON_USER UINT32_C(0x00000030)
-/** Requestor process belongs to user on the physical console, but cannot
- * ascertain that it is associated with that login. */
-#define VMMDEV_REQUESTOR_CON_MASK UINT32_C(0x00000040)
+/** Mask for the physical console state of the requestor. */
+#define VMMDEV_REQUESTOR_CON_MASK UINT32_C(0x00000030)
/** Requestor is member of special VirtualBox user group (not on windows). */
#define VMMDEV_REQUESTOR_GRP_VBOX UINT32_C(0x00000080)
@@ -548,6 +530,8 @@ AssertCompileSize(VMMDevReqHostVersion, 24+16);
#define VMMDEV_HVF_HGCM_EMBEDDED_BUFFERS RT_BIT_32(1)
/** HGCM supports the contiguous page list parameter type. */
#define VMMDEV_HVF_HGCM_CONTIGUOUS_PAGE_LIST RT_BIT_32(2)
+/** HGCM supports the no-bounce page list parameter type. */
+#define VMMDEV_HVF_HGCM_NO_BOUNCE_PAGE_LIST RT_BIT_32(3)
/** VMMDev supports fast IRQ acknowledgements. */
#define VMMDEV_HVF_FAST_IRQ_ACK RT_BIT_32(31)
/** @} */
@@ -1625,7 +1609,7 @@ AssertCompileSize(VMMDevHGCMDisconnect, 32+4);
/**
* HGCM call request structure.
*
- * Used by VMMDevReq_HGCMCall, VMMDevReq_HGCMCall32 and VMMDevReq_HGCMCall64.
+ * Used by VMMDevReq_HGCMCall32 and VMMDevReq_HGCMCall64.
*/
typedef struct
{
@@ -1648,10 +1632,11 @@ AssertCompileSize(VMMDevHGCMCall, 32+12);
#define VBOX_HGCM_F_PARM_DIRECTION_TO_HOST UINT32_C(0x00000001)
#define VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST UINT32_C(0x00000002)
#define VBOX_HGCM_F_PARM_DIRECTION_BOTH UINT32_C(0x00000003)
+#define VBOX_HGCM_F_PARM_DIRECTION_MASK UINT32_C(0x00000003)
/** Macro for validating that the specified flags are valid. */
#define VBOX_HGCM_F_PARM_ARE_VALID(fFlags) \
- ( (fFlags) > VBOX_HGCM_F_PARM_DIRECTION_NONE \
- && (fFlags) <= VBOX_HGCM_F_PARM_DIRECTION_BOTH )
+ ( ((fFlags) & VBOX_HGCM_F_PARM_DIRECTION_MASK) \
+ && !((fFlags) & ~VBOX_HGCM_F_PARM_DIRECTION_MASK) )
/** @} */
/**
@@ -1780,15 +1765,12 @@ DECLINLINE(size_t) vmmdevGetRequestSize(VMMDevRequestType requestType)
return sizeof(VMMDevHGCMConnect);
case VMMDevReq_HGCMDisconnect:
return sizeof(VMMDevHGCMDisconnect);
-#ifdef VBOX_WITH_64_BITS_GUESTS
case VMMDevReq_HGCMCall32:
return sizeof(VMMDevHGCMCall);
+# ifdef VBOX_WITH_64_BITS_GUESTS
case VMMDevReq_HGCMCall64:
return sizeof(VMMDevHGCMCall);
-#else
- case VMMDevReq_HGCMCall:
- return sizeof(VMMDevHGCMCall);
-#endif /* VBOX_WITH_64_BITS_GUESTS */
+# endif
case VMMDevReq_HGCMCancel:
return sizeof(VMMDevHGCMCancel);
#endif /* VBOX_WITH_HGCM */
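The rewritten VBOX_HGCM_F_PARM_ARE_VALID accepts exactly the three direction values, now expressed via the new VBOX_HGCM_F_PARM_DIRECTION_MASK: at least one direction bit set, nothing outside the mask. A quick compile-time sanity sketch (usable at file scope):

    AssertCompile( VBOX_HGCM_F_PARM_ARE_VALID(VBOX_HGCM_F_PARM_DIRECTION_TO_HOST));
    AssertCompile( VBOX_HGCM_F_PARM_ARE_VALID(VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST));
    AssertCompile( VBOX_HGCM_F_PARM_ARE_VALID(VBOX_HGCM_F_PARM_DIRECTION_BOTH));
    AssertCompile(!VBOX_HGCM_F_PARM_ARE_VALID(VBOX_HGCM_F_PARM_DIRECTION_NONE)); /* zero: no direction bit */
    AssertCompile(!VBOX_HGCM_F_PARM_ARE_VALID(UINT32_C(0x00000004)));            /* bit outside the mask */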
diff --git a/ubuntu/vbox/vboxsf/include/VBox/VMMDevCoreTypes.h b/ubuntu/vbox/vboxsf/include/VBox/VMMDevCoreTypes.h
index af024c2772d5..c8d5b60367dc 100644
--- a/ubuntu/vbox/vboxsf/include/VBox/VMMDevCoreTypes.h
+++ b/ubuntu/vbox/vboxsf/include/VBox/VMMDevCoreTypes.h
@@ -278,6 +278,7 @@ typedef enum
VMMDevHGCMParmType_PageList = 10, /**< Physical addresses of locked pages for a buffer. */
VMMDevHGCMParmType_Embedded = 11, /**< Small buffer embedded in request. */
VMMDevHGCMParmType_ContiguousPageList = 12, /**< Like PageList but with physically contiguous memory, so only one page entry. */
+ VMMDevHGCMParmType_NoBouncePageList = 13, /**< Like PageList but host function requires no bounce buffering. */
VMMDevHGCMParmType_SizeHack = 0x7fffffff
} HGCMFunctionParameterType;
AssertCompileSize(HGCMFunctionParameterType, 4);
diff --git a/ubuntu/vbox/vboxsf/include/VBox/err.h b/ubuntu/vbox/vboxsf/include/VBox/err.h
index 20e8432b25f1..c81ebc4de3eb 100644
--- a/ubuntu/vbox/vboxsf/include/VBox/err.h
+++ b/ubuntu/vbox/vboxsf/include/VBox/err.h
@@ -2117,6 +2117,10 @@
/** The behavior of the instruction/operation is modified/needs modification
* in VMX non-root mode. */
#define VINF_VMX_MODIFIES_BEHAVIOR 4036
+/** VMLAUNCH/VMRESUME succeeded, can enter nested-guest execution. */
+#define VINF_VMX_VMLAUNCH_VMRESUME 4037
+/** VT-x VMCS launch state invalid. */
+#define VERR_VMX_INVALID_VMCS_LAUNCH_STATE (-4038)
/** @} */
@@ -2718,10 +2722,14 @@
#define VWRN_GSTCTL_OBJECTSTATE_CHANGED 6220
/** Guest process is in a wrong state. */
#define VERR_GSTCTL_PROCESS_WRONG_STATE (-6221)
-/** Maximum objects has been reached. */
-#define VERR_GSTCTL_MAX_OBJECTS_REACHED (-6222)
+/** Maximum (context ID) sessions have been reached. */
+#define VERR_GSTCTL_MAX_CID_SESSIONS_REACHED (-6222)
+/** Maximum (context ID) objects have been reached. */
+#define VERR_GSTCTL_MAX_CID_OBJECTS_REACHED (-6223)
+/** Maximum (context ID object) count has been reached. */
+#define VERR_GSTCTL_MAX_CID_COUNT_REACHED (-6224)
/** Started guest process terminated with an exit code <> 0. */
-#define VERR_GSTCTL_PROCESS_EXIT_CODE (-6223)
+#define VERR_GSTCTL_PROCESS_EXIT_CODE (-6225)
/** @} */
diff --git a/ubuntu/vbox/vboxsf/include/VBox/shflsvc.h b/ubuntu/vbox/vboxsf/include/VBox/shflsvc.h
index 5e49f24ba896..b34261c1ec4f 100644
--- a/ubuntu/vbox/vboxsf/include/VBox/shflsvc.h
+++ b/ubuntu/vbox/vboxsf/include/VBox/shflsvc.h
@@ -118,7 +118,7 @@
#define SHFL_FN_MAP_FOLDER (17)
/** Read symlink destination.
* @since VBox 4.0 */
-#define SHFL_FN_READLINK (18)
+#define SHFL_FN_READLINK (18) /**< @todo rename to SHFL_FN_READ_LINK (see struct capitalization) */
/** Create symlink.
* @since VBox 4.0 */
#define SHFL_FN_SYMLINK (19)
@@ -138,6 +138,17 @@
/** Sets the file size.
* @since VBox 6.0 */
#define SHFL_FN_SET_FILE_SIZE (24)
+/** Queries supported features.
+ * @since VBox 6.0.6 */
+#define SHFL_FN_QUERY_FEATURES (25)
+/** Copies a file to another.
+ * @since VBox 6.0.6 */
+#define SHFL_FN_COPY_FILE (26)
+/** Copies part of a file to another.
+ * @since VBox 6.0.6 */
+#define SHFL_FN_COPY_FILE_PART (27)
+/** The last function number. */
+#define SHFL_FN_LAST SHFL_FN_COPY_FILE_PART
/** @} */
@@ -1348,7 +1359,12 @@ typedef struct VBoxSFParmWrite
HGCMFunctionParameter id32Root;
/** value64, in: SHFLHANDLE of object to write to. */
HGCMFunctionParameter u64Handle;
- /** value64, in: Offset to start writing at. */
+ /** value64, in/out: Offset to start writing at / New offset.
+ * @note The new offset isn't necessarily off + cb for files opened with
+ * SHFL_CF_ACCESS_APPEND since other parties (host programs, other VMs,
+ * other computers) could have extended the file since the last time the
+ * guest got a fresh size statistic. So, this helps the guest avoid
+ * a stat call to check the actual size. */
HGCMFunctionParameter off64Write;
/** value32, in/out: How much to try write / Actually written. */
HGCMFunctionParameter cb32Write;
@@ -1371,8 +1387,13 @@ typedef struct _VBoxSFWrite
*/
HGCMFunctionParameter handle;
- /** value64, in:
- * Offset to write to.
+ /** value64, in/out:
+ * Offset to write to/New offset.
+ * @note The new offset isn't necessarily off + cb for files opened with
+ * SHFL_CF_ACCESS_APPEND since other parties (host programs, other VMs,
+ * other computers) could have extended the file since the last time the
+ * guest got a fresh size statistic. So, this helps the guest avoid
+ * a stat call to check the actual size.
*/
HGCMFunctionParameter offset;
@@ -1489,6 +1510,13 @@ typedef struct _VBoxSFFlush
/** @} */
+/** @name SHFL_FN_SET_UTF8
+ * @{ */
+/** Number of parameters for SHFL_FN_SET_UTF8. */
+#define SHFL_CPARMS_SET_UTF8 (0)
+/** @} */
+
+
/** @name SHFL_FN_LIST
* @remarks Listing information includes variable length RTDIRENTRY[EX]
* structures.
@@ -1517,10 +1545,10 @@ typedef struct VBoxSFParmList
* When SHFL_LIST_RETURN_ONE is not specified, multiple records may be
* returned, deriving the entry size using SHFLDIRINFO::name.u16Size. */
HGCMFunctionParameter pBuffer;
- /** value32, out: Set to 1 if the listing is done, 0 if more entries.
+ /** value32, out: Set to 0 if the listing is done, 1 if there are more entries.
* @note Must be set to zero on call as it was declared in/out parameter and
* may be used as such again. */
- HGCMFunctionParameter f32Done;
+ HGCMFunctionParameter f32More;
/** value32, out: Number of entries returned. */
HGCMFunctionParameter c32Entries;
} VBoxSFParmList;
@@ -1584,6 +1612,22 @@ typedef struct _VBoxSFList
* @{
*/
+/** SHFL_FN_READLINK parameters. */
+typedef struct VBoxSFParmReadLink
+{
+ /** value32, in: SHFLROOT of the mapping from which the symlink is read. */
+ HGCMFunctionParameter id32Root;
+ /** pointer, in: SHFLSTRING full path to the symlink. */
+ HGCMFunctionParameter pStrPath;
+ /** pointer, out: Buffer to place the symlink target into.
+ * @note Buffer contains UTF-8 characters on success, regardless of the
+ * UTF-8/UTF-16 setting of the connection. Will be zero terminated.
+ *
+ * @todo r=bird: This should've been a string!
+ * @todo r=bird: There should've been a byte count returned! */
+ HGCMFunctionParameter pBuffer;
+} VBoxSFParmReadLink;
+
/** Parameters structure. */
typedef struct _VBoxSFReadLink
{
@@ -1601,6 +1645,8 @@ typedef struct _VBoxSFReadLink
/** pointer, out:
* Buffer to place data to.
+ * @note Buffer contains UTF-8 characters on success, regardless of the
+ * UTF-8/UTF-16 setting of the connection. Will be zero terminated.
*/
HGCMFunctionParameter buffer;
@@ -1789,6 +1835,19 @@ typedef struct _VBoxSFRename
* @{
*/
+/** Parameters structure. */
+typedef struct VBoxSFParmCreateSymlink
+{
+ /** value32, in: SHFLROOT of the mapping the symlink should be created on. */
+ HGCMFunctionParameter id32Root;
+ /** pointer, in: SHFLSTRING giving the path to the symlink. */
+ HGCMFunctionParameter pStrSymlink;
+ /** pointer, in: SHFLSTRING giving the target. */
+ HGCMFunctionParameter pStrTarget;
+ /** pointer, out: SHFLFSOBJINFO buffer to be filled with info about the created symlink. */
+ HGCMFunctionParameter pInfo;
+} VBoxSFParmCreateSymlink;
+
/** Parameters structure. */
typedef struct _VBoxSFSymlink
{
@@ -1820,6 +1879,13 @@ typedef struct _VBoxSFSymlink
/** @} */
+/** @name SHFL_FN_SET_SYMLINKS
+ * @{ */
+/** Number of parameters for SHFL_FN_SET_SYMLINKS. */
+#define SHFL_CPARMS_SET_SYMLINKS (0)
+/** @} */
+
+
/** @name SHFL_FN_QUERY_MAP_INFO
* @{
*/
@@ -1909,6 +1975,75 @@ typedef struct VBoxSFParmSetFileSize
/** @} */
+/** @name SHFL_FN_QUERY_FEATURES
+ * @{ */
+/** SHFL_FN_QUERY_FEATURES parameters. */
+typedef struct VBoxSFParmQueryFeatures
+{
+ /** value64, out: Feature flags, SHFL_FEATURE_XXX. */
+ HGCMFunctionParameter f64Features;
+ /** value32, out: The ordinal of the last valid function */
+ HGCMFunctionParameter u32LastFunction;
+} VBoxSFParmQueryFeatures;
+/** Number of parameters for SHFL_FN_QUERY_FEATURES. */
+#define SHFL_CPARMS_QUERY_FEATURES (2)
+
+/** The write function updates the file offset upon return.
+ * This can be helpful for files open in append mode. */
+#define SHFL_FEATURE_WRITE_UPDATES_OFFSET RT_BIT_64(0)
+/** @} */
+
+
+/** @name SHFL_FN_COPY_FILE
+ * @{ */
+/** SHFL_FN_COPY_FILE parameters. */
+typedef struct VBoxSFParmCopyFile
+{
+ /** value32, in: SHFLROOT of the mapping the source handle belongs to. */
+ HGCMFunctionParameter id32RootSrc;
+ /** pointer, in: SHFLSTRING giving the source file path. */
+ HGCMFunctionParameter pStrPathSrc;
+
+ /** value32, in: SHFLROOT of the mapping the destination handle belongs to. */
+ HGCMFunctionParameter id32RootDst;
+ /** pointer, in: SHFLSTRING giving the destination file path. */
+ HGCMFunctionParameter pStrPathDst;
+
+ /** value32, in: Reserved for the future, must be zero. */
+ HGCMFunctionParameter f32Flags;
+} VBoxSFParmCopyFile;
+/** Number of parameters for SHFL_FN_COPY_FILE. */
+#define SHFL_CPARMS_COPY_FILE (5)
+/** @} */
+
+
+/** @name SHFL_FN_COPY_FILE_PART
+ * @{ */
+/** SHFL_FN_COPY_FILE_PART parameters. */
+typedef struct VBoxSFParmCopyFilePart
+{
+ /** value32, in: SHFLROOT of the mapping the source handle belongs to. */
+ HGCMFunctionParameter id32RootSrc;
+ /** value64, in: SHFLHANDLE of the source file. */
+ HGCMFunctionParameter u64HandleSrc;
+ /** value64, in: The source file offset. */
+ HGCMFunctionParameter off64Src;
+
+ /** value32, in: SHFLROOT of the mapping the destination handle belongs to. */
+ HGCMFunctionParameter id32RootDst;
+ /** value64, in: SHFLHANDLE of the destination file. */
+ HGCMFunctionParameter u64HandleDst;
+ /** value64, in: The destination file offset. */
+ HGCMFunctionParameter off64Dst;
+
+ /** value64, in/out: The number of bytes to copy on input / bytes actually copied. */
+ HGCMFunctionParameter cb64ToCopy;
+ /** value32, in: Reserved for the future, must be zero. */
+ HGCMFunctionParameter f32Flags;
+} VBoxSFParmCopyFilePart;
+/** Number of parameters for SHFL_FN_COPY_FILE_PART. */
+#define SHFL_CPARMS_COPY_FILE_PART (8)
+/** @} */
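How a guest might act on the SHFL_FN_QUERY_FEATURES results once the two out-parameters have been fetched (a sketch; Parms here is assumed to be a VBoxSFParmQueryFeatures filled in by a successful call):

    uint64_t const fFeatures     = Parms.f64Features.u.value64;
    uint32_t const uLastFunction = Parms.u32LastFunction.u.value32;

    if (fFeatures & SHFL_FEATURE_WRITE_UPDATES_OFFSET)
    {
        /* The off64Write out-value is authoritative after SHFL_FN_WRITE, so
           append-mode writes need no follow-up stat to learn the new size. */
    }
    if (uLastFunction >= SHFL_FN_COPY_FILE_PART)
    {
        /* Host implements SHFL_FN_COPY_FILE_PART; copy_file_range-style
           requests can be offloaded instead of bouncing data through the guest. */
    }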
diff --git a/ubuntu/vbox/vboxsf/include/iprt/assertcompile.h b/ubuntu/vbox/vboxsf/include/iprt/assertcompile.h
index 24a3c2f3e783..c12b5aa94dac 100644
--- a/ubuntu/vbox/vboxsf/include/iprt/assertcompile.h
+++ b/ubuntu/vbox/vboxsf/include/iprt/assertcompile.h
@@ -107,7 +107,11 @@ RT_C_DECLS_END
* @param expr Expression which should be true.
*/
#ifdef RTASSERT_HAVE_STATIC_ASSERT
-# define AssertCompile(expr) static_assert(!!(expr), #expr)
+# ifdef __cplusplus
+# define AssertCompile(expr) static_assert(!!(expr), #expr)
+# else
+# define AssertCompile(expr) _Static_assert(!!(expr), #expr)
+# endif
#else
# define AssertCompile(expr) AssertCompileNS(expr)
#endif
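With this change AssertCompile resolves to the native compile-time assertion in both languages: C11 _Static_assert when the header is compiled as C (as in these kernel modules) and C++11 static_assert otherwise. The diagnostic is the stringified expression either way, e.g.:

    AssertCompile(sizeof(uint32_t) == 4);               /* passes silently */
    AssertCompile(sizeof(void *) >= sizeof(uint32_t));  /* failure message would be the quoted expression */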
diff --git a/ubuntu/vbox/vboxsf/include/iprt/cdefs.h b/ubuntu/vbox/vboxsf/include/iprt/cdefs.h
index ce7ee6ae7eea..8aa1736a9a6b 100644
--- a/ubuntu/vbox/vboxsf/include/iprt/cdefs.h
+++ b/ubuntu/vbox/vboxsf/include/iprt/cdefs.h
@@ -2686,28 +2686,28 @@
/** @def RT_BYTE1
* Gets the first byte of something. */
-#define RT_BYTE1(a) ( (a) & 0xff )
+#define RT_BYTE1(a) ( (uint8_t)((a) & 0xff) )
/** @def RT_BYTE2
* Gets the second byte of something. */
-#define RT_BYTE2(a) ( ((a) >> 8) & 0xff )
+#define RT_BYTE2(a) ( (uint8_t)(((a) >> 8) & 0xff) )
/** @def RT_BYTE3
* Gets the second byte of something. */
-#define RT_BYTE3(a) ( ((a) >> 16) & 0xff )
+#define RT_BYTE3(a) ( (uint8_t)(((a) >> 16) & 0xff) )
/** @def RT_BYTE4
* Gets the fourth byte of something. */
-#define RT_BYTE4(a) ( ((a) >> 24) & 0xff )
+#define RT_BYTE4(a) ( (uint8_t)(((a) >> 24) & 0xff) )
/** @def RT_BYTE5
* Gets the fifth byte of something. */
-#define RT_BYTE5(a) ( ((a) >> 32) & 0xff )
+#define RT_BYTE5(a) ( (uint8_t)(((a) >> 32) & 0xff) )
/** @def RT_BYTE6
* Gets the sixth byte of something. */
-#define RT_BYTE6(a) ( ((a) >> 40) & 0xff )
+#define RT_BYTE6(a) ( (uint8_t)(((a) >> 40) & 0xff) )
/** @def RT_BYTE7
* Gets the seventh byte of something. */
-#define RT_BYTE7(a) ( ((a) >> 48) & 0xff )
+#define RT_BYTE7(a) ( (uint8_t)(((a) >> 48) & 0xff) )
/** @def RT_BYTE8
* Gets the eight byte of something. */
-#define RT_BYTE8(a) ( ((a) >> 56) & 0xff )
+#define RT_BYTE8(a) ( (uint8_t)(((a) >> 56) & 0xff) )
/** @def RT_LODWORD
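The added casts pin the result type of RT_BYTEn to uint8_t regardless of the operand's width and signedness, so the extracted byte can be stored or recombined without truncation warnings. An illustrative sketch:

    uint64_t const u64 = UINT64_C(0x8877665544332211);
    uint8_t  const bHi = RT_BYTE8(u64);                             /* 0x88, already a uint8_t */
    uint16_t const u16 = RT_MAKE_U16(RT_BYTE1(u64), RT_BYTE2(u64)); /* 0x2211 */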
diff --git a/ubuntu/vbox/vboxsf/include/iprt/err.h b/ubuntu/vbox/vboxsf/include/iprt/err.h
index 20a0d2a3e301..3de9a3334cdc 100644
--- a/ubuntu/vbox/vboxsf/include/iprt/err.h
+++ b/ubuntu/vbox/vboxsf/include/iprt/err.h
@@ -915,6 +915,8 @@
#define VERR_LDRELF_INVALID_RELOCATION_OFFSET (-639)
/** The ELF loader didn't find the symbol/string table for the image. */
#define VERR_LDRELF_NO_SYMBOL_OR_NO_STRING_TABS (-640)
+/** The ELF loader encountered an unterminated string table. */
+#define VERR_LDRELF_UNTERMINATED_STRING_TAB (-641)
/** Invalid link address. */
#define VERR_LDR_INVALID_LINK_ADDRESS (-647)
/** Invalid image relative virtual address. */
diff --git a/ubuntu/vbox/vboxsf/include/iprt/errcore.h b/ubuntu/vbox/vboxsf/include/iprt/errcore.h
index 1a771b9d0286..88f95c83e82e 100644
--- a/ubuntu/vbox/vboxsf/include/iprt/errcore.h
+++ b/ubuntu/vbox/vboxsf/include/iprt/errcore.h
@@ -231,9 +231,9 @@ RTDECL(int) RTErrConvertFromDarwin(int iNativeCode);
* Converts errno to iprt status code.
*
* @returns iprt status code.
- * @param uNativeCode errno code.
+ * @param iNativeCode errno code.
*/
-RTDECL(int) RTErrConvertFromErrno(unsigned uNativeCode);
+RTDECL(int) RTErrConvertFromErrno(int iNativeCode);
/**
* Converts a L4 errno to a iprt status code.
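The signed parameter matches how Linux guest code actually calls this: kernel APIs return zero or a negative errno, which gets negated on the way in. A hypothetical call site (kernel_read shown purely for illustration):

    ssize_t cbRet = kernel_read(pFile, pvBuf, cbToRead, &offFile); /* >= 0 or -errno */
    if (cbRet < 0)
        return RTErrConvertFromErrno((int)-cbRet);  /* e.g. ENOENT -> VERR_FILE_NOT_FOUND */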
diff --git a/ubuntu/vbox/vboxsf/include/iprt/fs.h b/ubuntu/vbox/vboxsf/include/iprt/fs.h
index 30676533c6e8..d85c40b69d77 100644
--- a/ubuntu/vbox/vboxsf/include/iprt/fs.h
+++ b/ubuntu/vbox/vboxsf/include/iprt/fs.h
@@ -238,6 +238,8 @@ typedef enum RTFSTYPE
RTFSTYPE_FAT,
/** Extended File Allocation Table, main target are flash drives. */
RTFSTYPE_EXFAT,
+ /** Resilient File System. */
+ RTFSTYPE_REFS,
/* Solaris: */
/** Zettabyte File System. */
@@ -251,6 +253,7 @@ typedef enum RTFSTYPE
/** Hierarchical File System. */
RTFSTYPE_HFS,
/** @todo RTFSTYPE_HFS_PLUS? */
+ RTFSTYPE_APFS,
RTFSTYPE_AUTOFS,
RTFSTYPE_DEVFS,
diff --git a/ubuntu/vbox/vboxsf/include/iprt/list.h b/ubuntu/vbox/vboxsf/include/iprt/list.h
new file mode 100644
index 000000000000..7f9c12608534
--- /dev/null
+++ b/ubuntu/vbox/vboxsf/include/iprt/list.h
@@ -0,0 +1,539 @@
+/** @file
+ * IPRT - Generic Doubly Linked List.
+ */
+
+/*
+ * Copyright (C) 2010-2019 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef IPRT_INCLUDED_list_h
+#define IPRT_INCLUDED_list_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <iprt/types.h>
+
+/** @defgroup grp_rt_list RTList - Generic Doubly Linked List
+ * @ingroup grp_rt
+ *
+ * The list implementation is circular without any type-wise distinction between
+ * the list and its nodes. This can be confusing since the list head usually
+ * resides in a different structure than the nodes, so care must be taken when
+ * walking the list.
+ *
+ * @{
+ */
+
+RT_C_DECLS_BEGIN
+
+/**
+ * A list node of a doubly linked list.
+ */
+typedef struct RTLISTNODE
+{
+ /** Pointer to the next list node. */
+ struct RTLISTNODE *pNext;
+ /** Pointer to the previous list node. */
+ struct RTLISTNODE *pPrev;
+} RTLISTNODE;
+/** Pointer to a list node. */
+typedef RTLISTNODE *PRTLISTNODE;
+/** Pointer to a const list node. */
+typedef RTLISTNODE const *PCRTLISTNODE;
+/** Pointer to a list node pointer. */
+typedef PRTLISTNODE *PPRTLISTNODE;
+
+/** The anchor (head/tail) of a doubly linked list.
+ *
+ * @remarks Please use this instead of RTLISTNODE to indicate a list
+ * head/tail. It makes the code so much easier to read. Also,
+ * always mention the actual list node type(s) in the comment. */
+typedef RTLISTNODE RTLISTANCHOR;
+/** Pointer to a doubly linked list anchor. */
+typedef RTLISTANCHOR *PRTLISTANCHOR;
+/** Pointer to a const doubly linked list anchor. */
+typedef RTLISTANCHOR const *PCRTLISTANCHOR;
+
+/** Version of RTLISTNODE for holding a ring-3 only list in data which gets
+ * shared between multiple contexts */
+#if defined(IN_RING3)
+typedef RTLISTNODE RTLISTNODER3;
+#else
+typedef struct { RTR3PTR aOffLimits[2]; } RTLISTNODER3;
+#endif
+/** Version of RTLISTANCHOR for holding a ring-3 only list in data which gets
+ * shared between multiple contexts */
+typedef RTLISTNODER3 RTLISTANCHORR3;
+
+
+/**
+ * Initialize a list.
+ *
+ * @param pList Pointer to an uninitialised list.
+ */
+DECLINLINE(void) RTListInit(PRTLISTNODE pList)
+{
+ pList->pNext = pList;
+ pList->pPrev = pList;
+}
+
+/**
+ * Append a node to the end of the list.
+ *
+ * @param pList The list to append the node to.
+ * @param pNode The node to append.
+ */
+DECLINLINE(void) RTListAppend(PRTLISTNODE pList, PRTLISTNODE pNode)
+{
+ pList->pPrev->pNext = pNode;
+ pNode->pPrev = pList->pPrev;
+ pNode->pNext = pList;
+ pList->pPrev = pNode;
+}
+
+/**
+ * Add a node as the first element of the list.
+ *
+ * @param pList The list to prepend the node to.
+ * @param pNode The node to prepend.
+ */
+DECLINLINE(void) RTListPrepend(PRTLISTNODE pList, PRTLISTNODE pNode)
+{
+ pList->pNext->pPrev = pNode;
+ pNode->pNext = pList->pNext;
+ pNode->pPrev = pList;
+ pList->pNext = pNode;
+}
+
+/**
+ * Inserts a node after the specified one.
+ *
+ * @param pCurNode The current node.
+ * @param pNewNode The node to insert.
+ */
+DECLINLINE(void) RTListNodeInsertAfter(PRTLISTNODE pCurNode, PRTLISTNODE pNewNode)
+{
+ RTListPrepend(pCurNode, pNewNode);
+}
+
+/**
+ * Inserts a node before the specified one.
+ *
+ * @param pCurNode The current node.
+ * @param pNewNode The node to insert.
+ */
+DECLINLINE(void) RTListNodeInsertBefore(PRTLISTNODE pCurNode, PRTLISTNODE pNewNode)
+{
+ RTListAppend(pCurNode, pNewNode);
+}
+
+/**
+ * Remove a node from a list.
+ *
+ * @param pNode The node to remove.
+ */
+DECLINLINE(void) RTListNodeRemove(PRTLISTNODE pNode)
+{
+ PRTLISTNODE pPrev = pNode->pPrev;
+ PRTLISTNODE pNext = pNode->pNext;
+
+ pPrev->pNext = pNext;
+ pNext->pPrev = pPrev;
+
+ /* poison */
+ pNode->pNext = NULL;
+ pNode->pPrev = NULL;
+}
+
+
+/**
+ * Remove a node from a list, returns value.
+ *
+ * @returns pNode
+ * @param pNode The node to remove.
+ */
+DECLINLINE(PRTLISTNODE) RTListNodeRemoveRet(PRTLISTNODE pNode)
+{
+ PRTLISTNODE pPrev = pNode->pPrev;
+ PRTLISTNODE pNext = pNode->pNext;
+
+ pPrev->pNext = pNext;
+ pNext->pPrev = pPrev;
+
+ /* poison */
+ pNode->pNext = NULL;
+ pNode->pPrev = NULL;
+
+ return pNode;
+}
+
+/**
+ * Checks if a node is the last element in the list.
+ *
+ * @retval true if the node is the last element in the list.
+ * @retval false otherwise
+ *
+ * @param pList The list.
+ * @param pNode The node to check.
+ */
+#define RTListNodeIsLast(pList, pNode) ((pNode)->pNext == (pList))
+
+/**
+ * Checks if a node is the first element in the list.
+ *
+ * @retval true if the node is the first element in the list.
+ * @retval false otherwise.
+ *
+ * @param pList The list.
+ * @param pNode The node to check.
+ */
+#define RTListNodeIsFirst(pList, pNode) ((pNode)->pPrev == (pList))
+
+/**
+ * Checks if a type converted node is actually the dummy element (@a pList).
+ *
+ * @retval true if the node is the dummy element in the list.
+ * @retval false otherwise.
+ *
+ * @param pList The list.
+ * @param pNode The node structure to check. Typically
+ * something obtained from RTListNodeGetNext() or
+ * RTListNodeGetPrev(). This is NOT a PRTLISTNODE
+ * but something that contains a RTLISTNODE member!
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member.
+ */
+#define RTListNodeIsDummy(pList, pNode, Type, Member) \
+ ( (pNode) == RT_FROM_MEMBER((pList), Type, Member) )
+/** @copydoc RTListNodeIsDummy */
+#define RTListNodeIsDummyCpp(pList, pNode, Type, Member) \
+ ( (pNode) == RT_FROM_CPP_MEMBER((pList), Type, Member) )
+
+/**
+ * Checks if a list is empty.
+ *
+ * @retval true if the list is empty.
+ * @retval false otherwise.
+ *
+ * @param pList The list to check.
+ */
+#define RTListIsEmpty(pList) ((pList)->pPrev == (pList))
+
+/**
+ * Returns the next node in the list.
+ *
+ * @returns The next node.
+ *
+ * @param pCurNode The current node.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member.
+ */
+#define RTListNodeGetNext(pCurNode, Type, Member) \
+ RT_FROM_MEMBER((pCurNode)->pNext, Type, Member)
+/** @copydoc RTListNodeGetNext */
+#define RTListNodeGetNextCpp(pCurNode, Type, Member) \
+ RT_FROM_CPP_MEMBER((pCurNode)->pNext, Type, Member)
+
+/**
+ * Returns the previous node in the list.
+ *
+ * @returns The previous node.
+ *
+ * @param pCurNode The current node.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member.
+ */
+#define RTListNodeGetPrev(pCurNode, Type, Member) \
+ RT_FROM_MEMBER((pCurNode)->pPrev, Type, Member)
+/** @copydoc RTListNodeGetPrev */
+#define RTListNodeGetPrevCpp(pCurNode, Type, Member) \
+ RT_FROM_CPP_MEMBER((pCurNode)->pPrev, Type, Member)
+
+/**
+ * Returns the first element in the list (checks for empty list).
+ *
+ * @returns Pointer to the first list element, or NULL if empty list.
+ *
+ * @param pList List to get the first element from.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member.
+ */
+#define RTListGetFirst(pList, Type, Member) \
+ (!RTListIsEmpty(pList) ? RTListNodeGetNext(pList, Type, Member) : NULL)
+/** @copydoc RTListGetFirst */
+#define RTListGetFirstCpp(pList, Type, Member) \
+ (!RTListIsEmpty(pList) ? RTListNodeGetNextCpp(pList, Type, Member) : NULL)
+
+/**
+ * Returns the last element in the list (checks for empty list).
+ *
+ * @returns Pointer to the last list element, or NULL if empty list.
+ *
+ * @param pList List to get the last element from.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member.
+ */
+#define RTListGetLast(pList, Type, Member) \
+ (!RTListIsEmpty(pList) ? RTListNodeGetPrev(pList, Type, Member) : NULL)
+/** @copydoc RTListGetLast */
+#define RTListGetLastCpp(pList, Type, Member) \
+ (!RTListIsEmpty(pList) ? RTListNodeGetPrevCpp(pList, Type, Member) : NULL)
+
+/**
+ * Returns the next node in the list or NULL if the end has been reached.
+ *
+ * @returns The next node, or NULL if end of list.
+ *
+ * @param pList The list @a pCurNode is linked on.
+ * @param pCurNode The current node, of type @a Type.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member.
+ */
+#define RTListGetNext(pList, pCurNode, Type, Member) \
+ ( (pCurNode)->Member.pNext != (pList) ? RT_FROM_MEMBER((pCurNode)->Member.pNext, Type, Member) : NULL )
+/** @copydoc RTListGetNext */
+#define RTListGetNextCpp(pList, pCurNode, Type, Member) \
+ ( (pCurNode)->Member.pNext != (pList) ? RT_FROM_CPP_MEMBER((pCurNode)->Member.pNext, Type, Member) : NULL )
+
+/**
+ * Returns the previous node in the list or NULL if the start has been reached.
+ *
+ * @returns The previous node, or NULL if end of list.
+ *
+ * @param pList The list @a pCurNode is linked on.
+ * @param pCurNode The current node, of type @a Type.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member.
+ */
+#define RTListGetPrev(pList, pCurNode, Type, Member) \
+ ( (pCurNode)->Member.pPrev != (pList) ? RT_FROM_MEMBER((pCurNode)->Member.pPrev, Type, Member) : NULL )
+/** @copydoc RTListGetPrev */
+#define RTListGetPrevCpp(pList, pCurNode, Type, Member) \
+ ( (pCurNode)->Member.pPrev != (pList) ? RT_FROM_CPP_MEMBER((pCurNode)->Member.pPrev, Type, Member) : NULL )
+
+
+/**
+ * Removes and returns the first element in the list (checks for empty list).
+ *
+ * @returns Pointer to the first list element, or NULL if empty list.
+ *
+ * @param pList List to get the first element from.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member.
+ */
+#define RTListRemoveFirst(pList, Type, Member) \
+ (!RTListIsEmpty(pList) ? RT_FROM_MEMBER(RTListNodeRemoveRet((pList)->pNext), Type, Member) : NULL)
+/** @copydoc RTListRemoveFirst */
+#define RTListRemoveFirstCpp(pList, Type, Member) \
+ (!RTListIsEmpty(pList) ? RT_FROM_CPP_MEMBER(RTListNodeRemoveRet((pList)->pNext), Type, Member) : NULL)
+
+/**
+ * Removes and returns the last element in the list (checks for empty list).
+ *
+ * @returns Pointer to the last list element, or NULL if empty list.
+ *
+ * @param pList List to get the last element from.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member.
+ */
+#define RTListRemoveLast(pList, Type, Member) \
+ (!RTListIsEmpty(pList) ? RT_FROM_MEMBER(RTListNodeRemoveRet((pList)->pPrev), Type, Member) : NULL)
+/** @copydoc RTListRemoveLast */
+#define RTListRemoveLastCpp(pList, Type, Member) \
+ (!RTListIsEmpty(pList) ? RT_FROM_CPP_MEMBER(RTListNodeRemoveRet((pList)->pPrev), Type, Member) : NULL)
+
+/**
+ * Removes and returns the next node in the list or NULL if the end has been
+ * reached.
+ *
+ * @returns The next node, or NULL if end of list.
+ *
+ * @param pList The list @a pCurNode is linked on.
+ * @param pCurNode The current node, of type @a Type.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member.
+ */
+#define RTListRemoveNext(pList, pCurNode, Type, Member) \
+ ( (pCurNode)->Member.pNext != (pList) ? RT_FROM_MEMBER(RTListNodeRemoveRet((pCurNode)->Member.pNext), Type, Member) : NULL )
+/** @copydoc RTListRemoveNext */
+#define RTListRemoveNextCpp(pList, pCurNode, Type, Member) \
+ ( (pCurNode)->Member.pNext != (pList) ? RT_FROM_CPP_MEMBER(RTListNodeRemoveRet((pCurNode)->Member.pNext), Type, Member) : NULL )
+
+/**
+ * Removes and returns the previous node in the list or NULL if the start has
+ * been reached.
+ *
+ * @returns The previous node, or NULL if end of list.
+ *
+ * @param pList The list @a pCurNode is linked on.
+ * @param pCurNode The current node, of type @a Type.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member.
+ */
+#define RTListRemovePrev(pList, pCurNode, Type, Member) \
+ ( (pCurNode)->Member.pPrev != (pList) ? RT_FROM_MEMBER(RTListNodeRemoveRet((pCurNode)->Member.pPrev), Type, Member) : NULL )
+/** @copydoc RTListRemovePrev */
+#define RTListRemovePrevCpp(pList, pCurNode, Type, Member) \
+ ( (pCurNode)->Member.pPrev != (pList) ? RT_FROM_CPP_MEMBER(RTListNodeRemoveRet((pCurNode)->Member.pPrev), Type, Member) : NULL )
+
+
+/**
+ * Enumerate the list in head to tail order.
+ *
+ * @param pList List to enumerate.
+ * @param pIterator The iterator variable name.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member name.
+ */
+#define RTListForEach(pList, pIterator, Type, Member) \
+ for (pIterator = RTListNodeGetNext(pList, Type, Member); \
+ !RTListNodeIsDummy(pList, pIterator, Type, Member); \
+ pIterator = RT_FROM_MEMBER((pIterator)->Member.pNext, Type, Member) )
+/** @copydoc RTListForEach */
+#define RTListForEachCpp(pList, pIterator, Type, Member) \
+ for (pIterator = RTListNodeGetNextCpp(pList, Type, Member); \
+ !RTListNodeIsDummyCpp(pList, pIterator, Type, Member); \
+ pIterator = RT_FROM_CPP_MEMBER((pIterator)->Member.pNext, Type, Member) )
+
+
+/**
+ * Enumerate the list in head to tail order, safe against removal of the
+ * current node.
+ *
+ * @param pList List to enumerate.
+ * @param pIterator The iterator variable name.
+ * @param pIterNext The name of the variable saving the pointer to
+ * the next element.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member name.
+ */
+#define RTListForEachSafe(pList, pIterator, pIterNext, Type, Member) \
+ for (pIterator = RTListNodeGetNext(pList, Type, Member), \
+ pIterNext = RT_FROM_MEMBER((pIterator)->Member.pNext, Type, Member); \
+ !RTListNodeIsDummy(pList, pIterator, Type, Member); \
+ pIterator = pIterNext, \
+ pIterNext = RT_FROM_MEMBER((pIterator)->Member.pNext, Type, Member) )
+/** @copydoc RTListForEachSafe */
+#define RTListForEachSafeCpp(pList, pIterator, pIterNext, Type, Member) \
+ for (pIterator = RTListNodeGetNextCpp(pList, Type, Member), \
+ pIterNext = RT_FROM_CPP_MEMBER((pIterator)->Member.pNext, Type, Member); \
+ !RTListNodeIsDummyCpp(pList, pIterator, Type, Member); \
+ pIterator = pIterNext, \
+ pIterNext = RT_FROM_CPP_MEMBER((pIterator)->Member.pNext, Type, Member) )
+
+
+/**
+ * Enumerate the list in reverse order (tail to head).
+ *
+ * @param pList List to enumerate.
+ * @param pIterator The iterator variable name.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member name.
+ */
+#define RTListForEachReverse(pList, pIterator, Type, Member) \
+ for (pIterator = RTListNodeGetPrev(pList, Type, Member); \
+ !RTListNodeIsDummy(pList, pIterator, Type, Member); \
+ pIterator = RT_FROM_MEMBER((pIterator)->Member.pPrev, Type, Member) )
+/** @copydoc RTListForEachReverse */
+#define RTListForEachReverseCpp(pList, pIterator, Type, Member) \
+ for (pIterator = RTListNodeGetPrevCpp(pList, Type, Member); \
+ !RTListNodeIsDummyCpp(pList, pIterator, Type, Member); \
+ pIterator = RT_FROM_CPP_MEMBER((pIterator)->Member.pPrev, Type, Member) )
+
+
+/**
+ * Enumerate the list in reverse order (tail to head), safe against
+ * removal of the current node.
+ *
+ * @param pList List to enumerate.
+ * @param pIterator The iterator variable name.
+ * @param pIterPrev The name of the variable saving the pointer to
+ * the previous element.
+ * @param Type Structure the list node is a member of.
+ * @param Member The list node member name.
+ */
+#define RTListForEachReverseSafe(pList, pIterator, pIterPrev, Type, Member) \
+ for (pIterator = RTListNodeGetPrev(pList, Type, Member), \
+ pIterPrev = RT_FROM_MEMBER((pIterator)->Member.pPrev, Type, Member); \
+ !RTListNodeIsDummy(pList, pIterator, Type, Member); \
+ pIterator = pIterPrev, \
+ pIterPrev = RT_FROM_MEMBER((pIterator)->Member.pPrev, Type, Member) )
+/** @copydoc RTListForEachReverseSafe */
+#define RTListForEachReverseSafeCpp(pList, pIterator, pIterPrev, Type, Member) \
+ for (pIterator = RTListNodeGetPrevCpp(pList, Type, Member), \
+ pIterPrev = RT_FROM_CPP_MEMBER((pIterator)->Member.pPrev, Type, Member); \
+ !RTListNodeIsDummyCpp(pList, pIterator, Type, Member); \
+ pIterator = pIterPrev, \
+ pIterPrev = RT_FROM_CPP_MEMBER((pIterator)->Member.pPrev, Type, Member) )
+
+
+/**
+ * Move the given list to a new list header.
+ *
+ * @param pListDst The new list.
+ * @param pListSrc The list to move.
+ */
+DECLINLINE(void) RTListMove(PRTLISTNODE pListDst, PRTLISTNODE pListSrc)
+{
+ if (!RTListIsEmpty(pListSrc))
+ {
+ pListDst->pNext = pListSrc->pNext;
+ pListDst->pPrev = pListSrc->pPrev;
+
+ /* Adjust the first and last element links */
+ pListDst->pNext->pPrev = pListDst;
+ pListDst->pPrev->pNext = pListDst;
+
+ /* Finally remove the elements from the source list */
+ RTListInit(pListSrc);
+ }
+ else
+ RTListInit(pListDst);
+}
+
+/**
+ * List concatenation.
+ *
+ * @returns nothing.
+ * @param pListDst The destination list.
+ * @param pListSrc The source list to concatenate.
+ */
+DECLINLINE(void) RTListConcatenate(PRTLISTANCHOR pListDst, PRTLISTANCHOR pListSrc)
+{
+ if (!RTListIsEmpty(pListSrc))
+ {
+ PRTLISTNODE pFirst = pListSrc->pNext;
+ PRTLISTNODE pLast = pListSrc->pPrev;
+
+ pListDst->pPrev->pNext = pFirst;
+ pFirst->pPrev = pListDst->pPrev;
+ pLast->pNext = pListDst;
+ pListDst->pPrev = pLast;
+
+ /* Finally remove the elements from the source list */
+ RTListInit(pListSrc);
+ }
+}
+
+RT_C_DECLS_END
+
+/** @} */
+
+#endif /* !IPRT_INCLUDED_list_h */
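Since iprt/list.h is new in this tree, a short self-contained sketch of the intended anchor/node pattern (MYITEM and myDrainList are made-up names; RTMemFree from iprt/mem.h is assumed as the matching allocator):

    #include <iprt/list.h>
    #include <iprt/mem.h>

    typedef struct MYITEM
    {
        RTLISTNODE Entry;   /* may sit anywhere in the struct; RT_FROM_MEMBER recovers the container */
        uint32_t   uValue;
    } MYITEM;

    static void myDrainList(PRTLISTANCHOR pListHead)
    {
        MYITEM *pCur, *pNext;
        RTListForEachSafe(pListHead, pCur, pNext, MYITEM, Entry)
        {
            RTListNodeRemove(&pCur->Entry); /* safe: pNext was resolved before the removal */
            RTMemFree(pCur);
        }
    }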
diff --git a/ubuntu/vbox/vboxsf/include/iprt/log.h b/ubuntu/vbox/vboxsf/include/iprt/log.h
index 3c99cb21944a..fbd6f5fd7a8a 100644
--- a/ubuntu/vbox/vboxsf/include/iprt/log.h
+++ b/ubuntu/vbox/vboxsf/include/iprt/log.h
@@ -614,7 +614,7 @@ RTDECL(void) RTLogPrintfEx(void *pvInstance, unsigned fFlags, unsigned iGroup,
# define _LogIt(a_fFlags, a_iGroup, ...) \
do \
{ \
- register PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(a_fFlags, a_iGroup)); \
+ PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(a_fFlags, a_iGroup)); \
if (RT_LIKELY(!LogIt_pLogger)) \
{ /* likely */ } \
else \
@@ -628,7 +628,7 @@ RTDECL(void) RTLogPrintfEx(void *pvInstance, unsigned fFlags, unsigned iGroup,
# define LogIt(a_fFlags, a_iGroup, fmtargs) \
do \
{ \
- register PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(a_fFlags, a_iGroup)); \
+ PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(a_fFlags, a_iGroup)); \
if (RT_LIKELY(!LogIt_pLogger)) \
{ /* likely */ } \
else \
@@ -639,7 +639,7 @@ RTDECL(void) RTLogPrintfEx(void *pvInstance, unsigned fFlags, unsigned iGroup,
# define LogItAlways(a_fFlags, a_iGroup, fmtargs) \
do \
{ \
- register PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(0, UINT16_MAX)); \
+ PRTLOGGER LogIt_pLogger = RTLogDefaultInstanceEx(RT_MAKE_U32(0, UINT16_MAX)); \
if (LogIt_pLogger) \
LogIt_pLogger->pfnLogger fmtargs; \
} while (0)
@@ -2011,6 +2011,8 @@ RTDECL(int) RTLogCreate(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGro
* @param cGroups Number of groups in the array.
* @param papszGroups Pointer to array of groups. This must stick
* around for the life of the logger instance.
+ * @param cMaxEntriesPerGroup The max number of entries per group. UINT32_MAX
+ * or zero for unlimited.
* @param fDestFlags The destination flags. RTLOGDEST_FILE is ORed
* if pszFilenameFmt specified.
* @param pfnPhase Callback function for starting logging and for
@@ -2028,11 +2030,11 @@ RTDECL(int) RTLogCreate(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGro
* @param pszFilenameFmt Log filename format string. Standard RTStrFormat().
* @param ... Format arguments.
*/
-RTDECL(int) RTLogCreateEx(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings,
- const char *pszEnvVarBase, unsigned cGroups, const char * const * papszGroups,
+RTDECL(int) RTLogCreateEx(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings, const char *pszEnvVarBase,
+ unsigned cGroups, const char * const * papszGroups, uint32_t cMaxEntriesPerGroup,
uint32_t fDestFlags, PFNRTLOGPHASE pfnPhase, uint32_t cHistory,
uint64_t cbHistoryFileMax, uint32_t cSecsHistoryTimeSlot, PRTERRINFO pErrInfo,
- const char *pszFilenameFmt, ...) RT_IPRT_FORMAT_ATTR_MAYBE_NULL(13, 14);
+ const char *pszFilenameFmt, ...) RT_IPRT_FORMAT_ATTR_MAYBE_NULL(14, 15);
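+
+/* An illustrative sketch (not part of the header): a minimal RTLogCreateEx()
+ * call after the cMaxEntriesPerGroup insertion above.  The group table, log
+ * file name and wrapper function are made up for the example. */
+#if 0 /* example only */
+static int myCreateLogger(PRTLOGGER *ppLogger)
+{
+    static const char * const s_apszGroups[] = { "DEFAULT", "MYGROUP" };
+    return RTLogCreateEx(ppLogger, 0 /*fFlags*/, "all" /*pszGroupSettings*/, NULL /*pszEnvVarBase*/,
+                         RT_ELEMENTS(s_apszGroups), s_apszGroups,
+                         UINT32_MAX /*cMaxEntriesPerGroup - the new parameter, unlimited*/,
+                         RTLOGDEST_FILE, NULL /*pfnPhase*/, 0 /*cHistory*/,
+                         0 /*cbHistoryFileMax*/, 0 /*cSecsHistoryTimeSlot*/,
+                         NULL /*pErrInfo*/, "mylog.%d.log", 42);
+}
+#endif /* example only */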
/**
* Create a logger instance.
@@ -2048,6 +2050,8 @@ RTDECL(int) RTLogCreateEx(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszG
* @param cGroups Number of groups in the array.
* @param papszGroups Pointer to array of groups. This must stick
* around for the life of the logger instance.
+ * @param cMaxEntriesPerGroup The max number of entries per group. UINT32_MAX
+ * or zero for unlimited.
* @param fDestFlags The destination flags. RTLOGDEST_FILE is ORed
* if pszFilenameFmt specified.
* @param pfnPhase Callback function for starting logging and for
@@ -2066,11 +2070,11 @@ RTDECL(int) RTLogCreateEx(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszG
* RTStrFormat().
* @param args Format arguments.
*/
-RTDECL(int) RTLogCreateExV(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings,
- const char *pszEnvVarBase, unsigned cGroups, const char * const * papszGroups,
+RTDECL(int) RTLogCreateExV(PRTLOGGER *ppLogger, uint32_t fFlags, const char *pszGroupSettings, const char *pszEnvVarBase,
+ unsigned cGroups, const char * const * papszGroups, uint32_t cMaxEntriesPerGroup,
uint32_t fDestFlags, PFNRTLOGPHASE pfnPhase, uint32_t cHistory,
uint64_t cbHistoryFileMax, uint32_t cSecsHistoryTimeSlot, PRTERRINFO pErrInfo,
- const char *pszFilenameFmt, va_list args) RT_IPRT_FORMAT_ATTR_MAYBE_NULL(13, 0);
+ const char *pszFilenameFmt, va_list args) RT_IPRT_FORMAT_ATTR_MAYBE_NULL(14, 0);
/**
* Create a logger instance for singled threaded ring-0 usage.
diff --git a/ubuntu/vbox/vboxsf/include/iprt/mangling.h b/ubuntu/vbox/vboxsf/include/iprt/mangling.h
index b59ef06a6c4d..f1d2c89dadd1 100644
--- a/ubuntu/vbox/vboxsf/include/iprt/mangling.h
+++ b/ubuntu/vbox/vboxsf/include/iprt/mangling.h
@@ -808,6 +808,7 @@
# define RTVfsDirOpenDir RT_MANGLER(RTVfsDirOpenDir)
# define RTVfsDirFromRTDir RT_MANGLER(RTVfsDirFromRTDir)
# define RTVfsDirOpenNormal RT_MANGLER(RTVfsDirOpenNormal)
+# define RTVfsDirIsStdDir RT_MANGLER(RTVfsDirIsStdDir)
# define RTDvmCreate RT_MANGLER(RTDvmCreate)
# define RTDvmCreateFromVfsFile RT_MANGLER(RTDvmCreateFromVfsFile)
# define RTDvmRetain RT_MANGLER(RTDvmRetain)
@@ -912,9 +913,14 @@
# define RTFileCompareByHandlesEx RT_MANGLER(RTFileCompareByHandlesEx)
# define RTFileCompareEx RT_MANGLER(RTFileCompareEx)
# define RTFileCopy RT_MANGLER(RTFileCopy)
+# define RTFileCopyAttributes RT_MANGLER(RTFileCopyAttributes)
# define RTFileCopyByHandles RT_MANGLER(RTFileCopyByHandles)
# define RTFileCopyByHandlesEx RT_MANGLER(RTFileCopyByHandlesEx)
# define RTFileCopyEx RT_MANGLER(RTFileCopyEx)
+# define RTFileCopyPart RT_MANGLER(RTFileCopyPart)
+# define RTFileCopyPartCleanup RT_MANGLER(RTFileCopyPartCleanup)
+# define RTFileCopyPartEx RT_MANGLER(RTFileCopyPartEx)
+# define RTFileCopyPartPrep RT_MANGLER(RTFileCopyPartPrep)
# define RTFileCreateTemp RT_MANGLER(RTFileCreateTemp)
# define RTFileCreateTempSecure RT_MANGLER(RTFileCreateTempSecure)
# define RTFileDelete RT_MANGLER(RTFileDelete)
@@ -932,6 +938,7 @@
# define RTFileMove RT_MANGLER(RTFileMove)
# define RTFileOpen RT_MANGLER(RTFileOpen)
# define RTFileOpenBitBucket RT_MANGLER(RTFileOpenBitBucket)
+# define RTFileOpenEx RT_MANGLER(RTFileOpenEx)
# define RTFileOpenF RT_MANGLER(RTFileOpenF)
# define RTFileOpenV RT_MANGLER(RTFileOpenV)
# define RTFileOpenTemp RT_MANGLER(RTFileOpenTemp)
@@ -953,7 +960,9 @@
# define RTFileSetOwner RT_MANGLER(RTFileSetOwner)
# define RTFileSetSize RT_MANGLER(RTFileSetSize)
# define RTFileSetTimes RT_MANGLER(RTFileSetTimes)
+# define RTFileSgRead RT_MANGLER(RTFileSgRead)
# define RTFileSgReadAt RT_MANGLER(RTFileSgReadAt)
+# define RTFileSgWrite RT_MANGLER(RTFileSgWrite)
# define RTFileSgWriteAt RT_MANGLER(RTFileSgWriteAt)
# define RTFileTell RT_MANGLER(RTFileTell)
# define RTFileToNative RT_MANGLER(RTFileToNative)
@@ -1710,6 +1719,7 @@
# define RTR0MemUserCopyFrom RT_MANGLER(RTR0MemUserCopyFrom) /* r0drv */
# define RTR0MemUserCopyTo RT_MANGLER(RTR0MemUserCopyTo) /* r0drv */
# define RTR0MemUserIsValidAddr RT_MANGLER(RTR0MemUserIsValidAddr) /* r0drv */
+# define rtR0MemObjLinuxVirtToPage RT_MANGLER(rtR0MemObjLinuxVirtToPage) /* r0drv linux-only */
# define RTR0ProcHandleSelf RT_MANGLER(RTR0ProcHandleSelf) /* r0drv */
# define RTR0Term RT_MANGLER(RTR0Term) /* r0drv */
# define RTR0TermForced RT_MANGLER(RTR0TermForced) /* r0drv */
@@ -2574,6 +2584,7 @@
# define RTVfsDirReadEx RT_MANGLER(RTVfsDirReadEx)
# define RTVfsDirRemoveDir RT_MANGLER(RTVfsDirRemoveDir)
# define RTVfsDirSetPathMode RT_MANGLER(RTVfsDirSetPathMode)
+# define RTVfsDirToPrivate RT_MANGLER(RTVfsDirToPrivate)
# define RTVfsFileFlush RT_MANGLER(RTVfsFileFlush)
# define RTVfsFileFromBuffer RT_MANGLER(RTVfsFileFromBuffer)
# define RTVfsFileFromRTFile RT_MANGLER(RTVfsFileFromRTFile)
diff --git a/ubuntu/vbox/vboxsf/include/iprt/types.h b/ubuntu/vbox/vboxsf/include/iprt/types.h
index f33af38a9e06..8e3e635ba74d 100644
--- a/ubuntu/vbox/vboxsf/include/iprt/types.h
+++ b/ubuntu/vbox/vboxsf/include/iprt/types.h
@@ -59,21 +59,28 @@ RT_C_DECLS_END
# include <sys/types.h>
# elif defined(RT_OS_FREEBSD) && defined(_KERNEL)
+# include <sys/param.h>
+# undef PVM
+# if __FreeBSD_version < 1200000
/*
* Kludge for the FreeBSD kernel:
* stddef.h and sys/types.h have slightly different offsetof definitions
* when compiling in kernel mode. This is just to make GCC shut up.
*/
-# ifndef _STDDEF_H_
-# undef offsetof
-# endif
-# include <sys/stddef.h>
-# ifndef _SYS_TYPES_H_
-# undef offsetof
-# endif
-# include <sys/types.h>
-# ifndef offsetof
-# error "offsetof is not defined!"
+# ifndef _STDDEF_H_
+# undef offsetof
+# endif
+# include <sys/stddef.h>
+# ifndef _SYS_TYPES_H_
+# undef offsetof
+# endif
+# include <sys/types.h>
+# ifndef offsetof
+# error "offsetof is not defined!"
+# endif
+# else
+# include <sys/stddef.h>
+# include <sys/types.h>
# endif
# elif defined(RT_OS_FREEBSD) && HC_ARCH_BITS == 64 && defined(RT_ARCH_X86)
diff --git a/ubuntu/vbox/vboxsf/lnkops.c b/ubuntu/vbox/vboxsf/lnkops.c
index d735b6778356..c8a08168ae1a 100644
--- a/ubuntu/vbox/vboxsf/lnkops.c
+++ b/ubuntu/vbox/vboxsf/lnkops.c
@@ -28,92 +28,278 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
#include "vfsmod.h"
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
-static const char *sf_follow_link(struct dentry *dentry, void **cookie)
-# else
-static void *sf_follow_link(struct dentry *dentry, struct nameidata *nd)
-# endif
+/**
+ * Converts VBox status codes to the closest matching negative errno values.
+ */
+DECLINLINE(int) vbsf_convert_symlink_error(int vrc)
{
- struct inode *inode = dentry->d_inode;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
- struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
- int error = -ENOMEM;
- char *path = (char *)get_zeroed_page(GFP_KERNEL);
- int rc;
-
- if (path) {
- error = 0;
- rc = VbglR0SfReadLink(&client_handle, &sf_g->map, sf_i->path,
- PATH_MAX, path);
- if (RT_FAILURE(rc)) {
- LogFunc(("VbglR0SfReadLink failed, caller=%s, rc=%Rrc\n", __func__, rc));
- free_page((unsigned long)path);
- error = -EPROTO;
- }
- }
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
- return error ? ERR_PTR(error) : (*cookie = path);
-# else
- nd_set_link(nd, error ? ERR_PTR(error) : path);
- return NULL;
-# endif
+ if ( vrc == VERR_IS_A_DIRECTORY
+ || vrc == VERR_IS_A_FIFO
+ || vrc == VERR_IS_A_FILE
+ || vrc == VERR_IS_A_BLOCK_DEVICE
+ || vrc == VERR_IS_A_CHAR_DEVICE
+ || vrc == VERR_IS_A_SOCKET
+ || vrc == VERR_NOT_SYMLINK)
+ return -EINVAL;
+ if (vrc == VERR_PATH_NOT_FOUND)
+ return -ENOTDIR;
+ if (vrc == VERR_FILE_NOT_FOUND)
+ return -ENOENT;
+ return -EPROTO;
}
-# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
-static void sf_put_link(struct dentry *dentry, struct nameidata *nd,
- void *cookie)
+
+/**
+ * Does the NLS conversion of the symlink target.
+ */
+static int vbsf_symlink_nls_convert_slow(struct vbsf_super_info *pSuperInfo, char *pszTarget, size_t cbTargetBuf)
{
- char *page = nd_get_link(nd);
- if (!IS_ERR(page))
- free_page((unsigned long)page);
+ int rc;
+ size_t const cchUtf8 = RTStrNLen(pszTarget, cbTargetBuf);
+ if (cchUtf8 < cbTargetBuf) {
+ /*
+ * If the target is short and there is a lot of space left in the target
+ * buffer (typically PAGE_SIZE in size), we move the input to the end
+ * instead of allocating a temporary buffer for it. This works because
+ * there shouldn't be anything that is more than 8x worse than UTF-8
+ * when it comes to efficiency.
+ */
+ char *pszFree = NULL;
+ char *pszUtf8;
+        if (cchUtf8 + 1 <= cbTargetBuf / 8) {
+            pszUtf8 = &pszTarget[cbTargetBuf - cchUtf8 - 1];
+            cbTargetBuf -= cchUtf8 + 1; /* reserve the tail copy (cchUtf8 + 1 bytes) */
+ } else {
+ pszFree = pszUtf8 = kmalloc(cchUtf8 + 1, GFP_KERNEL);
+ if (RT_UNLIKELY(!pszUtf8)) {
+ LogRelMax(50, ("vbsf_symlink_nls_convert_slow: failed to allocate %u bytes\n", cchUtf8 + 1));
+ return -ENOMEM;
+ }
+ }
+ memcpy(pszUtf8, pszTarget, cchUtf8);
+ pszUtf8[cchUtf8] = '\0';
+
+ rc = vbsf_nlscpy(pSuperInfo, pszTarget, cbTargetBuf, pszUtf8, cchUtf8);
+ if (pszFree)
+ kfree(pszFree);
+ } else {
+ SFLOGFLOW(("vbsf_symlink_nls_convert_slow: Impossible! Unterminated target!\n"));
+ rc = -ENAMETOOLONG;
+ }
+ return rc;
}
-# endif
-# else /* LINUX_VERSION_CODE >= 4.5.0 */
-static const char *sf_get_link(struct dentry *dentry, struct inode *inode,
- struct delayed_call *done)
+
+/**
+ * Does NLS conversion if needed.
+ */
+DECLINLINE(int) vbsf_symlink_nls_convert(struct vbsf_super_info *pSuperInfo, char *pszTarget, size_t cbTargetBuf)
+{
+ if (pSuperInfo->fNlsIsUtf8)
+ return 0;
+ return vbsf_symlink_nls_convert_slow(pSuperInfo, pszTarget, cbTargetBuf);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+
+/**
+ * Get symbolic link.
+ */
+static const char *vbsf_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done)
{
- struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
- struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
- char *path;
- int rc;
-
- if (!dentry)
- return ERR_PTR(-ECHILD);
- path = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!path)
- return ERR_PTR(-ENOMEM);
- rc = VbglR0SfReadLink(&client_handle, &sf_g->map, sf_i->path, PATH_MAX,
- path);
- if (RT_FAILURE(rc)) {
- LogFunc(("VbglR0SfReadLink failed, caller=%s, rc=%Rrc\n",
- __func__, rc));
- kfree(path);
- return ERR_PTR(-EPROTO);
- }
- set_delayed_call(done, kfree_link, path);
- return path;
+ char *pszTarget;
+ if (dentry) {
+ pszTarget = (char *)kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (pszTarget) {
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
+ int rc = VbglR0SfHostReqReadLinkContigSimple(pSuperInfo->map.root, sf_i->path->String.ach, sf_i->path->u16Length,
+ pszTarget, virt_to_phys(pszTarget), RT_MIN(PATH_MAX, PAGE_SIZE - 1));
+ if (RT_SUCCESS(rc)) {
+ pszTarget[PAGE_SIZE - 1] = '\0';
+ SFLOGFLOW(("vbsf_get_link: %s -> %s\n", sf_i->path->String.ach, pszTarget));
+ rc = vbsf_symlink_nls_convert(pSuperInfo, pszTarget, PAGE_SIZE);
+ if (rc == 0) {
+ vbsf_dentry_chain_increase_ttl(dentry);
+ set_delayed_call(done, kfree_link, pszTarget);
+ return pszTarget;
+ }
+ } else {
+ SFLOGFLOW(("vbsf_get_link: VbglR0SfHostReqReadLinkContigSimple failed on '%s': %Rrc\n",
+ sf_i->path->String.ach, rc));
+ }
+ kfree(pszTarget);
+ pszTarget = ERR_PTR(vbsf_convert_symlink_error(rc));
+ } else
+ pszTarget = ERR_PTR(-ENOMEM);
+ } else
+ pszTarget = ERR_PTR(-ECHILD);
+ return pszTarget;
}
-# endif /* LINUX_VERSION_CODE >= 4.5.0 */
-struct inode_operations sf_lnk_iops = {
-# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
- .readlink = generic_readlink,
+#else /* < 4.5 */
+
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 8)
+/**
+ * Reads the link into the given buffer.
+ */
+static int vbsf_readlink(struct dentry *dentry, char *buffer, int len)
+{
+ int rc;
+ char *pszTarget = (char *)get_zeroed_page(GFP_KERNEL);
+ if (pszTarget) {
+ struct inode *inode = dentry->d_inode;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
+ rc = VbglR0SfHostReqReadLinkContigSimple(pSuperInfo->map.root, sf_i->path->String.ach, sf_i->path->u16Length,
+ pszTarget, virt_to_phys(pszTarget), RT_MIN(PATH_MAX, PAGE_SIZE - 1));
+ if (RT_SUCCESS(rc)) {
+ pszTarget[PAGE_SIZE - 1] = '\0';
+ SFLOGFLOW(("vbsf_readlink: %s -> %*s\n", sf_i->path->String.ach, pszTarget));
+ rc = vbsf_symlink_nls_convert(pSuperInfo, pszTarget, PAGE_SIZE);
+ if (rc == 0) {
+ vbsf_dentry_chain_increase_ttl(dentry);
+ rc = vfs_readlink(dentry, buffer, len, pszTarget);
+ }
+ } else {
+ SFLOGFLOW(("vbsf_readlink: VbglR0SfHostReqReadLinkContigSimple failed on '%s': %Rrc\n", sf_i->path->String.ach, rc));
+ rc = vbsf_convert_symlink_error(rc);
+ }
+ free_page((unsigned long)pszTarget);
+ } else
+ rc = -ENOMEM;
+ return rc;
+}
+# endif /* < 2.6.8 */
+
+/**
+ * Follow link in dentry.
+ */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
+static const char *vbsf_follow_link(struct dentry *dentry, void **cookie)
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
+static void *vbsf_follow_link(struct dentry *dentry, struct nameidata *nd)
+# else
+static int vbsf_follow_link(struct dentry *dentry, struct nameidata *nd)
+# endif
+{
+ int rc;
+ char *pszTarget = (char *)get_zeroed_page(GFP_KERNEL);
+ if (pszTarget) {
+ struct inode *inode = dentry->d_inode;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
+
+ rc = VbglR0SfHostReqReadLinkContigSimple(pSuperInfo->map.root, sf_i->path->String.ach, sf_i->path->u16Length,
+ pszTarget, virt_to_phys(pszTarget), RT_MIN(PATH_MAX, PAGE_SIZE - 1));
+ if (RT_SUCCESS(rc)) {
+ pszTarget[PAGE_SIZE - 1] = '\0';
+ SFLOGFLOW(("vbsf_follow_link: %s -> %s\n", sf_i->path->String.ach, pszTarget));
+ rc = vbsf_symlink_nls_convert(pSuperInfo, pszTarget, PAGE_SIZE);
+ if (rc == 0) {
+ /*
+ * Succeeded. For 2.6.8 and later the page gets associated
+ * with the caller-cookie or nameidata structure and freed
+ * later by vbsf_put_link(). On earlier kernels we have to
+ * call vfs_follow_link() which will try continue the walking
+ * using the buffer we pass it here.
+ */
+ vbsf_dentry_chain_increase_ttl(dentry);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
+ *cookie = pszTarget;
+ return pszTarget;
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
+ nd_set_link(nd, pszTarget);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
+ return NULL;
+# else
+ return 0;
+# endif
+# else /* < 2.6.8 */
+ rc = vfs_follow_link(nd, pszTarget);
+ free_page((unsigned long)pszTarget);
+ return rc;
# endif
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
- .get_link = sf_get_link
-# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
- .follow_link = sf_follow_link,
- .put_link = free_page_put_link,
+ }
+
+ /*
+ * Failed.
+ */
+ } else {
+ LogFunc(("VbglR0SfReadLink failed, caller=%s, rc=%Rrc\n", __func__, rc));
+ rc = vbsf_convert_symlink_error(rc);
+ }
+ free_page((unsigned long)pszTarget);
+ } else {
+ rc = -ENOMEM;
+ }
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
+ *cookie = ERR_PTR(rc);
+ return (const char *)ERR_PTR(rc);
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
+ nd_set_link(nd, (char *)ERR_PTR(rc));
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
+ return NULL;
+# else
+ return 0;
+# endif
+# else /* < 2.6.8 */
+ return rc;
+# endif /* < 2.6.8 */
+}
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
+/**
+ * For freeing the target link buffer allocated by vbsf_follow_link.
+ *
+ * On kernels before 2.6.8 the buffer is not kept around.
+ */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
+static void vbsf_put_link(struct inode *inode, void *cookie)
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
+static void vbsf_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+# else
+static void vbsf_put_link(struct dentry *dentry, struct nameidata *nd)
+# endif
+{
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
+ char *page = cookie;
+# else
+ char *page = nd_get_link(nd);
+# endif
+ SFLOGFLOW(("vbsf_put_link: page=%p\n", page));
+ if (!IS_ERR(page))
+ free_page((unsigned long)page);
+}
+# endif /* >= 2.6.8 */
+
+#endif /* < 4.5.0 */
+
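+/*
+ * What the operations table below resolves to per kernel version:
+ *   < 2.6.8:       .readlink = vbsf_readlink,    .follow_link (no .put_link)
+ *   2.6.8 - 4.4:   .readlink = generic_readlink, .follow_link + .put_link
+ *   4.5 - 4.9:     .readlink = generic_readlink, .get_link
+ *   >= 4.10:       .get_link only (the VFS supplies readlink itself)
+ */
+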
+/**
+ * Symlink inode operations.
+ */
+struct inode_operations vbsf_lnk_iops = {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
+ .readlink = generic_readlink,
# else
- .follow_link = sf_follow_link,
- .put_link = sf_put_link
+ .readlink = vbsf_readlink,
+# endif
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+ .get_link = vbsf_get_link
+#else
+ .follow_link = vbsf_follow_link,
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
+ .put_link = vbsf_put_link,
# endif
+#endif
};
-#endif /* LINUX_VERSION_CODE >= 2.6.0 */
diff --git a/ubuntu/vbox/vboxsf/r0drv/linux/the-linux-kernel.h b/ubuntu/vbox/vboxsf/r0drv/linux/the-linux-kernel.h
index e31f2fee1c5b..66dd734b08d1 100644
--- a/ubuntu/vbox/vboxsf/r0drv/linux/the-linux-kernel.h
+++ b/ubuntu/vbox/vboxsf/r0drv/linux/the-linux-kernel.h
@@ -128,6 +128,9 @@
# include <linux/cpu.h>
# include <linux/notifier.h>
#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)
+# include <uapi/linux/mman.h>
+#endif
/* For the basic additions module */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -159,7 +162,7 @@
# include <linux/tqueue.h>
#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 4)
# include <linux/kthread.h>
#endif
@@ -457,5 +460,10 @@ typedef struct tq_struct RTR0LNXWORKQUEUEITEM;
DECLHIDDEN(void) rtR0LnxWorkqueuePush(RTR0LNXWORKQUEUEITEM *pWork, void (*pfnWorker)(RTR0LNXWORKQUEUEITEM *));
DECLHIDDEN(void) rtR0LnxWorkqueueFlush(void);
+/*
+ * Memory hacks from memobj-r0drv-linux.c that shared folders need.
+ */
+RTDECL(struct page *) rtR0MemObjLinuxVirtToPage(void *pv);
+
#endif /* !IPRT_INCLUDED_SRC_r0drv_linux_the_linux_kernel_h */
diff --git a/ubuntu/vbox/vboxsf/regops.c b/ubuntu/vbox/vboxsf/regops.c
index 646d6062668a..98d50a542d95 100644
--- a/ubuntu/vbox/vboxsf/regops.c
+++ b/ubuntu/vbox/vboxsf/regops.c
@@ -28,402 +28,2951 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/*
- * Limitations: only COW memory mapping is supported
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include "vfsmod.h"
+#include <linux/uio.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
+# include <linux/aio.h> /* struct kiocb before 4.1 */
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
+# include <linux/buffer_head.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+# include <linux/writeback.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+# include <linux/splice.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
+# include <linux/pipe_fs_i.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
+# include <linux/swap.h> /* for mark_page_accessed */
+#endif
+#include <iprt/err.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+# define SEEK_END 2
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+# define iter_is_iovec(a_pIter) ( !((a_pIter)->type & ITER_KVEC) )
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
+# define iter_is_iovec(a_pIter) ( !((a_pIter)->type & (ITER_KVEC | ITER_BVEC)) )
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
+# define vm_fault_t int
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 20)
+# define pgoff_t unsigned long
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 12)
+# define PageUptodate(a_pPage) Page_Uptodate(a_pPage)
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+struct vbsf_iov_iter {
+ unsigned int type;
+ unsigned int v_write : 1;
+ size_t iov_offset;
+ size_t nr_segs;
+ struct iovec const *iov;
+# ifdef VBOX_STRICT
+ struct iovec const *iov_org;
+ size_t nr_segs_org;
+# endif
+};
+# ifdef VBOX_STRICT
+# define VBSF_IOV_ITER_INITIALIZER(a_cSegs, a_pIov, a_fWrite) \
+ { vbsf_iov_iter_detect_type(a_pIov, a_cSegs), a_fWrite, 0, a_cSegs, a_pIov, a_pIov, a_cSegs }
+# else
+# define VBSF_IOV_ITER_INITIALIZER(a_cSegs, a_pIov, a_fWrite) \
+ { vbsf_iov_iter_detect_type(a_pIov, a_cSegs), a_fWrite, 0, a_cSegs, a_pIov }
+# endif
+# define ITER_KVEC 1
+# define iov_iter vbsf_iov_iter
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+/** Used by vbsf_iter_lock_pages() to keep the first page of the next segment. */
+struct vbsf_iter_stash {
+ struct page *pPage;
+ size_t off;
+ size_t cb;
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+ size_t offFromEnd;
+ struct iov_iter Copy;
+# endif
+};
+#endif /* >= 2.6.19 */
+/** Initializer for struct vbsf_iter_stash. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+# define VBSF_ITER_STASH_INITIALIZER { NULL, 0 }
+#else
+# define VBSF_ITER_STASH_INITIALIZER { NULL, 0, ~(size_t)0 }
+#endif
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+DECLINLINE(void) vbsf_put_page(struct page *pPage);
+static void vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack);
+static void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
+ uint8_t const *pbSrcBuf, struct page **papSrcPages,
+ uint32_t offSrcPage, size_t cSrcPages);
+
+
+/*********************************************************************************************************************************
+* Provide more recent uio.h functionality to older kernels. *
+*********************************************************************************************************************************/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+
+/**
+ * Detects the vector type.
+ */
+static int vbsf_iov_iter_detect_type(struct iovec const *paIov, size_t cSegs)
+{
+ /* Check the first segment with a non-zero length. */
+ while (cSegs-- > 0) {
+ if (paIov->iov_len > 0) {
+ if (access_ok(VERIFY_READ, paIov->iov_base, paIov->iov_len))
+ return (uintptr_t)paIov->iov_base >= USER_DS.seg ? ITER_KVEC : 0;
+ AssertMsgFailed(("%p LB %#zx\n", paIov->iov_base, paIov->iov_len));
+ break;
+ }
+ paIov++;
+ }
+ return 0;
+}
+
+
+# undef iov_iter_count
+# define iov_iter_count(a_pIter) vbsf_iov_iter_count(a_pIter)
+static size_t vbsf_iov_iter_count(struct vbsf_iov_iter const *iter)
+{
+ size_t cbRet = 0;
+ size_t cLeft = iter->nr_segs;
+ struct iovec const *iov = iter->iov;
+ while (cLeft-- > 0) {
+ cbRet += iov->iov_len;
+ iov++;
+ }
+ return cbRet - iter->iov_offset;
+}
+
+
+# undef iov_iter_single_seg_count
+# define iov_iter_single_seg_count(a_pIter) vbsf_iov_iter_single_seg_count(a_pIter)
+static size_t vbsf_iov_iter_single_seg_count(struct vbsf_iov_iter const *iter)
+{
+ if (iter->nr_segs > 0)
+ return iter->iov->iov_len - iter->iov_offset;
+ return 0;
+}
+
+
+# undef iov_iter_advance
+# define iov_iter_advance(a_pIter, a_cbSkip) vbsf_iov_iter_advance(a_pIter, a_cbSkip)
+static void vbsf_iov_iter_advance(struct vbsf_iov_iter *iter, size_t cbSkip)
+{
+ SFLOG2(("vbsf_iov_iter_advance: cbSkip=%#zx\n", cbSkip));
+ if (iter->nr_segs > 0) {
+ size_t const cbLeftCur = iter->iov->iov_len - iter->iov_offset;
+ Assert(iter->iov_offset <= iter->iov->iov_len);
+ if (cbLeftCur > cbSkip) {
+ iter->iov_offset += cbSkip;
+ } else {
+ cbSkip -= cbLeftCur;
+ iter->iov_offset = 0;
+ iter->iov++;
+ iter->nr_segs--;
+ while (iter->nr_segs > 0) {
+ size_t const cbSeg = iter->iov->iov_len;
+ if (cbSeg > cbSkip) {
+ iter->iov_offset = cbSkip;
+ break;
+ }
+ cbSkip -= cbSeg;
+ iter->iov++;
+ iter->nr_segs--;
+ }
+ }
+ }
+}
+
+
+# undef iov_iter_get_pages
+# define iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) \
+ vbsf_iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0)
+static ssize_t vbsf_iov_iter_get_pages(struct vbsf_iov_iter *iter, struct page **papPages,
+ size_t cbMax, unsigned cMaxPages, size_t *poffPg0)
+{
+ while (iter->nr_segs > 0) {
+ size_t const cbLeft = iter->iov->iov_len - iter->iov_offset;
+ Assert(iter->iov->iov_len >= iter->iov_offset);
+ if (cbLeft > 0) {
+ uintptr_t uPtrFrom = (uintptr_t)iter->iov->iov_base + iter->iov_offset;
+ size_t offPg0 = *poffPg0 = uPtrFrom & PAGE_OFFSET_MASK;
+ size_t cPagesLeft = RT_ALIGN_Z(offPg0 + cbLeft, PAGE_SIZE) >> PAGE_SHIFT;
+ size_t cPages = RT_MIN(cPagesLeft, cMaxPages);
+ struct task_struct *pTask = current;
+ size_t cPagesLocked;
+
+ down_read(&pTask->mm->mmap_sem);
+ cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, iter->v_write, 1 /*force*/, papPages, NULL);
+ up_read(&pTask->mm->mmap_sem);
+ if (cPagesLocked == cPages) {
+ size_t cbRet = (cPages << PAGE_SHIFT) - offPg0;
+ if (cPages == cPagesLeft) {
+ size_t offLastPg = (uPtrFrom + cbLeft) & PAGE_OFFSET_MASK;
+ if (offLastPg)
+ cbRet -= PAGE_SIZE - offLastPg;
+ }
+ Assert(cbRet <= cbLeft);
+ return cbRet;
+ }
+ if (cPagesLocked > 0)
+ vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/, false /*fLockPgHack*/);
+ return -EFAULT;
+ }
+ iter->iov_offset = 0;
+ iter->iov++;
+ iter->nr_segs--;
+ }
+ AssertFailed();
+ return 0;
+}
+
+
+# undef iov_iter_truncate
+# define iov_iter_truncate(iter, cbNew) vbsf_iov_iter_truncate(iter, cbNew)
+static void vbsf_iov_iter_truncate(struct vbsf_iov_iter *iter, size_t cbNew)
+{
+ /* we have no counter or stuff, so it's a no-op. */
+ RT_NOREF(iter, cbNew);
+}
+
+
+# undef iov_iter_revert
+# define iov_iter_revert(a_pIter, a_cbRewind) vbsf_iov_iter_revert(a_pIter, a_cbRewind)
+void vbsf_iov_iter_revert(struct vbsf_iov_iter *iter, size_t cbRewind)
+{
+ SFLOG2(("vbsf_iov_iter_revert: cbRewind=%#zx\n", cbRewind));
+ if (iter->iov_offset > 0) {
+ if (cbRewind <= iter->iov_offset) {
+ iter->iov_offset -= cbRewind;
+ return;
+ }
+ cbRewind -= iter->iov_offset;
+ iter->iov_offset = 0;
+ }
+
+ while (cbRewind > 0) {
+ struct iovec const *pIov = --iter->iov;
+ size_t const cbSeg = pIov->iov_len;
+ iter->nr_segs++;
+
+ Assert((uintptr_t)pIov >= (uintptr_t)iter->iov_org);
+ Assert(iter->nr_segs <= iter->nr_segs_org);
+
+ if (cbRewind <= cbSeg) {
+ iter->iov_offset = cbSeg - cbRewind;
+ break;
+ }
+ cbRewind -= cbSeg;
+ }
+}
+
+#endif /* 2.6.19 <= linux < 3.16.0 */
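+
+/* An illustrative sketch (example only, never compiled): driving the
+ * 2.6.19 <= linux < 3.16 iov_iter emulation above over a two-segment user
+ * iovec.  The function name is made up. */
+#if 0 /* example only */
+static void vbsf_iov_iter_demo(struct iovec const *paIov /* two user segments */)
+{
+    struct vbsf_iov_iter Iter    = VBSF_IOV_ITER_INITIALIZER(2, paIov, 0 /*fWrite*/);
+    size_t               cbTotal = iov_iter_count(&Iter);  /* sum of both segment lengths */
+    iov_iter_advance(&Iter, 16);                           /* consume 16 bytes of segment 0 */
+    /* iov_iter_get_pages() would now pin pages starting 16 bytes in;
+       iov_iter_revert() undoes the advance: */
+    iov_iter_revert(&Iter, 16);
+    RT_NOREF(cbTotal);
+}
+#endif /* example only */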
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+
+/** This is for implementing cMaxPages on 3.16, which doesn't have it. */
+static ssize_t vbsf_iov_iter_get_pages_3_16(struct iov_iter *iter, struct page **papPages,
+ size_t cbMax, unsigned cMaxPages, size_t *poffPg0)
+{
+ if (!(iter->type & ITER_BVEC)) {
+ size_t const offPg0 = iter->iov_offset & PAGE_OFFSET_MASK;
+ size_t const cbMaxPages = ((size_t)cMaxPages << PAGE_SHIFT) - offPg0;
+ if (cbMax > cbMaxPages)
+ cbMax = cbMaxPages;
+ }
+ /* else: BVEC works a page at a time and shouldn't have much of a problem here. */
+ return iov_iter_get_pages(iter, papPages, cbMax, poffPg0);
+}
+# undef iov_iter_get_pages
+# define iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) \
+ vbsf_iov_iter_get_pages_3_16(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0)
+
+#endif /* 3.16.x */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
+
+static size_t copy_from_iter(uint8_t *pbDst, size_t cbToCopy, struct iov_iter *pSrcIter)
+{
+ size_t const cbTotal = cbToCopy;
+ Assert(iov_iter_count(pSrcIter) >= cbToCopy);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ if (pSrcIter->type & ITER_BVEC) {
+ while (cbToCopy > 0) {
+ size_t const offPage = (uintptr_t)pbDst & PAGE_OFFSET_MASK;
+ size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
+ struct page *pPage = rtR0MemObjLinuxVirtToPage(pbDst);
+ size_t cbCopied = copy_page_from_iter(pPage, offPage, cbThisCopy, pSrcIter);
+ AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
+ pbDst += cbCopied;
+ cbToCopy -= cbCopied;
+ if (cbCopied != cbThisCopy)
+ break; /* short copy: fault while accessing the iterator memory */
+ }
+ } else
+# endif
+ {
+ while (cbToCopy > 0) {
+ size_t cbThisCopy = iov_iter_single_seg_count(pSrcIter);
+ if (cbThisCopy > 0) {
+ if (cbThisCopy > cbToCopy)
+ cbThisCopy = cbToCopy;
+ if (pSrcIter->type & ITER_KVEC)
+ memcpy(pbDst, (void *)pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy);
+ else if (copy_from_user(pbDst, pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy) != 0)
+ break; /* copy_from_user returns the number of bytes it could NOT copy. */
+ pbDst += cbThisCopy;
+ cbToCopy -= cbThisCopy;
+ }
+ iov_iter_advance(pSrcIter, cbThisCopy);
+ }
+ }
+ return cbTotal - cbToCopy;
+}
+
+
+static size_t copy_to_iter(uint8_t const *pbSrc, size_t cbToCopy, struct iov_iter *pDstIter)
+{
+ size_t const cbTotal = cbToCopy;
+ Assert(iov_iter_count(pDstIter) >= cbToCopy);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ if (pDstIter->type & ITER_BVEC) {
+ while (cbToCopy > 0) {
+ size_t const offPage = (uintptr_t)pbSrc & PAGE_OFFSET_MASK;
+ size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
+ struct page *pPage = rtR0MemObjLinuxVirtToPage((void *)pbSrc);
+ size_t cbCopied = copy_page_to_iter(pPage, offPage, cbThisCopy, pDstIter);
+ AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
+ pbSrc += cbCopied;
+ cbToCopy -= cbCopied;
+ if (cbCopied != cbThisCopy)
+ break; /* short copy: fault while accessing the iterator memory */
+ }
+ } else
+# endif
+ {
+ while (cbToCopy > 0) {
+ size_t cbThisCopy = iov_iter_single_seg_count(pDstIter);
+ if (cbThisCopy > 0) {
+ if (cbThisCopy > cbToCopy)
+ cbThisCopy = cbToCopy;
+ if (pDstIter->type & ITER_KVEC)
+ memcpy((void *)pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy);
+ else if (copy_to_user(pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy) != 0)
+ break; /* copy_to_user returns the number of bytes it could NOT copy. */
+ pbSrc += cbThisCopy;
+ cbToCopy -= cbThisCopy;
+ }
+ iov_iter_advance(pDstIter, cbThisCopy);
+ }
+ }
+ return cbTotal - cbToCopy;
+}
+
+#endif /* 2.6.19 <= linux < 3.18.0 */
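+
+/* An illustrative sketch (example only): with the backports above, read/write
+ * paths can use the modern helpers uniformly.  The function is made up; a
+ * short count from either helper indicates a fault in the iterator memory. */
+#if 0 /* example only */
+static ssize_t vbsf_return_data_demo(struct iov_iter *pDstIter, uint8_t const *pbBuf, size_t cbActual)
+{
+    size_t cbCopied = copy_to_iter(pbBuf, cbActual, pDstIter);
+    return cbCopied == cbActual ? (ssize_t)cbCopied : -EFAULT;
+}
+#endif /* example only */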
+
+
+
+/*********************************************************************************************************************************
+* Handle management *
+*********************************************************************************************************************************/
+
+/**
+ * Called when an inode is released, to unlink all handles that might possibly
+ * still be associated with it.
+ *
+ * @param pInodeInfo The inode which handles to drop.
+ */
+void vbsf_handle_drop_chain(struct vbsf_inode_info *pInodeInfo)
+{
+ struct vbsf_handle *pCur, *pNext;
+ unsigned long fSavedFlags;
+ SFLOGFLOW(("vbsf_handle_drop_chain: %p\n", pInodeInfo));
+ spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
+
+ RTListForEachSafe(&pInodeInfo->HandleList, pCur, pNext, struct vbsf_handle, Entry) {
+ AssertMsg( (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
+ == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
+ pCur->fFlags &= ~VBSF_HANDLE_F_ON_LIST;
+ RTListNodeRemove(&pCur->Entry);
+ }
+
+ spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
+}
+
+
+/**
+ * Locates a handle that has all the flags in @a fFlagsSet set and all the
+ * flags in @a fFlagsClear clear.
+ *
+ * @returns Pointer to handle on success (retained), use vbsf_handle_release() to
+ * release it. NULL if no suitable handle was found.
+ * @param pInodeInfo The inode info to search.
+ * @param fFlagsSet The flags that must be set.
+ * @param fFlagsClear The flags that must be clear.
+ */
+struct vbsf_handle *vbsf_handle_find(struct vbsf_inode_info *pInodeInfo, uint32_t fFlagsSet, uint32_t fFlagsClear)
+{
+ struct vbsf_handle *pCur;
+ unsigned long fSavedFlags;
+ spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
+
+ RTListForEach(&pInodeInfo->HandleList, pCur, struct vbsf_handle, Entry) {
+ AssertMsg( (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
+ == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
+ if ((pCur->fFlags & (fFlagsSet | fFlagsClear)) == fFlagsSet) {
+ uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
+ if (cRefs > 1) {
+ spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
+ SFLOGFLOW(("vbsf_handle_find: returns %p\n", pCur));
+ return pCur;
+ }
+ /* Oops, already being closed (safe as it's only ever increased here). */
+ ASMAtomicDecU32(&pCur->cRefs);
+ }
+ }
+
+ spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
+ SFLOGFLOW(("vbsf_handle_find: returns NULL!\n"));
+ return NULL;
+}
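+
+/* An illustrative sketch (example only) of the retain/release protocol around
+ * vbsf_handle_find().  It assumes the usual vbsf_handle_release() inline
+ * wrapper from vfsmod.h (which calls vbsf_handle_release_slow() on the last
+ * reference) and a VBSF_HANDLE_F_WRITE flag; both are assumptions here. */
+#if 0 /* example only */
+static int vbsf_use_writable_handle_demo(struct vbsf_inode_info *pInodeInfo, struct vbsf_super_info *pSuperInfo)
+{
+    struct vbsf_handle *pHandle = vbsf_handle_find(pInodeInfo, VBSF_HANDLE_F_WRITE, 0 /*fFlagsClear*/);
+    if (pHandle) {
+        /* ... issue host requests using pHandle->hHost ... */
+        vbsf_handle_release(pHandle, pSuperInfo, "vbsf_use_writable_handle_demo");
+        return 0;
+    }
+    return -EBADF;
+}
+#endif /* example only */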
+
+
+/**
+ * Slow worker for vbsf_handle_release() that does the freeing.
+ *
+ * @returns 0 (ref count).
+ * @param pHandle The handle to release.
+ * @param pSuperInfo The info structure for the shared folder associated with
+ * the handle.
+ * @param pszCaller The caller name (for logging failures).
+ */
+uint32_t vbsf_handle_release_slow(struct vbsf_handle *pHandle, struct vbsf_super_info *pSuperInfo, const char *pszCaller)
+{
+ int rc;
+ unsigned long fSavedFlags;
+
+ SFLOGFLOW(("vbsf_handle_release_slow: %p (%s)\n", pHandle, pszCaller));
+
+ /*
+ * Remove from the list.
+ */
+ spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
+
+ AssertMsg((pHandle->fFlags & VBSF_HANDLE_F_MAGIC_MASK) == VBSF_HANDLE_F_MAGIC, ("%p %#x\n", pHandle, pHandle->fFlags));
+ Assert(pHandle->pInodeInfo);
+ Assert(pHandle->pInodeInfo && pHandle->pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
+
+ if (pHandle->fFlags & VBSF_HANDLE_F_ON_LIST) {
+ pHandle->fFlags &= ~VBSF_HANDLE_F_ON_LIST;
+ RTListNodeRemove(&pHandle->Entry);
+ }
+
+ spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
+
+ /*
+ * Actually destroy it.
+ */
+ rc = VbglR0SfHostReqCloseSimple(pSuperInfo->map.root, pHandle->hHost);
+ if (RT_FAILURE(rc))
+ LogFunc(("Caller %s: VbglR0SfHostReqCloseSimple %#RX64 failed with rc=%Rrc\n", pszCaller, pHandle->hHost, rc));
+ pHandle->hHost = SHFL_HANDLE_NIL;
+ pHandle->fFlags = VBSF_HANDLE_F_MAGIC_DEAD;
+ kfree(pHandle);
+ return 0;
+}
+
+
+/**
+ * Appends a handle to a handle list.
+ *
+ * @param pInodeInfo The inode to add it to.
+ * @param pHandle The handle to add.
+ */
+void vbsf_handle_append(struct vbsf_inode_info *pInodeInfo, struct vbsf_handle *pHandle)
+{
+#ifdef VBOX_STRICT
+ struct vbsf_handle *pCur;
+#endif
+ unsigned long fSavedFlags;
+
+ SFLOGFLOW(("vbsf_handle_append: %p (to %p)\n", pHandle, pInodeInfo));
+ AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
+ ("%p %#x\n", pHandle, pHandle->fFlags));
+ Assert(pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
+
+ spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
+
+ AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
+ ("%p %#x\n", pHandle, pHandle->fFlags));
+#ifdef VBOX_STRICT
+ RTListForEach(&pInodeInfo->HandleList, pCur, struct vbsf_handle, Entry) {
+ Assert(pCur != pHandle);
+ AssertMsg( (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
+ == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
+ }
+ pHandle->pInodeInfo = pInodeInfo;
+#endif
+
+ pHandle->fFlags |= VBSF_HANDLE_F_ON_LIST;
+ RTListAppend(&pInodeInfo->HandleList, &pHandle->Entry);
+
+ spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
+}
+
+
+
+/*********************************************************************************************************************************
+* Misc *
+*********************************************************************************************************************************/
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6)
+/** Any writable mappings? */
+DECLINLINE(bool) mapping_writably_mapped(struct address_space const *mapping)
+{
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 6)
+ return !list_empty(&mapping->i_mmap_shared);
+# else
+ return mapping->i_mmap_shared != NULL;
+# endif
+}
+#endif
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 12)
+/** Missing in 2.4.x, so just stub it for now. */
+DECLINLINE(bool) PageWriteback(struct page const *page)
+{
+ return false;
+}
+#endif
+
+
+/**
+ * Helper for deciding whether we should do a read via the page cache or not.
+ *
+ * By default we will only use the page cache if there is a writable memory
+ * mapping of the file with a chance that it may have modified any of the pages
+ * already.
+ */
+DECLINLINE(bool) vbsf_should_use_cached_read(struct file *file, struct address_space *mapping, struct vbsf_super_info *pSuperInfo)
+{
+ if ( (file->f_flags & O_DIRECT)
+ || pSuperInfo->enmCacheMode == kVbsfCacheMode_None)
+ return false;
+ if ( pSuperInfo->enmCacheMode == kVbsfCacheMode_Read
+ || pSuperInfo->enmCacheMode == kVbsfCacheMode_ReadWrite)
+ return true;
+ Assert(pSuperInfo->enmCacheMode == kVbsfCacheMode_Strict);
+ return mapping
+ && mapping->nrpages > 0
+ && mapping_writably_mapped(mapping);
+}
+
+
+
+/*********************************************************************************************************************************
+* Pipe / splice stuff mainly for 2.6.17 <= linux < 2.6.31 (where no fallbacks were available) *
+*********************************************************************************************************************************/
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
+# define LOCK_PIPE(a_pPipe) do { if ((a_pPipe)->inode) mutex_lock(&(a_pPipe)->inode->i_mutex); } while (0)
+# define UNLOCK_PIPE(a_pPipe) do { if ((a_pPipe)->inode) mutex_unlock(&(a_pPipe)->inode->i_mutex); } while (0)
+# else
+# define LOCK_PIPE(a_pPipe) pipe_lock(a_pPipe)
+# define UNLOCK_PIPE(a_pPipe) pipe_unlock(a_pPipe)
+# endif
+
+
+/** Waits for the pipe buffer status to change. */
+static void vbsf_wait_pipe(struct pipe_inode_info *pPipe)
+{
+ DEFINE_WAIT(WaitStuff);
+# ifdef TASK_NONINTERACTIVE
+ prepare_to_wait(&pPipe->wait, &WaitStuff, TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
+# else
+ prepare_to_wait(&pPipe->wait, &WaitStuff, TASK_INTERRUPTIBLE);
+# endif
+ UNLOCK_PIPE(pPipe);
+
+ schedule();
+
+ finish_wait(&pPipe->wait, &WaitStuff);
+ LOCK_PIPE(pPipe);
+}
+
+
+/** Wakes up pipe readers or writers; used by vbsf_feed_pages_to_pipe and vbsf_splice_write. */
+static void vbsf_wake_up_pipe(struct pipe_inode_info *pPipe, bool fReaders)
+{
+ smp_mb();
+ if (waitqueue_active(&pPipe->wait))
+ wake_up_interruptible_sync(&pPipe->wait);
+ if (fReaders)
+ kill_fasync(&pPipe->fasync_readers, SIGIO, POLL_IN);
+ else
+ kill_fasync(&pPipe->fasync_writers, SIGIO, POLL_OUT);
+}
+
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+
+/** Verify pipe buffer content (needed for page-cache to ensure idle page). */
+static int vbsf_pipe_buf_confirm(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+ /*SFLOG3(("vbsf_pipe_buf_confirm: %p\n", pPipeBuf));*/
+ return 0;
+}
+
+
+/** Maps the buffer page. */
+static void *vbsf_pipe_buf_map(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, int atomic)
+{
+ void *pvRet;
+ if (!atomic)
+ pvRet = kmap(pPipeBuf->page);
+ else {
+ pPipeBuf->flags |= PIPE_BUF_FLAG_ATOMIC;
+ pvRet = kmap_atomic(pPipeBuf->page, KM_USER0);
+ }
+ /*SFLOG3(("vbsf_pipe_buf_map: %p -> %p\n", pPipeBuf, pvRet));*/
+ return pvRet;
+}
+
+
+/** Unmaps the buffer page. */
+static void vbsf_pipe_buf_unmap(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, void *pvMapping)
+{
+ /*SFLOG3(("vbsf_pipe_buf_unmap: %p/%p\n", pPipeBuf, pvMapping)); */
+ if (!(pPipeBuf->flags & PIPE_BUF_FLAG_ATOMIC))
+ kunmap(pPipeBuf->page);
+ else {
+ pPipeBuf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
+ kunmap_atomic(pvMapping, KM_USER0);
+ }
+}
+
+
+/** Gets a reference to the page. */
+static void vbsf_pipe_buf_get(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+ page_cache_get(pPipeBuf->page);
+ /*SFLOG3(("vbsf_pipe_buf_get: %p (return count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
+}
+
+
+/** Release the buffer page (counter to vbsf_pipe_buf_get). */
+static void vbsf_pipe_buf_release(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+ /*SFLOG3(("vbsf_pipe_buf_release: %p (incoming count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
+ page_cache_release(pPipeBuf->page);
+}
+
+
+/** Attempt to steal the page.
+ * @returns 0 on success, 1 on failure. */
+static int vbsf_pipe_buf_steal(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+ if (page_count(pPipeBuf->page) == 1) {
+ lock_page(pPipeBuf->page);
+ SFLOG3(("vbsf_pipe_buf_steal: %p -> 0\n", pPipeBuf));
+ return 0;
+ }
+ SFLOG3(("vbsf_pipe_buf_steal: %p -> 1\n", pPipeBuf));
+ return 1;
+}
+
+
+/**
+ * Pipe buffer operations used by vbsf_feed_pages_to_pipe.
+ */
+static struct pipe_buf_operations vbsf_pipe_buf_ops = {
+ .can_merge = 0,
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
+ .confirm = vbsf_pipe_buf_confirm,
+# else
+ .pin = vbsf_pipe_buf_confirm,
+# endif
+ .map = vbsf_pipe_buf_map,
+ .unmap = vbsf_pipe_buf_unmap,
+ .get = vbsf_pipe_buf_get,
+ .release = vbsf_pipe_buf_release,
+ .steal = vbsf_pipe_buf_steal,
+};
+
+
+/**
+ * Feeds the pages to the pipe.
+ *
+ * Pages given to the pipe are set to NULL in papPages.
+ */
+static ssize_t vbsf_feed_pages_to_pipe(struct pipe_inode_info *pPipe, struct page **papPages, size_t cPages, uint32_t offPg0,
+ uint32_t cbActual, unsigned fFlags)
+{
+ ssize_t cbRet = 0;
+ size_t iPage = 0;
+ bool fNeedWakeUp = false;
+
+ LOCK_PIPE(pPipe);
+ for (;;) {
+ if ( pPipe->readers > 0
+ && pPipe->nrbufs < PIPE_BUFFERS) {
+ struct pipe_buffer *pPipeBuf = &pPipe->bufs[(pPipe->curbuf + pPipe->nrbufs) % PIPE_BUFFERS];
+ uint32_t const cbThisPage = RT_MIN(cbActual, PAGE_SIZE - offPg0);
+ pPipeBuf->len = cbThisPage;
+ pPipeBuf->offset = offPg0;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
+ pPipeBuf->private = 0;
+# endif
+ pPipeBuf->ops = &vbsf_pipe_buf_ops;
+ pPipeBuf->flags = fFlags & SPLICE_F_GIFT ? PIPE_BUF_FLAG_GIFT : 0;
+ pPipeBuf->page = papPages[iPage];
+
+ papPages[iPage++] = NULL;
+ pPipe->nrbufs++;
+ fNeedWakeUp |= pPipe->inode != NULL;
+ offPg0 = 0;
+ cbRet += cbThisPage;
+
+ /* done? */
+ cbActual -= cbThisPage;
+ if (!cbActual)
+ break;
+ } else if (pPipe->readers == 0) {
+ SFLOGFLOW(("vbsf_feed_pages_to_pipe: no readers!\n"));
+ send_sig(SIGPIPE, current, 0);
+ if (cbRet == 0)
+ cbRet = -EPIPE;
+ break;
+ } else if (fFlags & SPLICE_F_NONBLOCK) {
+ if (cbRet == 0)
+ cbRet = -EAGAIN;
+ break;
+ } else if (signal_pending(current)) {
+ if (cbRet == 0)
+ cbRet = -ERESTARTSYS;
+ SFLOGFLOW(("vbsf_feed_pages_to_pipe: pending signal! (%zd)\n", cbRet));
+ break;
+ } else {
+ if (fNeedWakeUp) {
+ vbsf_wake_up_pipe(pPipe, true /*fReaders*/);
+ fNeedWakeUp = false;
+ }
+ pPipe->waiting_writers++;
+ vbsf_wait_pipe(pPipe);
+ pPipe->waiting_writers--;
+ }
+ }
+ UNLOCK_PIPE(pPipe);
+
+ if (fNeedWakeUp)
+ vbsf_wake_up_pipe(pPipe, true /*fReaders*/);
+
+ return cbRet;
+}
+
+
+/**
+ * For splicing from a file to a pipe.
+ */
+static ssize_t vbsf_splice_read(struct file *file, loff_t *poffset, struct pipe_inode_info *pipe, size_t len, unsigned int flags)
+{
+ struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ ssize_t cbRet;
+
+ SFLOGFLOW(("vbsf_splice_read: file=%p poffset=%p{%#RX64} pipe=%p len=%#zx flags=%#x\n", file, poffset, *poffset, pipe, len, flags));
+ if (vbsf_should_use_cached_read(file, inode->i_mapping, pSuperInfo)) {
+ cbRet = generic_file_splice_read(file, poffset, pipe, len, flags);
+ } else {
+ /*
+ * Create a read request.
+ */
+ loff_t offFile = *poffset;
+ size_t cPages = RT_MIN(RT_ALIGN_Z((offFile & ~PAGE_CACHE_MASK) + len, PAGE_CACHE_SIZE) >> PAGE_CACHE_SHIFT,
+ PIPE_BUFFERS);
+ VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ,
+ PgLst.aPages[cPages]));
+ if (pReq) {
+ /*
+ * Allocate pages.
+ */
+ struct page *apPages[PIPE_BUFFERS];
+ size_t i;
+ pReq->PgLst.offFirstPage = (uint16_t)offFile & (uint16_t)PAGE_OFFSET_MASK;
+ cbRet = 0;
+ for (i = 0; i < cPages; i++) {
+ struct page *pPage;
+ apPages[i] = pPage = alloc_page(GFP_USER);
+ if (pPage) {
+ pReq->PgLst.aPages[i] = page_to_phys(pPage);
+# ifdef VBOX_STRICT
+ ASMMemFill32(kmap(pPage), PAGE_SIZE, UINT32_C(0xdeadbeef));
+ kunmap(pPage);
+# endif
+ } else {
+ cbRet = -ENOMEM;
+ break;
+ }
+ }
+ if (cbRet == 0) {
+ /*
+ * Do the reading.
+ */
+ uint32_t const cbToRead = RT_MIN((cPages << PAGE_SHIFT) - (offFile & PAGE_OFFSET_MASK), len);
+ struct vbsf_reg_info *sf_r = (struct vbsf_reg_info *)file->private_data;
+ int vrc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile, cbToRead, cPages);
+ if (RT_SUCCESS(vrc)) {
+ /*
+ * Get the number of bytes read, jettison the request
+ * and, in case of EOF, any unnecessary pages.
+ */
+ uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
+ AssertStmt(cbActual <= cbToRead, cbActual = cbToRead);
+ SFLOG2(("vbsf_splice_read: read -> %#x bytes @ %#RX64\n", cbActual, offFile));
+
+ VbglR0PhysHeapFree(pReq);
+ pReq = NULL;
+
+ /*
+ * Now, feed it to the pipe thingy.
+ * This will take ownership of all the pages no matter what happens.
+ */
+ cbRet = vbsf_feed_pages_to_pipe(pipe, apPages, cPages, offFile & PAGE_OFFSET_MASK, cbActual, flags);
+ if (cbRet > 0)
+ *poffset = offFile + cbRet;
+ } else {
+ cbRet = -RTErrConvertToErrno(vrc);
+ SFLOGFLOW(("vbsf_splice_read: Read failed: %Rrc -> %zd\n", vrc, cbRet));
+ }
+ i = cPages;
+ }
+
+ while (i-- > 0)
+ if (apPages[i])
+ __free_pages(apPages[i], 0);
+ if (pReq)
+ VbglR0PhysHeapFree(pReq);
+ } else {
+ cbRet = -ENOMEM;
+ }
+ }
+ SFLOGFLOW(("vbsf_splice_read: returns %zd (%#zx), *poffset=%#RX64\n", cbRet, cbRet, *poffset));
+ return cbRet;
+}
+
+#endif /* 2.6.17 <= LINUX_VERSION_CODE < 2.6.31 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+
+/**
+ * For splicing from a pipe to a file.
+ *
+ * Since we can combine buffers and request allocations, this should be faster
+ * than the default implementation.
+ */
+static ssize_t vbsf_splice_write(struct pipe_inode_info *pPipe, struct file *file, loff_t *poffset, size_t len, unsigned int flags)
+{
+ struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ ssize_t cbRet;
+
+ SFLOGFLOW(("vbsf_splice_write: pPipe=%p file=%p poffset=%p{%#RX64} len=%#zx flags=%#x\n", pPipe, file, poffset, *poffset, len, flags));
+ /** @todo later if (false) {
+ cbRet = generic_file_splice_write(pPipe, file, poffset, len, flags);
+ } else */ {
+ /*
+ * Prepare a write request.
+ */
+# ifdef PIPE_BUFFERS
+ uint32_t const cMaxPages = RT_MIN(PIPE_BUFFERS, RT_ALIGN_Z(len, PAGE_SIZE) >> PAGE_SHIFT);
+# else
+ uint32_t const cMaxPages = RT_MIN(RT_MAX(RT_MIN(pPipe->buffers, 256), PIPE_DEF_BUFFERS),
+ RT_ALIGN_Z(len, PAGE_SIZE) >> PAGE_SHIFT);
+# endif
+ VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ,
+ PgLst.aPages[cMaxPages]));
+ if (pReq) {
+ /*
+ * Feed from the pipe.
+ */
+ struct vbsf_reg_info *sf_r = (struct vbsf_reg_info *)file->private_data;
+ struct address_space *mapping = inode->i_mapping;
+ loff_t offFile = *poffset;
+ bool fNeedWakeUp = false;
+ cbRet = 0;
+
+ LOCK_PIPE(pPipe);
+
+ for (;;) {
+ unsigned cBufs = pPipe->nrbufs;
+ /*SFLOG2(("vbsf_splice_write: nrbufs=%#x curbuf=%#x\n", cBufs, pPipe->curbuf));*/
+ if (cBufs) {
+ /*
+ * There is data available. Write it to the file.
+ */
+ int vrc;
+ struct pipe_buffer *pPipeBuf = &pPipe->bufs[pPipe->curbuf];
+ uint32_t cPagesToWrite = 1;
+ uint32_t cbToWrite = pPipeBuf->len;
+
+ Assert(pPipeBuf->offset < PAGE_SIZE);
+ Assert(pPipeBuf->offset + pPipeBuf->len <= PAGE_SIZE);
+
+ pReq->PgLst.offFirstPage = pPipeBuf->offset & PAGE_OFFSET_MASK;
+ pReq->PgLst.aPages[0] = page_to_phys(pPipeBuf->page);
+
+ /* Add any adjacent page buffers: */
+ while ( cPagesToWrite < cBufs
+ && cPagesToWrite < cMaxPages
+ && ((pReq->PgLst.offFirstPage + cbToWrite) & PAGE_OFFSET_MASK) == 0) {
+# ifdef PIPE_BUFFERS
+ struct pipe_buffer *pPipeBuf2 = &pPipe->bufs[(pPipe->curbuf + cPagesToWrite) % PIPE_BUFFERS];
+# else
+ struct pipe_buffer *pPipeBuf2 = &pPipe->bufs[(pPipe->curbuf + cPagesToWrite) % pPipe->buffers];
+# endif
+ Assert(pPipeBuf2->len <= PAGE_SIZE);
+ Assert(pPipeBuf2->offset < PAGE_SIZE);
+ if (pPipeBuf2->offset != 0)
+ break;
+ pReq->PgLst.aPages[cPagesToWrite] = page_to_phys(pPipeBuf2->page);
+ cbToWrite += pPipeBuf2->len;
+ cPagesToWrite += 1;
+ }
+
+ /* Check that we don't have signals pending before we issue the write, as
+ we'll only end up having to cancel the HGCM request 99% of the time: */
+ if (!signal_pending(current)) {
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
+ vrc = VbglR0SfHostReqWritePgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile,
+ cbToWrite, cPagesToWrite);
+ sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
+ } else
+ vrc = VERR_INTERRUPTED;
+ if (RT_SUCCESS(vrc)) {
+ /*
+ * Get the number of bytes actually written, update file position
+ * and return value, and advance the pipe buffer.
+ */
+ uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
+ AssertStmt(cbActual <= cbToWrite, cbActual = cbToWrite);
+ SFLOG2(("vbsf_splice_write: write -> %#x bytes @ %#RX64\n", cbActual, offFile));
+
+ cbRet += cbActual;
+
+ while (cbActual > 0) {
+ uint32_t cbAdvance = RT_MIN(pPipeBuf->len, cbActual);
+
+ vbsf_reg_write_sync_page_cache(mapping, offFile, cbAdvance, NULL,
+ &pPipeBuf->page, pPipeBuf->offset, 1);
+
+ offFile += cbAdvance;
+ cbActual -= cbAdvance;
+ pPipeBuf->offset += cbAdvance;
+ pPipeBuf->len -= cbAdvance;
+
+ if (!pPipeBuf->len) {
+ struct pipe_buf_operations const *pOps = pPipeBuf->ops;
+ pPipeBuf->ops = NULL;
+ pOps->release(pPipe, pPipeBuf);
+
+# ifdef PIPE_BUFFERS
+ pPipe->curbuf = (pPipe->curbuf + 1) % PIPE_BUFFERS;
+# else
+ pPipe->curbuf = (pPipe->curbuf + 1) % pPipe->buffers;
+# endif
+ pPipe->nrbufs -= 1;
+ pPipeBuf = &pPipe->bufs[pPipe->curbuf];
+
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
+ fNeedWakeUp |= pPipe->inode != NULL;
+# else
+ fNeedWakeUp = true;
+# endif
+ } else {
+ Assert(cbActual == 0);
+ break;
+ }
+ }
+
+ *poffset = offFile;
+ } else {
+ if (cbRet == 0)
+ cbRet = vrc == VERR_INTERRUPTED ? -ERESTARTSYS : -RTErrConvertToErrno(vrc);
+ SFLOGFLOW(("vbsf_splice_write: Write failed: %Rrc -> %zd (cbRet=%#zx)\n",
+ vrc, -RTErrConvertToErrno(vrc), cbRet));
+ break;
+ }
+ } else {
+ /*
+ * Wait for data to become available, if there is a chance that'll happen.
+ */
+ /* Quit if there are no writers (think EOF): */
+ if (pPipe->writers == 0) {
+ SFLOGFLOW(("vbsf_splice_write: No buffers. No writers. The show is done!\n"));
+ break;
+ }
+
+ /* Quit if we've written some and no writers are waiting on the lock: */
+ if (cbRet > 0 && pPipe->waiting_writers == 0) {
+ SFLOGFLOW(("vbsf_splice_write: No waiting writers, returning what we've got.\n"));
+ break;
+ }
+
+ /* Quit with EAGAIN if non-blocking: */
+ if (flags & SPLICE_F_NONBLOCK) {
+ if (cbRet == 0)
+ cbRet = -EAGAIN;
+ break;
+ }
+
+ /* Quit if we've got pending signals: */
+ if (signal_pending(current)) {
+ if (cbRet == 0)
+ cbRet = -ERESTARTSYS;
+ SFLOGFLOW(("vbsf_splice_write: pending signal! (%zd)\n", cbRet));
+ break;
+ }
+
+ /* Wake up writers before we start waiting: */
+ if (fNeedWakeUp) {
+ vbsf_wake_up_pipe(pPipe, false /*fReaders*/);
+ fNeedWakeUp = false;
+ }
+ vbsf_wait_pipe(pPipe);
+ }
+ } /* feed loop */
+
+ if (fNeedWakeUp)
+ vbsf_wake_up_pipe(pPipe, false /*fReaders*/);
+
+ UNLOCK_PIPE(pPipe);
+
+ VbglR0PhysHeapFree(pReq);
+ } else {
+ cbRet = -ENOMEM;
+ }
+ }
+ SFLOGFLOW(("vbsf_splice_write: returns %zd (%#zx), *poffset=%#RX64\n", cbRet, cbRet, *poffset));
+ return cbRet;
+}
+
+#endif /* 2.6.17 <= LINUX_VERSION_CODE < 3.16.0 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 30) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
+/**
+ * Our own senfile implementation that does not go via the page cache like
+ * generic_file_sendfile() does.
+ */
+static ssize_t vbsf_reg_sendfile(struct file *pFile, loff_t *poffFile, size_t cbToSend, read_actor_t pfnActor,
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
+ void *pvUser
+# else
+ void __user *pvUser
+# endif
+ )
+{
+ struct inode *inode = VBSF_GET_F_DENTRY(pFile)->d_inode;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ ssize_t cbRet;
+ SFLOGFLOW(("vbsf_reg_sendfile: pFile=%p poffFile=%p{%#RX64} cbToSend=%#zx pfnActor=%p pvUser=%p\n",
+ pFile, poffFile, poffFile ? *poffFile : 0, cbToSend, pfnActor, pvUser));
+ Assert(pSuperInfo);
+
+ /*
+ * Return immediately if asked to send nothing.
+ */
+ if (cbToSend == 0)
+ return 0;
+
+ /*
+ * Like for vbsf_reg_read() and vbsf_reg_read_iter(), we allow going via
+ * the page cache in some cases or configs.
+ */
+ if (vbsf_should_use_cached_read(pFile, inode->i_mapping, pSuperInfo)) {
+ cbRet = generic_file_sendfile(pFile, poffFile, cbToSend, pfnActor, pvUser);
+ SFLOGFLOW(("vbsf_reg_sendfile: returns %#zx *poffFile=%#RX64 [generic_file_sendfile]\n", cbRet, poffFile ? *poffFile : UINT64_MAX));
+ } else {
+ /*
+ * Allocate a request and a bunch of pages for reading from the file.
+ */
+ struct page *apPages[16];
+ loff_t offFile = poffFile ? *poffFile : 0;
+ size_t const cPages = cbToSend + ((size_t)offFile & PAGE_OFFSET_MASK) >= RT_ELEMENTS(apPages) * PAGE_SIZE
+ ? RT_ELEMENTS(apPages)
+ : RT_ALIGN_Z(cbToSend + ((size_t)offFile & PAGE_OFFSET_MASK), PAGE_SIZE) >> PAGE_SHIFT;
+ size_t iPage;
+ VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ,
+ PgLst.aPages[cPages]));
+ if (pReq) {
+ Assert(cPages > 0);
+ cbRet = 0;
+ for (iPage = 0; iPage < cPages; iPage++) {
+ struct page *pPage;
+ apPages[iPage] = pPage = alloc_page(GFP_USER);
+ if (pPage) {
+ Assert(page_count(pPage) == 1);
+ pReq->PgLst.aPages[iPage] = page_to_phys(pPage);
+ } else {
+ while (iPage-- > 0)
+ vbsf_put_page(apPages[iPage]);
+ cbRet = -ENOMEM;
+ break;
+ }
+ }
+ if (cbRet == 0) {
+ /*
+ * Do the job.
+ */
+ struct vbsf_reg_info *sf_r = (struct vbsf_reg_info *)pFile->private_data;
+ read_descriptor_t RdDesc;
+ RdDesc.count = cbToSend;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
+ RdDesc.arg.data = pvUser;
+# else
+ RdDesc.buf = pvUser;
+# endif
+ RdDesc.written = 0;
+ RdDesc.error = 0;
+
+ Assert(sf_r);
+ Assert((sf_r->Handle.fFlags & VBSF_HANDLE_F_MAGIC_MASK) == VBSF_HANDLE_F_MAGIC);
+
+ while (cbToSend > 0) {
+ /*
+ * Read another chunk. For paranoid reasons, we keep data where the page cache
+                     * would keep it, i.e. page offset bits correspond to the file offset bits.
+ */
+ uint32_t const offPg0 = (uint32_t)offFile & (uint32_t)PAGE_OFFSET_MASK;
+ uint32_t const cbToRead = RT_MIN((cPages << PAGE_SHIFT) - offPg0, cbToSend);
+ uint32_t const cPagesToRead = RT_ALIGN_Z(cbToRead + offPg0, PAGE_SIZE) >> PAGE_SHIFT;
+ int vrc;
+ pReq->PgLst.offFirstPage = (uint16_t)offPg0;
+ if (!signal_pending(current))
+ vrc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile,
+ cbToRead, cPagesToRead);
+ else
+ vrc = VERR_INTERRUPTED;
+ if (RT_SUCCESS(vrc)) {
+ /*
+ * Pass what we read to the actor.
+ */
+ uint32_t off = offPg0;
+ uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
+ bool const fIsEof = cbActual < cbToRead;
+ AssertStmt(cbActual <= cbToRead, cbActual = cbToRead);
+ SFLOG3(("vbsf_reg_sendfile: Read %#x bytes (offPg0=%#x), wanted %#x ...\n", cbActual, offPg0, cbToRead));
+
+ iPage = 0;
+ while (cbActual > 0) {
+ uint32_t const cbPage = RT_MIN(cbActual, PAGE_SIZE - off);
+ int const cbRetActor = pfnActor(&RdDesc, apPages[iPage], off, cbPage);
+ Assert(cbRetActor >= 0); /* Returns zero on failure, with RdDesc.error holding the status code. */
+
+ AssertMsg(iPage < cPages && iPage < cPagesToRead, ("iPage=%#x cPages=%#x cPagesToRead=%#x\n", iPage, cPages, cPagesToRead));
+
+ offFile += cbRetActor;
+ if ((uint32_t)cbRetActor == cbPage && RdDesc.count > 0) {
+ cbActual -= cbPage;
+ cbToSend -= cbPage;
+ iPage++;
+ } else {
+ SFLOG3(("vbsf_reg_sendfile: cbRetActor=%#x (%d) cbPage=%#x RdDesc{count=%#lx error=%d} iPage=%#x/%#x/%#x cbToSend=%#zx\n",
+ cbRetActor, cbRetActor, cbPage, RdDesc.count, RdDesc.error, iPage, cPagesToRead, cPages, cbToSend));
+ vrc = VERR_CALLBACK_RETURN;
+ break;
+ }
+ off = 0;
+ }
+
+ /*
+ * Are we done yet?
+ */
+ if (RT_FAILURE_NP(vrc) || cbToSend == 0 || RdDesc.error != 0 || fIsEof) {
+ break;
+ }
+
+ /*
+ * Replace pages held by the actor.
+ */
+ vrc = VINF_SUCCESS;
+ for (iPage = 0; iPage < cPages; iPage++) {
+ struct page *pPage = apPages[iPage];
+ if (page_count(pPage) != 1) {
+ struct page *pNewPage = alloc_page(GFP_USER);
+ if (pNewPage) {
+ SFLOGFLOW(("vbsf_reg_sendfile: Replacing page #%x: %p -> %p\n", iPage, pPage, pNewPage));
+ vbsf_put_page(pPage);
+ apPages[iPage] = pNewPage;
+ } else {
+ SFLOGFLOW(("vbsf_reg_sendfile: Failed to allocate a replacement page.\n"));
+ vrc = VERR_NO_MEMORY;
+ break;
+ }
+ }
+ }
+ if (RT_FAILURE(vrc))
+ break; /* RdDesc.written should be non-zero, so don't bother with setting error. */
+ } else {
+ RdDesc.error = vrc == VERR_INTERRUPTED ? -ERESTARTSYS : -RTErrConvertToErrno(vrc);
+ SFLOGFLOW(("vbsf_reg_sendfile: Read failed: %Rrc -> %zd (RdDesc.error=%#d)\n",
+ vrc, -RTErrConvertToErrno(vrc), RdDesc.error));
+ break;
+ }
+ }
+
+ /*
+ * Free memory.
+ */
+ for (iPage = 0; iPage < cPages; iPage++)
+ vbsf_put_page(apPages[iPage]);
+
+ /*
+ * Set the return values.
+ */
+ if (RdDesc.written) {
+ cbRet = RdDesc.written;
+ if (poffFile)
+ *poffFile = offFile;
+ } else {
+ cbRet = RdDesc.error;
+ }
+ }
+ VbglR0PhysHeapFree(pReq);
+ } else {
+ cbRet = -ENOMEM;
+ }
+ SFLOGFLOW(("vbsf_reg_sendfile: returns %#zx offFile=%#RX64\n", cbRet, offFile));
+ }
+ return cbRet;
+}
+#endif /* 2.5.30 <= LINUX_VERSION_CODE < 2.6.23 */
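+
+/*
+ * Usage sketch (hypothetical, for illustration only): on these old kernels a
+ * sendfile(2) call from a shared folder file to a socket lands in the
+ * function above, with the networking code supplying pfnActor:
+ *
+ *     off_t off = 0;
+ *     sendfile(fdSocket, fdSharedFolder, &off, cbFile);
+ */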
+
+
+/*********************************************************************************************************************************
+* File operations on regular files *
+*********************************************************************************************************************************/
+
+/** Wrapper around put_page / page_cache_release. */
+DECLINLINE(void) vbsf_put_page(struct page *pPage)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+ put_page(pPage);
+#else
+ page_cache_release(pPage);
+#endif
+}
+
+
+/** Wrapper around get_page / page_cache_get. */
+DECLINLINE(void) vbsf_get_page(struct page *pPage)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+ get_page(pPage);
+#else
+ page_cache_get(pPage);
+#endif
+}
+
+
+/** Companion to vbsf_lock_user_pages(). */
+DECLINLINE(void) vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack)
+{
+ /* We don't mark kernel pages dirty: */
+ if (fLockPgHack)
+ fSetDirty = false;
+
+ while (cPages-- > 0)
+ {
+ struct page *pPage = papPages[cPages];
+ Assert((ssize_t)cPages >= 0);
+ if (fSetDirty && !PageReserved(pPage))
+ set_page_dirty(pPage);
+ vbsf_put_page(pPage);
+ }
+}
+
+
+/**
+ * Worker for vbsf_lock_user_pages_failed_check_kernel() and
+ * vbsf_iter_lock_pages().
+ */
+static int vbsf_lock_kernel_pages(uint8_t *pbStart, bool fWrite, size_t cPages, struct page **papPages)
+{
+ uintptr_t const uPtrFrom = (uintptr_t)pbStart;
+ uintptr_t const uPtrLast = (uPtrFrom & ~(uintptr_t)PAGE_OFFSET_MASK) + (cPages << PAGE_SHIFT) - 1;
+ uint8_t *pbPage = (uint8_t *)uPtrLast;
+ size_t iPage = cPages;
+
+ /*
+ * Touch the pages first (paranoia^2).
+ */
+ if (fWrite) {
+ uint8_t volatile *pbProbe = (uint8_t volatile *)uPtrFrom;
+ while (iPage-- > 0) {
+ *pbProbe = *pbProbe;
+ pbProbe += PAGE_SIZE;
+ }
+ } else {
+ uint8_t const *pbProbe = (uint8_t const *)uPtrFrom;
+ while (iPage-- > 0) {
+ ASMProbeReadByte(pbProbe);
+ pbProbe += PAGE_SIZE;
+ }
+ }
+
+ /*
+ * Get the pages.
+     * Note! Fixes here probably apply to rtR0MemObjNativeLockKernel as well.
+ */
+ iPage = cPages;
+ if ( uPtrFrom >= (unsigned long)__va(0)
+ && uPtrLast < (unsigned long)high_memory) {
+ /* The physical page mapping area: */
+ while (iPage-- > 0) {
+ struct page *pPage = papPages[iPage] = virt_to_page(pbPage);
+ vbsf_get_page(pPage);
+ pbPage -= PAGE_SIZE;
+ }
+ } else {
+ /* This is vmalloc or some such thing, so go thru page tables: */
+ while (iPage-- > 0) {
+ struct page *pPage = rtR0MemObjLinuxVirtToPage(pbPage);
+ if (pPage) {
+ papPages[iPage] = pPage;
+ vbsf_get_page(pPage);
+ pbPage -= PAGE_SIZE;
+ } else {
+ while (++iPage < cPages) {
+ pPage = papPages[iPage];
+ vbsf_put_page(pPage);
+ }
+ return -EFAULT;
+ }
+ }
+ }
+ return 0;
+}
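+
+/*
+ * Illustration: a kmalloc'ed buffer falls in the __va(0)..high_memory range
+ * above and is translated with virt_to_page(), while a vmalloc'ed one takes
+ * the page table walk via rtR0MemObjLinuxVirtToPage().
+ */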
+
+
+/**
+ * Catches kernel_read() and kernel_write() calls and works around them.
+ *
+ * The file_operations::read and file_operations::write callbacks supposedly
+ * hand us the user buffers to read into and write out of. To allow the kernel
+ * to read and write without allocating buffers in userland, kernel_read()
+ * and kernel_write() increase the user space address limit before calling us
+ * so that copyin/copyout won't reject it. Our problem is that get_user_pages()
+ * works on the userspace address space structures and will not be fooled by an
+ * increased addr_limit.
+ *
+ * This code tries to detect this situation and fakes get_user_pages() for the
+ * kernel buffer.
+ */
+static int vbsf_lock_user_pages_failed_check_kernel(uintptr_t uPtrFrom, size_t cPages, bool fWrite, int rcFailed,
+ struct page **papPages, bool *pfLockPgHack)
+{
+ /*
+ * Check that this is valid user memory that is actually in the kernel range.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+ if ( access_ok((void *)uPtrFrom, cPages << PAGE_SHIFT)
+ && uPtrFrom >= USER_DS.seg)
+#else
+ if ( access_ok(fWrite ? VERIFY_WRITE : VERIFY_READ, (void *)uPtrFrom, cPages << PAGE_SHIFT)
+ && uPtrFrom >= USER_DS.seg)
+#endif
+ {
+ int rc = vbsf_lock_kernel_pages((uint8_t *)uPtrFrom, fWrite, cPages, papPages);
+ if (rc == 0) {
+ *pfLockPgHack = true;
+ return 0;
+ }
+ }
+
+ return rcFailed;
+}
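+
+/*
+ * Background sketch (simplified, roughly what older kernels do): the
+ * kernel_read()/kernel_write() pattern detected above widens the address
+ * limit so that copyin/copyout accepts kernel pointers:
+ *
+ *     mm_segment_t fsOld = get_fs();
+ *     set_fs(KERNEL_DS);
+ *     cbRet = vfs_read(file, (char __user *)pvKrnlBuf, cb, &off);
+ *     set_fs(fsOld);
+ *
+ * get_user_pages() still consults the task's mm, though, which is why the
+ * kernel-range check and the vbsf_lock_kernel_pages() fallback are needed.
+ */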
+
+
+/** Wrapper around get_user_pages. */
+DECLINLINE(int) vbsf_lock_user_pages(uintptr_t uPtrFrom, size_t cPages, bool fWrite, struct page **papPages, bool *pfLockPgHack)
+{
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ ssize_t cPagesLocked = get_user_pages_unlocked(uPtrFrom, cPages, papPages,
+ fWrite ? FOLL_WRITE | FOLL_FORCE : FOLL_FORCE);
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+ ssize_t cPagesLocked = get_user_pages_unlocked(uPtrFrom, cPages, fWrite, 1 /*force*/, papPages);
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+ ssize_t cPagesLocked = get_user_pages_unlocked(current, current->mm, uPtrFrom, cPages, papPages,
+ fWrite ? FOLL_WRITE | FOLL_FORCE : FOLL_FORCE);
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
+ ssize_t cPagesLocked = get_user_pages_unlocked(current, current->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages);
+# else
+ struct task_struct *pTask = current;
+ ssize_t cPagesLocked;
+ down_read(&pTask->mm->mmap_sem);
+ cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages, NULL);
+ up_read(&pTask->mm->mmap_sem);
+# endif
+ *pfLockPgHack = false;
+ if (cPagesLocked == cPages)
+ return 0;
+
+ /*
+ * It failed.
+ */
+ if (cPagesLocked < 0)
+ return vbsf_lock_user_pages_failed_check_kernel(uPtrFrom, cPages, fWrite, (int)cPagesLocked, papPages, pfLockPgHack);
+
+ vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/, false /*fLockPgHack*/);
+
+ /* We could use uPtrFrom + cPagesLocked to get the correct status here... */
+ return -EFAULT;
+}
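+
+/*
+ * Usage sketch (mirrors vbsf_reg_read_locking() below): lock the pages,
+ * translate them to physical addresses for the host request, then unlock:
+ *
+ *     bool fLockPgHack;
+ *     int rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, true, papPages, &fLockPgHack);
+ *     if (rc == 0) {
+ *         size_t iPage = cPages;
+ *         while (iPage-- > 0)
+ *             pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
+ *         ... issue the host request ...
+ *         vbsf_unlock_user_pages(papPages, cPages, true, fLockPgHack);
+ *     }
+ */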
+
+
+/**
+ * Read function used when accessing files that are memory mapped.
+ *
+ * We read from the page cache here to present a coherent picture of the
+ * file content.
+ */
+static ssize_t vbsf_reg_read_mapped(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ struct iovec iov = { .iov_base = buf, .iov_len = size };
+ struct iov_iter iter;
+ struct kiocb kiocb;
+ ssize_t cbRet;
+
+ init_sync_kiocb(&kiocb, file);
+ kiocb.ki_pos = *off;
+ iov_iter_init(&iter, READ, &iov, 1, size);
+
+ cbRet = generic_file_read_iter(&kiocb, &iter);
+
+ *off = kiocb.ki_pos;
+ return cbRet;
+
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+ struct iovec iov = { .iov_base = buf, .iov_len = size };
+ struct kiocb kiocb;
+ ssize_t cbRet;
+
+ init_sync_kiocb(&kiocb, file);
+ kiocb.ki_pos = *off;
+
+ cbRet = generic_file_aio_read(&kiocb, &iov, 1, *off);
+ if (cbRet == -EIOCBQUEUED)
+ cbRet = wait_on_sync_kiocb(&kiocb);
+
+ *off = kiocb.ki_pos;
+ return cbRet;
+
+#else /* 2.6.18 or earlier: */
+ return generic_file_read(file, buf, size, off);
+#endif
+}
+
+
+/**
+ * Fallback case of vbsf_reg_read() that locks the user buffers and lets the host
+ * write directly to them.
*/
+static ssize_t vbsf_reg_read_locking(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off,
+ struct vbsf_super_info *pSuperInfo, struct vbsf_reg_info *sf_r)
+{
+ /*
+ * Lock pages and execute the read, taking care not to pass the host
+ * more than it can handle in one go or more than we care to allocate
+ * page arrays for. The latter limit is set at just short of 32KB due
+ * to how the physical heap works.
+ */
+ struct page *apPagesStack[16];
+ struct page **papPages = &apPagesStack[0];
+ struct page **papPagesFree = NULL;
+ VBOXSFREADPGLSTREQ *pReq;
+ loff_t offFile = *off;
+ ssize_t cbRet = -ENOMEM;
+ size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT;
+ size_t cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 1), cPages);
+ bool fLockPgHack;
+
+ pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
+ while (!pReq && cMaxPages > 4) {
+ cMaxPages /= 2;
+ pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
+ }
+ if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
+        papPagesFree = papPages = kmalloc(cMaxPages * sizeof(papPages[0]), GFP_KERNEL);
+ if (pReq && papPages) {
+ cbRet = 0;
+ for (;;) {
+ /*
+ * Figure out how much to process now and lock the user pages.
+ */
+ int rc;
+ size_t cbChunk = (uintptr_t)buf & PAGE_OFFSET_MASK;
+ pReq->PgLst.offFirstPage = (uint16_t)cbChunk;
+ cPages = RT_ALIGN_Z(cbChunk + size, PAGE_SIZE) >> PAGE_SHIFT;
+ if (cPages <= cMaxPages)
+ cbChunk = size;
+ else {
+ cPages = cMaxPages;
+ cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk;
+ }
+
+ rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, true /*fWrite*/, papPages, &fLockPgHack);
+ if (rc == 0) {
+ size_t iPage = cPages;
+ while (iPage-- > 0)
+ pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
+ } else {
+ cbRet = rc;
+ break;
+ }
+
+ /*
+ * Issue the request and unlock the pages.
+ */
+ rc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
+
+ Assert(cPages <= cMaxPages);
+ vbsf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/, fLockPgHack);
+
+ if (RT_SUCCESS(rc)) {
+ /*
+ * Success, advance position and buffer.
+ */
+ uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
+ AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
+ cbRet += cbActual;
+ offFile += cbActual;
+ buf = (uint8_t *)buf + cbActual;
+ size -= cbActual;
+
+ /*
+ * Are we done already? If so commit the new file offset.
+ */
+ if (!size || cbActual < cbChunk) {
+ *off = offFile;
+ break;
+ }
+ } else if (rc == VERR_NO_MEMORY && cMaxPages > 4) {
+ /*
+ * The host probably doesn't have enough heap to handle the
+ * request, reduce the page count and retry.
+ */
+ cMaxPages /= 4;
+ Assert(cMaxPages > 0);
+ } else {
+ /*
+ * If we've successfully read stuff, return it rather than
+ * the error. (Not sure if this is such a great idea...)
+ */
+ if (cbRet > 0) {
+ SFLOGFLOW(("vbsf_reg_read: read at %#RX64 -> %Rrc; got cbRet=%#zx already\n", offFile, rc, cbRet));
+ *off = offFile;
+ } else {
+ SFLOGFLOW(("vbsf_reg_read: read at %#RX64 -> %Rrc\n", offFile, rc));
+ cbRet = -EPROTO;
+ }
+ break;
+ }
+ }
+ }
+ if (papPagesFree)
+ kfree(papPages);
+ if (pReq)
+ VbglR0PhysHeapFree(pReq);
+ SFLOGFLOW(("vbsf_reg_read: returns %zd (%#zx), *off=%RX64 [lock]\n", cbRet, cbRet, *off));
+ return cbRet;
+}
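+
+/*
+ * Chunking example (assuming 4KiB pages): a 0x5000 byte read into a buffer
+ * at page offset 0x300 spans 6 pages; with cMaxPages = 4 the first chunk
+ * covers 4 pages minus the 0x300 head, i.e. 0x3d00 bytes, and the loop above
+ * continues with the remainder.
+ */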
-#include "vfsmod.h"
-static void *alloc_bounce_buffer(size_t * tmp_sizep, PRTCCPHYS physp, size_t
- xfer_size, const char *caller)
-{
- size_t tmp_size;
- void *tmp;
-
- /* try for big first. */
- tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE);
- if (tmp_size > 16U * _1K)
- tmp_size = 16U * _1K;
- tmp = kmalloc(tmp_size, GFP_KERNEL);
- if (!tmp) {
- /* fall back on a page sized buffer. */
- tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp) {
- LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu %s\n", caller, xfer_size));
- return NULL;
- }
- tmp_size = PAGE_SIZE;
- }
-
- *tmp_sizep = tmp_size;
- *physp = virt_to_phys(tmp);
- return tmp;
-}
-
-static void free_bounce_buffer(void *tmp)
-{
- kfree(tmp);
-}
-
-/* fops */
-static int sf_reg_read_aux(const char *caller, struct sf_glob_info *sf_g,
- struct sf_reg_info *sf_r, void *buf,
- uint32_t * nread, uint64_t pos)
-{
- /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
- * contiguous in physical memory (kmalloc or single page), we should
- * use a physical address here to speed things up. */
- int rc = VbglR0SfRead(&client_handle, &sf_g->map, sf_r->handle,
- pos, nread, buf, false /* already locked? */ );
- if (RT_FAILURE(rc)) {
- LogFunc(("VbglR0SfRead failed. caller=%s, rc=%Rrc\n", caller,
- rc));
- return -EPROTO;
- }
- return 0;
-}
-
-static int sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g,
- struct sf_reg_info *sf_r, void *buf,
- uint32_t * nwritten, uint64_t pos)
-{
- /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
- * contiguous in physical memory (kmalloc or single page), we should
- * use a physical address here to speed things up. */
- int rc = VbglR0SfWrite(&client_handle, &sf_g->map, sf_r->handle,
- pos, nwritten, buf,
- false /* already locked? */ );
- if (RT_FAILURE(rc)) {
- LogFunc(("VbglR0SfWrite failed. caller=%s, rc=%Rrc\n",
- caller, rc));
- return -EPROTO;
- }
- return 0;
+/**
+ * Read from a regular file.
+ *
+ * @param file the file
+ * @param buf the buffer
+ * @param size length of the buffer
+ * @param off offset within the file (in/out).
+ * @returns the number of read bytes on success, Linux error code otherwise
+ */
+static ssize_t vbsf_reg_read(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off)
+{
+ struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ struct vbsf_reg_info *sf_r = file->private_data;
+ struct address_space *mapping = inode->i_mapping;
+
+ SFLOGFLOW(("vbsf_reg_read: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off));
+
+ if (!S_ISREG(inode->i_mode)) {
+ LogFunc(("read from non regular file %d\n", inode->i_mode));
+ return -EINVAL;
+ }
+
+ /** @todo XXX Check read permission according to inode->i_mode! */
+
+ if (!size)
+ return 0;
+
+ /*
+     * If there is a mapping and O_DIRECT isn't in effect, we must at least
+     * heed dirty pages in the mapping and read from them. For simplicity
+ * though, we just do page cache reading when there are writable
+ * mappings around with any kind of pages loaded.
+ */
+ if (vbsf_should_use_cached_read(file, mapping, pSuperInfo))
+ return vbsf_reg_read_mapped(file, buf, size, off);
+
+    /*
+     * For small requests, try to use an embedded buffer, provided we get a heap
+     * block that does not cross page boundaries (see host code). With 4KiB
+     * pages that caps this path at 3072 bytes minus the request header.
+     */
+ if (size <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) /* see allocator */) {
+ uint32_t const cbReq = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) + size;
+ VBOXSFREADEMBEDDEDREQ *pReq = (VBOXSFREADEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
+ if (pReq) {
+ if ((PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
+ ssize_t cbRet;
+ int vrc = VbglR0SfHostReqReadEmbedded(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, *off, (uint32_t)size);
+ if (RT_SUCCESS(vrc)) {
+ cbRet = pReq->Parms.cb32Read.u.value32;
+ AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
+ if (copy_to_user(buf, pReq->abData, cbRet) == 0)
+ *off += cbRet;
+ else
+ cbRet = -EFAULT;
+ } else
+ cbRet = -EPROTO;
+ VbglR0PhysHeapFree(pReq);
+ SFLOGFLOW(("vbsf_reg_read: returns %zd (%#zx), *off=%RX64 [embed]\n", cbRet, cbRet, *off));
+ return cbRet;
+ }
+ VbglR0PhysHeapFree(pReq);
+ }
+ }
+
+#if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */
+ /*
+ * For medium sized requests try use a bounce buffer.
+ */
+ if (size <= _64K /** @todo make this configurable? */) {
+ void *pvBounce = kmalloc(size, GFP_KERNEL);
+ if (pvBounce) {
+ VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq) {
+ ssize_t cbRet;
+ int vrc = VbglR0SfHostReqReadContig(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, *off,
+ (uint32_t)size, pvBounce, virt_to_phys(pvBounce));
+ if (RT_SUCCESS(vrc)) {
+ cbRet = pReq->Parms.cb32Read.u.value32;
+ AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
+ if (copy_to_user(buf, pvBounce, cbRet) == 0)
+ *off += cbRet;
+ else
+ cbRet = -EFAULT;
+ } else
+ cbRet = -EPROTO;
+ VbglR0PhysHeapFree(pReq);
+ kfree(pvBounce);
+ SFLOGFLOW(("vbsf_reg_read: returns %zd (%#zx), *off=%RX64 [bounce]\n", cbRet, cbRet, *off));
+ return cbRet;
+ }
+ kfree(pvBounce);
+ }
+ }
+#endif
+
+ return vbsf_reg_read_locking(file, buf, size, off, pSuperInfo, sf_r);
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) \
- && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
-void free_pipebuf(struct page *kpage)
+/**
+ * Helper that synchronizes the page cache content with something we just wrote
+ * to the host.
+ */
+static void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
+ uint8_t const *pbSrcBuf, struct page **papSrcPages,
+ uint32_t offSrcPage, size_t cSrcPages)
{
- kunmap(kpage);
- __free_pages(kpage, 0);
+ Assert(offSrcPage < PAGE_SIZE);
+ if (mapping && mapping->nrpages > 0) {
+ /*
+ * Work the pages in the write range.
+ */
+ while (cbRange > 0) {
+ /*
+             * Look up the page at offFile. We're fine if there isn't
+             * one there. We skip it if it's dirty or being written
+             * back, at least for now.
+ */
+ size_t const offDstPage = offFile & PAGE_OFFSET_MASK;
+ size_t const cbToCopy = RT_MIN(PAGE_SIZE - offDstPage, cbRange);
+ pgoff_t const idxPage = offFile >> PAGE_SHIFT;
+ struct page *pDstPage = find_lock_page(mapping, idxPage);
+ if (pDstPage) {
+ if ( pDstPage->mapping == mapping /* ignore if re-purposed (paranoia) */
+ && pDstPage->index == idxPage
+ && !PageDirty(pDstPage) /* ignore if dirty */
+ && !PageWriteback(pDstPage) /* ignore if being written back */ ) {
+ /*
+ * Map the page and do the copying.
+ */
+ uint8_t *pbDst = (uint8_t *)kmap(pDstPage);
+ if (pbSrcBuf)
+ memcpy(&pbDst[offDstPage], pbSrcBuf, cbToCopy);
+ else {
+ uint32_t const cbSrc0 = PAGE_SIZE - offSrcPage;
+ uint8_t const *pbSrc = (uint8_t const *)kmap(papSrcPages[0]);
+ AssertMsg(cSrcPages >= 1, ("offFile=%#llx cbRange=%#zx cbToCopy=%#zx\n", offFile, cbRange, cbToCopy));
+ memcpy(&pbDst[offDstPage], &pbSrc[offSrcPage], RT_MIN(cbToCopy, cbSrc0));
+ kunmap(papSrcPages[0]);
+ if (cbToCopy > cbSrc0) {
+ AssertMsg(cSrcPages >= 2, ("offFile=%#llx cbRange=%#zx cbToCopy=%#zx\n", offFile, cbRange, cbToCopy));
+ pbSrc = (uint8_t const *)kmap(papSrcPages[1]);
+ memcpy(&pbDst[offDstPage + cbSrc0], pbSrc, cbToCopy - cbSrc0);
+ kunmap(papSrcPages[1]);
+ }
+ }
+ kunmap(pDstPage);
+ flush_dcache_page(pDstPage);
+ if (cbToCopy == PAGE_SIZE)
+ SetPageUptodate(pDstPage);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
+ mark_page_accessed(pDstPage);
+# endif
+ } else
+                SFLOGFLOW(("vbsf_reg_write_sync_page_cache: Skipping page %p: mapping=%p (vs %p) writeback=%d offset=%#lx (vs %#lx)\n",
+ pDstPage, pDstPage->mapping, mapping, PageWriteback(pDstPage), pDstPage->index, idxPage));
+ unlock_page(pDstPage);
+ vbsf_put_page(pDstPage);
+ }
+
+ /*
+ * Advance.
+ */
+ if (pbSrcBuf)
+ pbSrcBuf += cbToCopy;
+ else
+ {
+ offSrcPage += cbToCopy;
+ Assert(offSrcPage < PAGE_SIZE * 2);
+ if (offSrcPage >= PAGE_SIZE) {
+ offSrcPage &= PAGE_OFFSET_MASK;
+ papSrcPages++;
+# ifdef VBOX_STRICT
+ Assert(cSrcPages > 0);
+ cSrcPages--;
+# endif
+ }
+ }
+ offFile += cbToCopy;
+ cbRange -= cbToCopy;
+ }
+ }
+ RT_NOREF(cSrcPages);
}
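+
+/*
+ * Worked example (assuming 4KiB pages): a 0x1800 byte write at file offset
+ * 0xc00 with offSrcPage = 0xc00 is applied as three chunks: 0x400 bytes to
+ * the tail of the first cached page, 0x1000 bytes to the second and 0x400
+ * bytes to the head of the third, stepping to the next source page whenever
+ * offSrcPage wraps past PAGE_SIZE.
+ */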
-void *sf_pipe_buf_map(struct pipe_inode_info *pipe,
- struct pipe_buffer *pipe_buf, int atomic)
+
+/**
+ * Fallback case of vbsf_reg_write() that locks the user buffers and lets the
+ * host read directly from them.
+ */
+static ssize_t vbsf_reg_write_locking(struct file *file, const char /*__user*/ *buf, size_t size, loff_t *off, loff_t offFile,
+ struct inode *inode, struct vbsf_inode_info *sf_i,
+ struct vbsf_super_info *pSuperInfo, struct vbsf_reg_info *sf_r)
{
- return 0;
+ /*
+ * Lock pages and execute the write, taking care not to pass the host
+ * more than it can handle in one go or more than we care to allocate
+ * page arrays for. The latter limit is set at just short of 32KB due
+ * to how the physical heap works.
+ */
+ struct page *apPagesStack[16];
+ struct page **papPages = &apPagesStack[0];
+ struct page **papPagesFree = NULL;
+ VBOXSFWRITEPGLSTREQ *pReq;
+ ssize_t cbRet = -ENOMEM;
+ size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT;
+ size_t cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 1), cPages);
+ bool fLockPgHack;
+
+ pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
+ while (!pReq && cMaxPages > 4) {
+ cMaxPages /= 2;
+ pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
+ }
+ if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
+        papPagesFree = papPages = kmalloc(cMaxPages * sizeof(papPages[0]), GFP_KERNEL);
+ if (pReq && papPages) {
+ cbRet = 0;
+ for (;;) {
+ /*
+ * Figure out how much to process now and lock the user pages.
+ */
+ int rc;
+ size_t cbChunk = (uintptr_t)buf & PAGE_OFFSET_MASK;
+ pReq->PgLst.offFirstPage = (uint16_t)cbChunk;
+ cPages = RT_ALIGN_Z(cbChunk + size, PAGE_SIZE) >> PAGE_SHIFT;
+ if (cPages <= cMaxPages)
+ cbChunk = size;
+ else {
+ cPages = cMaxPages;
+ cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk;
+ }
+
+ rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, false /*fWrite*/, papPages, &fLockPgHack);
+ if (rc == 0) {
+ size_t iPage = cPages;
+ while (iPage-- > 0)
+ pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
+ } else {
+ cbRet = rc;
+ break;
+ }
+
+ /*
+ * Issue the request and unlock the pages.
+ */
+ rc = VbglR0SfHostReqWritePgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
+ sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
+ if (RT_SUCCESS(rc)) {
+ /*
+ * Success, advance position and buffer.
+ */
+ uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
+ AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
+
+ vbsf_reg_write_sync_page_cache(inode->i_mapping, offFile, cbActual, NULL /*pbKrnlBuf*/,
+ papPages, (uintptr_t)buf & PAGE_OFFSET_MASK, cPages);
+ Assert(cPages <= cMaxPages);
+ vbsf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/, fLockPgHack);
+
+ cbRet += cbActual;
+ offFile += cbActual;
+ buf = (uint8_t *)buf + cbActual;
+ size -= cbActual;
+ if (offFile > i_size_read(inode))
+ i_size_write(inode, offFile);
+ sf_i->force_restat = 1; /* mtime (and size) may have changed */
+
+ /*
+ * Are we done already? If so commit the new file offset.
+ */
+ if (!size || cbActual < cbChunk) {
+ *off = offFile;
+ break;
+ }
+ } else {
+ vbsf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/, fLockPgHack);
+ if (rc == VERR_NO_MEMORY && cMaxPages > 4) {
+ /*
+ * The host probably doesn't have enough heap to handle the
+ * request, reduce the page count and retry.
+ */
+ cMaxPages /= 4;
+ Assert(cMaxPages > 0);
+ } else {
+ /*
+ * If we've successfully written stuff, return it rather than
+ * the error. (Not sure if this is such a great idea...)
+ */
+ if (cbRet > 0) {
+ SFLOGFLOW(("vbsf_reg_write: write at %#RX64 -> %Rrc; got cbRet=%#zx already\n", offFile, rc, cbRet));
+ *off = offFile;
+ } else {
+ SFLOGFLOW(("vbsf_reg_write: write at %#RX64 -> %Rrc\n", offFile, rc));
+ cbRet = -EPROTO;
+ }
+ break;
+ }
+ }
+ }
+ }
+ if (papPagesFree)
+ kfree(papPages);
+ if (pReq)
+ VbglR0PhysHeapFree(pReq);
+ SFLOGFLOW(("vbsf_reg_write: returns %zd (%#zx), *off=%RX64 [lock]\n", cbRet, cbRet, *off));
+ return cbRet;
}
-void sf_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *pipe_buf)
+
+/**
+ * Write to a regular file.
+ *
+ * @param file the file
+ * @param buf the buffer
+ * @param size length of the buffer
+ * @param off offset within the file
+ * @returns the number of written bytes on success, Linux error code otherwise
+ */
+static ssize_t vbsf_reg_write(struct file *file, const char *buf, size_t size, loff_t * off)
{
+ struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ struct vbsf_reg_info *sf_r = file->private_data;
+ struct address_space *mapping = inode->i_mapping;
+ loff_t pos;
+
+ SFLOGFLOW(("vbsf_reg_write: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off));
+ Assert(sf_i);
+ Assert(pSuperInfo);
+ Assert(sf_r);
+ AssertReturn(S_ISREG(inode->i_mode), -EINVAL);
+
+ pos = *off;
+    /** @todo This should be handled by the host, with it returning the new
+     * file offset when appending. We may have an outdated i_size value here! */
+ if (file->f_flags & O_APPEND)
+ pos = i_size_read(inode);
+
+ /** @todo XXX Check write permission according to inode->i_mode! */
+
+ if (!size) {
+ if (file->f_flags & O_APPEND) /** @todo check if this is the consensus behavior... */
+ *off = pos;
+ return 0;
+ }
+
+ /** @todo Implement the read-write caching mode. */
+
+ /*
+ * If there are active writable mappings, coordinate with any
+ * pending writes via those.
+ */
+ if ( mapping
+ && mapping->nrpages > 0
+ && mapping_writably_mapped(mapping)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+ int err = filemap_fdatawait_range(mapping, pos, pos + size - 1);
+ if (err)
+ return err;
+#else
+ /** @todo ... */
+#endif
+ }
+
+ /*
+     * For small requests, try to use an embedded buffer, provided we get a heap
+     * block that does not cross page boundaries (see host code).
+ */
+ if (size <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) /* see allocator */) {
+ uint32_t const cbReq = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) + size;
+ VBOXSFWRITEEMBEDDEDREQ *pReq = (VBOXSFWRITEEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
+ if ( pReq
+ && (PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
+ ssize_t cbRet;
+ if (copy_from_user(pReq->abData, buf, size) == 0) {
+ int vrc = VbglR0SfHostReqWriteEmbedded(pSuperInfo->map.root, pReq, sf_r->Handle.hHost,
+ pos, (uint32_t)size);
+ sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
+ if (RT_SUCCESS(vrc)) {
+ cbRet = pReq->Parms.cb32Write.u.value32;
+ AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
+ vbsf_reg_write_sync_page_cache(mapping, pos, (uint32_t)cbRet, pReq->abData,
+ NULL /*papSrcPages*/, 0 /*offSrcPage0*/, 0 /*cSrcPages*/);
+ pos += cbRet;
+ *off = pos;
+ if (pos > i_size_read(inode))
+ i_size_write(inode, pos);
+ } else
+ cbRet = -EPROTO;
+ sf_i->force_restat = 1; /* mtime (and size) may have changed */
+ } else
+ cbRet = -EFAULT;
+
+ VbglR0PhysHeapFree(pReq);
+ SFLOGFLOW(("vbsf_reg_write: returns %zd (%#zx), *off=%RX64 [embed]\n", cbRet, cbRet, *off));
+ return cbRet;
+ }
+ if (pReq)
+ VbglR0PhysHeapFree(pReq);
+ }
+
+#if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */
+ /*
+ * For medium sized requests try use a bounce buffer.
+ */
+ if (size <= _64K /** @todo make this configurable? */) {
+ void *pvBounce = kmalloc(size, GFP_KERNEL);
+ if (pvBounce) {
+ if (copy_from_user(pvBounce, buf, size) == 0) {
+ VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq) {
+ ssize_t cbRet;
+                    int vrc = VbglR0SfHostReqWriteContig(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, pos,
+ (uint32_t)size, pvBounce, virt_to_phys(pvBounce));
+ sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
+ if (RT_SUCCESS(vrc)) {
+ cbRet = pReq->Parms.cb32Write.u.value32;
+ AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
+ vbsf_reg_write_sync_page_cache(mapping, pos, (uint32_t)cbRet, (uint8_t const *)pvBounce,
+ NULL /*papSrcPages*/, 0 /*offSrcPage0*/, 0 /*cSrcPages*/);
+ pos += cbRet;
+ *off = pos;
+ if (pos > i_size_read(inode))
+ i_size_write(inode, pos);
+ } else
+ cbRet = -EPROTO;
+ sf_i->force_restat = 1; /* mtime (and size) may have changed */
+ VbglR0PhysHeapFree(pReq);
+ kfree(pvBounce);
+ SFLOGFLOW(("vbsf_reg_write: returns %zd (%#zx), *off=%RX64 [bounce]\n", cbRet, cbRet, *off));
+ return cbRet;
+ }
+ kfree(pvBounce);
+ } else {
+ kfree(pvBounce);
+ SFLOGFLOW(("vbsf_reg_write: returns -EFAULT, *off=%RX64 [bounce]\n", *off));
+ return -EFAULT;
+ }
+ }
+ }
+#endif
+
+ return vbsf_reg_write_locking(file, buf, size, off, pos, inode, sf_i, pSuperInfo, sf_r);
}
-void sf_pipe_buf_unmap(struct pipe_inode_info *pipe,
- struct pipe_buffer *pipe_buf, void *map_data)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+
+/**
+ * Companion to vbsf_iter_lock_pages().
+ */
+DECLINLINE(void) vbsf_iter_unlock_pages(struct iov_iter *iter, struct page **papPages, size_t cPages, bool fSetDirty)
{
+ /* We don't mark kernel pages dirty: */
+ if (iter->type & ITER_KVEC)
+ fSetDirty = false;
+
+ while (cPages-- > 0)
+ {
+ struct page *pPage = papPages[cPages];
+ if (fSetDirty && !PageReserved(pPage))
+ set_page_dirty(pPage);
+ vbsf_put_page(pPage);
+ }
}
-int sf_pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *pipe_buf)
+
+/**
+ * Locks up to @a cMaxPages from the I/O vector iterator, advancing the
+ * iterator.
+ *
+ * @returns 0 on success, negative errno value on failure.
+ * @param iter The iterator to lock pages from.
+ * @param fWrite Whether to write (true) or read (false) lock the pages.
+ * @param pStash Where we stash peek results.
+ * @param cMaxPages The maximum number of pages to get.
+ * @param papPages Where to return the locked pages.
+ * @param pcPages Where to return the number of pages.
+ * @param poffPage0 Where to return the offset into the first page.
+ * @param pcbChunk Where to return the number of bytes covered.
+ */
+static int vbsf_iter_lock_pages(struct iov_iter *iter, bool fWrite, struct vbsf_iter_stash *pStash, size_t cMaxPages,
+ struct page **papPages, size_t *pcPages, size_t *poffPage0, size_t *pcbChunk)
{
- return 0;
+ size_t cbChunk = 0;
+ size_t cPages = 0;
+ size_t offPage0 = 0;
+ int rc = 0;
+
+ Assert(iov_iter_count(iter) + pStash->cb > 0);
+ if (!(iter->type & ITER_KVEC)) {
+ /*
+ * Do we have a stashed page?
+ */
+ if (pStash->pPage) {
+ papPages[0] = pStash->pPage;
+ offPage0 = pStash->off;
+ cbChunk = pStash->cb;
+ cPages = 1;
+ pStash->pPage = NULL;
+ pStash->off = 0;
+ pStash->cb = 0;
+ if ( offPage0 + cbChunk < PAGE_SIZE
+ || iov_iter_count(iter) == 0) {
+ *poffPage0 = offPage0;
+ *pcbChunk = cbChunk;
+ *pcPages = cPages;
+ SFLOGFLOW(("vbsf_iter_lock_pages: returns %d - cPages=%#zx offPage0=%#zx cbChunk=%zx (stashed)\n",
+ rc, cPages, offPage0, cbChunk));
+ return 0;
+ }
+ cMaxPages -= 1;
+ SFLOG3(("vbsf_iter_lock_pages: Picked up stashed page: %#zx LB %#zx\n", offPage0, cbChunk));
+ } else {
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+ /*
+ * Copy out our starting point to assist rewinding.
+ */
+ pStash->offFromEnd = iov_iter_count(iter);
+ pStash->Copy = *iter;
+# endif
+ }
+
+ /*
+ * Get pages segment by segment.
+ */
+ do {
+ /*
+ * Make a special case of the first time thru here, since that's
+ * the most typical scenario.
+ */
+ ssize_t cbSegRet;
+ if (cPages == 0) {
+# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
+ while (!iov_iter_single_seg_count(iter)) /* Old code didn't skip empty segments which caused EFAULTs. */
+ iov_iter_advance(iter, 0);
+# endif
+ cbSegRet = iov_iter_get_pages(iter, papPages, iov_iter_count(iter), cMaxPages, &offPage0);
+ if (cbSegRet > 0) {
+ iov_iter_advance(iter, cbSegRet);
+ cbChunk = (size_t)cbSegRet;
+ cPages = RT_ALIGN_Z(offPage0 + cbSegRet, PAGE_SIZE) >> PAGE_SHIFT;
+ cMaxPages -= cPages;
+ SFLOG3(("vbsf_iter_lock_pages: iov_iter_get_pages -> %#zx @ %#zx; %#zx pages [first]\n", cbSegRet, offPage0, cPages));
+ if ( cMaxPages == 0
+ || ((offPage0 + (size_t)cbSegRet) & PAGE_OFFSET_MASK))
+ break;
+ } else {
+ AssertStmt(cbSegRet < 0, cbSegRet = -EFAULT);
+ rc = (int)cbSegRet;
+ break;
+ }
+ } else {
+ /*
+ * Probe first page of new segment to check that we've got a zero offset and
+ * can continue on the current chunk. Stash the page if the offset isn't zero.
+ */
+ size_t offPgProbe;
+ size_t cbSeg = iov_iter_single_seg_count(iter);
+ while (!cbSeg) {
+ iov_iter_advance(iter, 0);
+ cbSeg = iov_iter_single_seg_count(iter);
+ }
+ cbSegRet = iov_iter_get_pages(iter, &papPages[cPages], iov_iter_count(iter), 1, &offPgProbe);
+ if (cbSegRet > 0) {
+ iov_iter_advance(iter, cbSegRet); /** @todo maybe not do this if we stash the page? */
+ Assert(offPgProbe + cbSegRet <= PAGE_SIZE);
+ if (offPgProbe == 0) {
+ cbChunk += cbSegRet;
+ cPages += 1;
+ cMaxPages -= 1;
+ SFLOG3(("vbsf_iter_lock_pages: iov_iter_get_pages(1) -> %#zx @ %#zx\n", cbSegRet, offPgProbe));
+ if ( cMaxPages == 0
+ || cbSegRet != PAGE_SIZE)
+ break;
+
+ /*
+ * Get the rest of the segment (if anything remaining).
+ */
+ cbSeg -= cbSegRet;
+ if (cbSeg > 0) {
+ cbSegRet = iov_iter_get_pages(iter, &papPages[cPages], iov_iter_count(iter), cMaxPages, &offPgProbe);
+ if (cbSegRet > 0) {
+ size_t const cPgRet = RT_ALIGN_Z((size_t)cbSegRet, PAGE_SIZE) >> PAGE_SHIFT;
+ Assert(offPgProbe == 0);
+ iov_iter_advance(iter, cbSegRet);
+ SFLOG3(("vbsf_iter_lock_pages: iov_iter_get_pages() -> %#zx; %#zx pages\n", cbSegRet, cPgRet));
+ cPages += cPgRet;
+ cMaxPages -= cPgRet;
+ cbChunk += cbSegRet;
+ if ( cMaxPages == 0
+ || ((size_t)cbSegRet & PAGE_OFFSET_MASK))
+ break;
+ } else {
+ AssertStmt(cbSegRet < 0, cbSegRet = -EFAULT);
+ rc = (int)cbSegRet;
+ break;
+ }
+ }
+ } else {
+                    /* The segment didn't start at a page boundary, so stash it for
+ the next round: */
+ SFLOGFLOW(("vbsf_iter_lock_pages: iov_iter_get_pages(1) -> %#zx @ %#zx; stashed\n", cbSegRet, offPgProbe));
+ Assert(papPages[cPages]);
+ pStash->pPage = papPages[cPages];
+ pStash->off = offPgProbe;
+ pStash->cb = cbSegRet;
+ break;
+ }
+ } else {
+ AssertStmt(cbSegRet < 0, cbSegRet = -EFAULT);
+ rc = (int)cbSegRet;
+ break;
+ }
+ }
+ Assert(cMaxPages > 0);
+ } while (iov_iter_count(iter) > 0);
+
+ } else {
+ /*
+         * The silly iov_iter_get_pages_alloc() function doesn't handle KVECs,
+         * so we have to do it ourselves here.
+ *
+ * Note! Fixes here may apply to rtR0MemObjNativeLockKernel()
+ * and vbsf_lock_user_pages_failed_check_kernel() as well.
+ */
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+ pStash->offFromEnd = iov_iter_count(iter);
+ pStash->Copy = *iter;
+# endif
+ do {
+ uint8_t *pbBuf;
+ size_t offStart;
+ size_t cPgSeg;
+
+ size_t cbSeg = iov_iter_single_seg_count(iter);
+ while (!cbSeg) {
+ iov_iter_advance(iter, 0);
+ cbSeg = iov_iter_single_seg_count(iter);
+ }
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+ pbBuf = iter->kvec->iov_base + iter->iov_offset;
+# else
+ pbBuf = iter->iov->iov_base + iter->iov_offset;
+# endif
+ offStart = (uintptr_t)pbBuf & PAGE_OFFSET_MASK;
+ if (!cPages)
+ offPage0 = offStart;
+ else if (offStart)
+ break;
+
+ cPgSeg = RT_ALIGN_Z(cbSeg, PAGE_SIZE) >> PAGE_SHIFT;
+ if (cPgSeg > cMaxPages) {
+ cPgSeg = cMaxPages;
+ cbSeg = (cPgSeg << PAGE_SHIFT) - offStart;
+ }
+
+ rc = vbsf_lock_kernel_pages(pbBuf, fWrite, cPgSeg, &papPages[cPages]);
+ if (rc == 0) {
+ iov_iter_advance(iter, cbSeg);
+ cbChunk += cbSeg;
+ cPages += cPgSeg;
+ cMaxPages -= cPgSeg;
+ if ( cMaxPages == 0
+ || ((offStart + cbSeg) & PAGE_OFFSET_MASK) != 0)
+ break;
+ } else
+ break;
+ } while (iov_iter_count(iter) > 0);
+ }
+
+ /*
+ * Clean up if we failed; set return values.
+ */
+ if (rc == 0) {
+ /* likely */
+ } else {
+ if (cPages > 0)
+ vbsf_iter_unlock_pages(iter, papPages, cPages, false /*fSetDirty*/);
+ offPage0 = cbChunk = cPages = 0;
+ }
+ *poffPage0 = offPage0;
+ *pcbChunk = cbChunk;
+ *pcPages = cPages;
+ SFLOGFLOW(("vbsf_iter_lock_pages: returns %d - cPages=%#zx offPage0=%#zx cbChunk=%zx\n", rc, cPages, offPage0, cbChunk));
+ return rc;
}
-static void sf_pipe_buf_release(struct pipe_inode_info *pipe,
- struct pipe_buffer *pipe_buf)
+
+/**
+ * Rewinds the I/O vector.
+ */
+static bool vbsf_iter_rewind(struct iov_iter *iter, struct vbsf_iter_stash *pStash, size_t cbToRewind, size_t cbChunk)
{
- free_pipebuf(pipe_buf->page);
+ size_t cbExtra;
+ if (!pStash->pPage) {
+ cbExtra = 0;
+ } else {
+ cbExtra = pStash->cb;
+ vbsf_put_page(pStash->pPage);
+ pStash->pPage = NULL;
+ pStash->cb = 0;
+ pStash->off = 0;
+ }
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+ iov_iter_revert(iter, cbToRewind + cbExtra);
+ return true;
+# else
+ /** @todo impl this */
+ return false;
+# endif
}
-int sf_pipe_buf_confirm(struct pipe_inode_info *info,
- struct pipe_buffer *pipe_buf)
+
+/**
+ * Cleans up the page locking stash.
+ */
+DECLINLINE(void) vbsf_iter_cleanup_stash(struct iov_iter *iter, struct vbsf_iter_stash *pStash)
{
- return 0;
+ if (pStash->pPage)
+ vbsf_iter_rewind(iter, pStash, 0, 0);
}
-static struct pipe_buf_operations sf_pipe_buf_ops = {
- .can_merge = 0,
- .map = sf_pipe_buf_map,
- .unmap = sf_pipe_buf_unmap,
- .confirm = sf_pipe_buf_confirm,
- .release = sf_pipe_buf_release,
- .steal = sf_pipe_buf_steal,
- .get = sf_pipe_buf_get,
-};
-
-#define LOCK_PIPE(pipe) \
- if (pipe->inode) \
- mutex_lock(&pipe->inode->i_mutex);
-
-#define UNLOCK_PIPE(pipe) \
- if (pipe->inode) \
- mutex_unlock(&pipe->inode->i_mutex);
-
-ssize_t
-sf_splice_read(struct file *in, loff_t * poffset,
- struct pipe_inode_info *pipe, size_t len, unsigned int flags)
-{
- size_t bytes_remaining = len;
- loff_t orig_offset = *poffset;
- loff_t offset = orig_offset;
- struct inode *inode = GET_F_DENTRY(in)->d_inode;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
- struct sf_reg_info *sf_r = in->private_data;
- ssize_t retval;
- struct page *kpage = 0;
- size_t nsent = 0;
-
- TRACE();
- if (!S_ISREG(inode->i_mode)) {
- LogFunc(("read from non regular file %d\n", inode->i_mode));
- return -EINVAL;
- }
- if (!len) {
- return 0;
- }
-
- LOCK_PIPE(pipe);
-
- uint32_t req_size = 0;
- while (bytes_remaining > 0) {
- kpage = alloc_page(GFP_KERNEL);
- if (unlikely(kpage == NULL)) {
- UNLOCK_PIPE(pipe);
- return -ENOMEM;
- }
- req_size = 0;
- uint32_t nread = req_size =
- (uint32_t) min(bytes_remaining, (size_t) PAGE_SIZE);
- uint32_t chunk = 0;
- void *kbuf = kmap(kpage);
- while (chunk < req_size) {
- retval =
- sf_reg_read_aux(__func__, sf_g, sf_r, kbuf + chunk,
- &nread, offset);
- if (retval < 0)
- goto err;
- if (nread == 0)
- break;
- chunk += nread;
- offset += nread;
- nread = req_size - chunk;
- }
- if (!pipe->readers) {
- send_sig(SIGPIPE, current, 0);
- retval = -EPIPE;
- goto err;
- }
- if (pipe->nrbufs < PIPE_BUFFERS) {
- struct pipe_buffer *pipebuf =
- pipe->bufs +
- ((pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS -
- 1));
- pipebuf->page = kpage;
- pipebuf->ops = &sf_pipe_buf_ops;
- pipebuf->len = req_size;
- pipebuf->offset = 0;
- pipebuf->private = 0;
- pipebuf->flags = 0;
- pipe->nrbufs++;
- nsent += req_size;
- bytes_remaining -= req_size;
- if (signal_pending(current))
- break;
- } else { /* pipe full */
-
- if (flags & SPLICE_F_NONBLOCK) {
- retval = -EAGAIN;
- goto err;
- }
- free_pipebuf(kpage);
- break;
- }
- }
- UNLOCK_PIPE(pipe);
- if (!nsent && signal_pending(current))
- return -ERESTARTSYS;
- *poffset += nsent;
- return offset - orig_offset;
-
- err:
- UNLOCK_PIPE(pipe);
- free_pipebuf(kpage);
- return retval;
-}
-
-#endif /* 2.6.23 <= LINUX_VERSION_CODE < 2.6.31 */
/**
- * Read from a regular file.
+ * Calculates the longest span of pages we could transfer to the host in a
+ * single request.
*
- * @param file the file
- * @param buf the buffer
- * @param size length of the buffer
- * @param off offset within the file
- * @returns the number of read bytes on success, Linux error code otherwise
+ * @returns Page count, non-zero.
+ * @param iter The I/O vector iterator to inspect.
*/
-static ssize_t sf_reg_read(struct file *file, char *buf, size_t size,
- loff_t * off)
+static size_t vbsf_iter_max_span_of_pages(struct iov_iter *iter)
{
- int err;
- void *tmp;
- RTCCPHYS tmp_phys;
- size_t tmp_size;
- size_t left = size;
- ssize_t total_bytes_read = 0;
- struct inode *inode = GET_F_DENTRY(file)->d_inode;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
- struct sf_reg_info *sf_r = file->private_data;
- loff_t pos = *off;
-
- TRACE();
- if (!S_ISREG(inode->i_mode)) {
- LogFunc(("read from non regular file %d\n", inode->i_mode));
- return -EINVAL;
- }
-
- /** @todo XXX Check read permission according to inode->i_mode! */
-
- if (!size)
- return 0;
+ size_t cPages;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ if (iter_is_iovec(iter) || (iter->type & ITER_KVEC)) {
+#endif
+ const struct iovec *pCurIov = iter->iov;
+ size_t cLeft = iter->nr_segs;
+ size_t cPagesSpan = 0;
+
+    /* iovec and kvec are identical, except for the __user tagging of iov_base. */
+ AssertCompileMembersSameSizeAndOffset(struct iovec, iov_base, struct kvec, iov_base);
+ AssertCompileMembersSameSizeAndOffset(struct iovec, iov_len, struct kvec, iov_len);
+ AssertCompile(sizeof(struct iovec) == sizeof(struct kvec));
+
+ cPages = 1;
+ AssertReturn(cLeft > 0, cPages);
+
+ /* Special case: segment offset. */
+ if (iter->iov_offset > 0) {
+ if (iter->iov_offset < pCurIov->iov_len) {
+ size_t const cbSegLeft = pCurIov->iov_len - iter->iov_offset;
+ size_t const offPage0 = ((uintptr_t)pCurIov->iov_base + iter->iov_offset) & PAGE_OFFSET_MASK;
+ cPages = cPagesSpan = RT_ALIGN_Z(offPage0 + cbSegLeft, PAGE_SIZE) >> PAGE_SHIFT;
+ if ((offPage0 + cbSegLeft) & PAGE_OFFSET_MASK)
+ cPagesSpan = 0;
+ }
+ SFLOGFLOW(("vbsf_iter: seg[0]= %p LB %#zx\n", pCurIov->iov_base, pCurIov->iov_len));
+ pCurIov++;
+ cLeft--;
+ }
+
+ /* Full segments. */
+ while (cLeft-- > 0) {
+ if (pCurIov->iov_len > 0) {
+ size_t const offPage0 = (uintptr_t)pCurIov->iov_base & PAGE_OFFSET_MASK;
+ if (offPage0 == 0) {
+ if (!(pCurIov->iov_len & PAGE_OFFSET_MASK)) {
+ cPagesSpan += pCurIov->iov_len >> PAGE_SHIFT;
+ } else {
+ cPagesSpan += RT_ALIGN_Z(pCurIov->iov_len, PAGE_SIZE) >> PAGE_SHIFT;
+ if (cPagesSpan > cPages)
+ cPages = cPagesSpan;
+ cPagesSpan = 0;
+ }
+ } else {
+ if (cPagesSpan > cPages)
+ cPages = cPagesSpan;
+ if (!((offPage0 + pCurIov->iov_len) & PAGE_OFFSET_MASK)) {
+ cPagesSpan = pCurIov->iov_len >> PAGE_SHIFT;
+ } else {
+ cPagesSpan += RT_ALIGN_Z(offPage0 + pCurIov->iov_len, PAGE_SIZE) >> PAGE_SHIFT;
+ if (cPagesSpan > cPages)
+ cPages = cPagesSpan;
+ cPagesSpan = 0;
+ }
+ }
+ }
+ SFLOGFLOW(("vbsf_iter: seg[%u]= %p LB %#zx\n", iter->nr_segs - cLeft, pCurIov->iov_base, pCurIov->iov_len));
+ pCurIov++;
+ }
+ if (cPagesSpan > cPages)
+ cPages = cPagesSpan;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ } else {
+ /* Won't bother with accurate counts for the next two types, just make
+           some rough estimates (do pipes have segments?): */
+ size_t cSegs = iter->type & ITER_BVEC ? RT_MAX(1, iter->nr_segs) : 1;
+ cPages = (iov_iter_count(iter) + (PAGE_SIZE * 2 - 2) * cSegs) >> PAGE_SHIFT;
+ }
+# endif
+ SFLOGFLOW(("vbsf_iter_max_span_of_pages: returns %#zx\n", cPages));
+ return cPages;
+}
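+
+/*
+ * Worked example (assuming 4KiB pages): two 8KiB segments that are both page
+ * aligned form one continuous 4 page span and could go to the host in a
+ * single request; a third segment starting at a non-zero page offset cuts
+ * the span there, and the function still returns 4.
+ */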
- tmp =
- alloc_bounce_buffer(&tmp_size, &tmp_phys, size,
- __PRETTY_FUNCTION__);
- if (!tmp)
- return -ENOMEM;
- while (left) {
- uint32_t to_read, nread;
+/**
+ * Worker for vbsf_reg_read_iter() that deals with larger reads using page
+ * locking.
+ */
+static ssize_t vbsf_reg_read_iter_locking(struct kiocb *kio, struct iov_iter *iter, size_t cbToRead,
+ struct vbsf_super_info *pSuperInfo, struct vbsf_reg_info *sf_r)
+{
+ /*
+     * Estimate how many pages we may possibly submit in a single request so
+     * that we can allocate a matching request buffer and page array.
+ */
+ struct page *apPagesStack[16];
+ struct page **papPages = &apPagesStack[0];
+ struct page **papPagesFree = NULL;
+ VBOXSFREADPGLSTREQ *pReq;
+ ssize_t cbRet = 0;
+ size_t cMaxPages = vbsf_iter_max_span_of_pages(iter);
+ cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 2), cMaxPages);
+
+ pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
+ while (!pReq && cMaxPages > 4) {
+ cMaxPages /= 2;
+ pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
+ }
+ if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
+        papPagesFree = papPages = kmalloc(cMaxPages * sizeof(papPages[0]), GFP_KERNEL);
+ if (pReq && papPages) {
+
+ /*
+ * The read loop.
+ */
+ struct vbsf_iter_stash Stash = VBSF_ITER_STASH_INITIALIZER;
+ do {
+ /*
+ * Grab as many pages as we can. This means that if adjacent
+             * segments both start and end at a page boundary, we can
+ * do them both in the same transfer from the host.
+ */
+ size_t cPages = 0;
+ size_t cbChunk = 0;
+ size_t offPage0 = 0;
+ int rc = vbsf_iter_lock_pages(iter, true /*fWrite*/, &Stash, cMaxPages, papPages, &cPages, &offPage0, &cbChunk);
+ if (rc == 0) {
+ size_t iPage = cPages;
+ while (iPage-- > 0)
+ pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
+ pReq->PgLst.offFirstPage = (uint16_t)offPage0;
+ AssertStmt(cbChunk <= cbToRead, cbChunk = cbToRead);
+ } else {
+ cbRet = rc;
+ break;
+ }
+
+ /*
+ * Issue the request and unlock the pages.
+ */
+ rc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, kio->ki_pos, cbChunk, cPages);
+ SFLOGFLOW(("vbsf_reg_read_iter_locking: VbglR0SfHostReqReadPgLst -> %d (cbActual=%#x cbChunk=%#zx of %#zx cPages=%#zx offPage0=%#x\n",
+ rc, pReq->Parms.cb32Read.u.value32, cbChunk, cbToRead, cPages, offPage0));
+
+ vbsf_iter_unlock_pages(iter, papPages, cPages, true /*fSetDirty*/);
+
+ if (RT_SUCCESS(rc)) {
+ /*
+ * Success, advance position and buffer.
+ */
+ uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
+ AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
+ cbRet += cbActual;
+ kio->ki_pos += cbActual;
+ cbToRead -= cbActual;
+
+ /*
+ * Are we done already?
+ */
+ if (!cbToRead)
+ break;
+ if (cbActual < cbChunk) { /* We ASSUME end-of-file here. */
+ if (vbsf_iter_rewind(iter, &Stash, cbChunk - cbActual, cbActual))
+ iov_iter_truncate(iter, 0);
+ break;
+ }
+ } else {
+ /*
+                 * Try to rewind the iter structure.
+ */
+ bool const fRewindOkay = vbsf_iter_rewind(iter, &Stash, cbChunk, cbChunk);
+ if (rc == VERR_NO_MEMORY && cMaxPages > 4 && fRewindOkay) {
+ /*
+ * The host probably doesn't have enough heap to handle the
+ * request, reduce the page count and retry.
+ */
+ cMaxPages /= 4;
+ Assert(cMaxPages > 0);
+ } else {
+ /*
+ * If we've successfully read stuff, return it rather than
+ * the error. (Not sure if this is such a great idea...)
+ */
+ if (cbRet <= 0)
+ cbRet = -EPROTO;
+ break;
+ }
+ }
+ } while (cbToRead > 0);
+
+ vbsf_iter_cleanup_stash(iter, &Stash);
+ }
+ else
+ cbRet = -ENOMEM;
+ if (papPagesFree)
+ kfree(papPages);
+ if (pReq)
+ VbglR0PhysHeapFree(pReq);
+ SFLOGFLOW(("vbsf_reg_read_iter_locking: returns %#zx (%zd)\n", cbRet, cbRet));
+ return cbRet;
+}
- to_read = tmp_size;
- if (to_read > left)
- to_read = (uint32_t) left;
- nread = to_read;
+/**
+ * Read into I/O vector iterator.
+ *
+ * @returns Number of bytes read on success, negative errno on error.
+ * @param kio The kernel I/O control block (or something like that).
+ * @param iter The I/O vector iterator describing the buffer.
+ */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+static ssize_t vbsf_reg_read_iter(struct kiocb *kio, struct iov_iter *iter)
+# else
+static ssize_t vbsf_reg_aio_read(struct kiocb *kio, const struct iovec *iov, unsigned long cSegs, loff_t offFile)
+# endif
+{
+# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+ struct vbsf_iov_iter fake_iter = VBSF_IOV_ITER_INITIALIZER(cSegs, iov, 0 /*write*/);
+ struct vbsf_iov_iter *iter = &fake_iter;
+# endif
+ size_t cbToRead = iov_iter_count(iter);
+ struct inode *inode = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;
+ struct address_space *mapping = inode->i_mapping;
+
+ struct vbsf_reg_info *sf_r = kio->ki_filp->private_data;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+
+ SFLOGFLOW(("vbsf_reg_read_iter: inode=%p file=%p size=%#zx off=%#llx type=%#x\n",
+ inode, kio->ki_filp, cbToRead, kio->ki_pos, iter->type));
+ AssertReturn(S_ISREG(inode->i_mode), -EINVAL);
+
+ /*
+ * Do we have anything at all to do here?
+ */
+ if (!cbToRead)
+ return 0;
+
+ /*
+     * If there is a mapping and O_DIRECT isn't in effect, we must at least
+     * heed dirty pages in the mapping and read from them. For simplicity
+ * though, we just do page cache reading when there are writable
+ * mappings around with any kind of pages loaded.
+ */
+ if (vbsf_should_use_cached_read(kio->ki_filp, mapping, pSuperInfo)) {
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ return generic_file_read_iter(kio, iter);
+# else
+ return generic_file_aio_read(kio, iov, cSegs, offFile);
+# endif
+ }
+
+ /*
+     * For now we reject async I/O requests.
+ */
+ if (!is_sync_kiocb(kio)) {
+ SFLOGFLOW(("vbsf_reg_read_iter: async I/O not yet supported\n")); /** @todo extend FsPerf with AIO tests. */
+ return -EOPNOTSUPP;
+ }
+
+ /*
+     * For small requests, try to use an embedded buffer, provided we get a heap
+     * block that does not cross page boundaries (see host code).
+ */
+ if (cbToRead <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) /* see allocator */) {
+ uint32_t const cbReq = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) + cbToRead;
+ VBOXSFREADEMBEDDEDREQ *pReq = (VBOXSFREADEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
+ if (pReq) {
+ if ((PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
+ ssize_t cbRet;
+ int vrc = VbglR0SfHostReqReadEmbedded(pSuperInfo->map.root, pReq, sf_r->Handle.hHost,
+ kio->ki_pos, (uint32_t)cbToRead);
+ if (RT_SUCCESS(vrc)) {
+ cbRet = pReq->Parms.cb32Read.u.value32;
+ AssertStmt(cbRet <= (ssize_t)cbToRead, cbRet = cbToRead);
+ if (copy_to_iter(pReq->abData, cbRet, iter) == cbRet) {
+ kio->ki_pos += cbRet;
+ if (cbRet < cbToRead)
+ iov_iter_truncate(iter, 0);
+ } else
+ cbRet = -EFAULT;
+ } else
+ cbRet = -EPROTO;
+ VbglR0PhysHeapFree(pReq);
+ SFLOGFLOW(("vbsf_reg_read_iter: returns %#zx (%zd)\n", cbRet, cbRet));
+ return cbRet;
+ }
+ VbglR0PhysHeapFree(pReq);
+ }
+ }
+
+ /*
+ * Otherwise do the page locking thing.
+ */
+ return vbsf_reg_read_iter_locking(kio, iter, cbToRead, pSuperInfo, sf_r);
+}
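+
+/*
+ * Usage sketch (hypothetical, for illustration only): vectored reads like
+ * the following reach vbsf_reg_read_iter() / vbsf_reg_aio_read():
+ *
+ *     struct iovec aSegs[2] = { { pvBuf0, 0x1000 }, { pvBuf1, 0x1000 } };
+ *     readv(fdSharedFolder, aSegs, 2);
+ */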
- err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
- if (err)
- goto fail;
- if (copy_to_user(buf, tmp, nread)) {
- err = -EFAULT;
- goto fail;
- }
+/**
+ * Worker for vbsf_reg_write_iter() that deals with larger writes using page
+ * locking.
+ */
+static ssize_t vbsf_reg_write_iter_locking(struct kiocb *kio, struct iov_iter *iter, size_t cbToWrite, loff_t offFile,
+ struct vbsf_super_info *pSuperInfo, struct vbsf_reg_info *sf_r,
+ struct inode *inode, struct vbsf_inode_info *sf_i, struct address_space *mapping)
+{
+ /*
+     * Estimate how many pages we may possibly submit in a single request so
+     * that we can allocate a matching request buffer and page array.
+ */
+ struct page *apPagesStack[16];
+ struct page **papPages = &apPagesStack[0];
+ struct page **papPagesFree = NULL;
+ VBOXSFWRITEPGLSTREQ *pReq;
+ ssize_t cbRet = 0;
+ size_t cMaxPages = vbsf_iter_max_span_of_pages(iter);
+ cMaxPages = RT_MIN(RT_MAX(pSuperInfo->cMaxIoPages, 2), cMaxPages);
+
+ pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
+ while (!pReq && cMaxPages > 4) {
+ cMaxPages /= 2;
+ pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
+ }
+ if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
+        papPagesFree = papPages = kmalloc(cMaxPages * sizeof(papPages[0]), GFP_KERNEL);
+ if (pReq && papPages) {
+
+ /*
+ * The write loop.
+ */
+ struct vbsf_iter_stash Stash = VBSF_ITER_STASH_INITIALIZER;
+ do {
+ /*
+             * segments both start and end at a page boundary, we can
+ * segments both starts and ends at a page boundrary, we can
+ * do them both in the same transfer from the host.
+ */
+ size_t cPages = 0;
+ size_t cbChunk = 0;
+ size_t offPage0 = 0;
+ int rc = vbsf_iter_lock_pages(iter, false /*fWrite*/, &Stash, cMaxPages, papPages, &cPages, &offPage0, &cbChunk);
+ if (rc == 0) {
+ size_t iPage = cPages;
+ while (iPage-- > 0)
+ pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
+ pReq->PgLst.offFirstPage = (uint16_t)offPage0;
+ AssertStmt(cbChunk <= cbToWrite, cbChunk = cbToWrite);
+ } else {
+ cbRet = rc;
+ break;
+ }
+
+ /*
+ * Issue the request and unlock the pages.
+ */
+ rc = VbglR0SfHostReqWritePgLst(pSuperInfo->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
+ sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
+ SFLOGFLOW(("vbsf_reg_write_iter_locking: VbglR0SfHostReqWritePgLst -> %d (cbActual=%#x cbChunk=%#zx of %#zx cPages=%#zx offPage0=%#x\n",
+ rc, pReq->Parms.cb32Write.u.value32, cbChunk, cbToWrite, cPages, offPage0));
+ if (RT_SUCCESS(rc)) {
+ /*
+ * Success, advance position and buffer.
+ */
+ uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
+ AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
+
+ vbsf_reg_write_sync_page_cache(mapping, offFile, cbActual, NULL /*pbSrcBuf*/, papPages, offPage0, cPages);
+ vbsf_iter_unlock_pages(iter, papPages, cPages, false /*fSetDirty*/);
+
+ cbRet += cbActual;
+ offFile += cbActual;
+ kio->ki_pos = offFile;
+ cbToWrite -= cbActual;
+ if (offFile > i_size_read(inode))
+ i_size_write(inode, offFile);
+ sf_i->force_restat = 1; /* mtime (and size) may have changed */
+
+ /*
+ * Are we done already?
+ */
+ if (!cbToWrite)
+ break;
+ if (cbActual < cbChunk) { /* We ASSUME end-of-file here. */
+ if (vbsf_iter_rewind(iter, &Stash, cbChunk - cbActual, cbActual))
+ iov_iter_truncate(iter, 0);
+ break;
+ }
+ } else {
+ /*
+				 * Try to rewind the iter structure.
+ */
+ bool fRewindOkay;
+ vbsf_iter_unlock_pages(iter, papPages, cPages, false /*fSetDirty*/);
+ fRewindOkay = vbsf_iter_rewind(iter, &Stash, cbChunk, cbChunk);
+ if (rc == VERR_NO_MEMORY && cMaxPages > 4 && fRewindOkay) {
+ /*
+ * The host probably doesn't have enough heap to handle the
+ * request, reduce the page count and retry.
+ */
+ cMaxPages /= 4;
+ Assert(cMaxPages > 0);
+ } else {
+ /*
+ * If we've successfully written stuff, return it rather than
+ * the error. (Not sure if this is such a great idea...)
+ */
+ if (cbRet <= 0)
+ cbRet = -EPROTO;
+ break;
+ }
+ }
+ } while (cbToWrite > 0);
+
+ vbsf_iter_cleanup_stash(iter, &Stash);
+ }
+ else
+ cbRet = -ENOMEM;
+ if (papPagesFree)
+ kfree(papPages);
+ if (pReq)
+ VbglR0PhysHeapFree(pReq);
+ SFLOGFLOW(("vbsf_reg_write_iter_locking: returns %#zx (%zd)\n", cbRet, cbRet));
+ return cbRet;
+}
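
The allocation retry at the top of this worker is worth calling out: it halves the page-list size until the physheap can satisfy the request, so large writes degrade gracefully under memory pressure instead of failing outright. A condensed sketch of the same pattern (hypothetical helper name, for illustration only):

    /* Sketch: shrink *pcMaxPages until VbglR0PhysHeapAlloc succeeds or the
     * request becomes too small to be worth splitting further. */
    static VBOXSFWRITEPGLSTREQ *vbsf_alloc_write_req(size_t *pcMaxPages)
    {
        VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(
            RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[*pcMaxPages]));
        while (!pReq && *pcMaxPages > 4) {
            *pcMaxPages /= 2;
            pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(
                RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[*pcMaxPages]));
        }
        return pReq;
    }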
- pos += nread;
- left -= nread;
- buf += nread;
- total_bytes_read += nread;
- if (nread != to_read)
- break;
- }
- *off += total_bytes_read;
- free_bounce_buffer(tmp);
- return total_bytes_read;
+/**
+ * Write from I/O vector iterator.
+ *
+ * @returns Number of bytes written on success, negative errno on error.
+ * @param kio The kernel I/O control block (or something like that).
+ * @param iter The I/O vector iterator describing the buffer.
+ */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+static ssize_t vbsf_reg_write_iter(struct kiocb *kio, struct iov_iter *iter)
+# else
+static ssize_t vbsf_reg_aio_write(struct kiocb *kio, const struct iovec *iov, unsigned long cSegs, loff_t offFile)
+# endif
+{
+# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+ struct vbsf_iov_iter fake_iter = VBSF_IOV_ITER_INITIALIZER(cSegs, iov, 1 /*write*/);
+ struct vbsf_iov_iter *iter = &fake_iter;
+# endif
+ size_t cbToWrite = iov_iter_count(iter);
+ struct inode *inode = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
+ struct address_space *mapping = inode->i_mapping;
+
+ struct vbsf_reg_info *sf_r = kio->ki_filp->private_data;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ loff_t offFile = kio->ki_pos;
+# endif
- fail:
- free_bounce_buffer(tmp);
- return err;
+ SFLOGFLOW(("vbsf_reg_write_iter: inode=%p file=%p size=%#zx off=%#llx type=%#x\n",
+ inode, kio->ki_filp, cbToWrite, offFile, iter->type));
+ AssertReturn(S_ISREG(inode->i_mode), -EINVAL);
+
+ /*
+ * Enforce APPEND flag.
+ */
+	/** @todo This should be handled by the host, which should return the new
+	 *        file offset when appending.  We may have an outdated i_size value here! */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+ if (kio->ki_flags & IOCB_APPEND)
+# else
+ if (kio->ki_filp->f_flags & O_APPEND)
+# endif
+ kio->ki_pos = offFile = i_size_read(inode);
+
+ /*
+ * Do we have anything at all to do here?
+ */
+ if (!cbToWrite)
+ return 0;
+
+ /** @todo Implement the read-write caching mode. */
+
+ /*
+	 * For now we reject async I/O requests.
+ */
+ if (!is_sync_kiocb(kio)) {
+ SFLOGFLOW(("vbsf_reg_write_iter: async I/O not yet supported\n")); /** @todo extend FsPerf with AIO tests. */
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * If there are active writable mappings, coordinate with any
+ * pending writes via those.
+ */
+ if ( mapping
+ && mapping->nrpages > 0
+ && mapping_writably_mapped(mapping)) {
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+ int err = filemap_fdatawait_range(mapping, offFile, offFile + cbToWrite - 1);
+ if (err)
+ return err;
+# else
+ /** @todo ... */
+# endif
+ }
+
+ /*
+	 * For small requests, try to use an embedded buffer, provided we get a heap
+	 * block that does not cross page boundaries (see host code).
+ */
+ if (cbToWrite <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) /* see allocator */) {
+ uint32_t const cbReq = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) + cbToWrite;
+ VBOXSFWRITEEMBEDDEDREQ *pReq = (VBOXSFWRITEEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
+ if (pReq) {
+ if ((PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
+ ssize_t cbRet;
+ if (copy_from_iter(pReq->abData, cbToWrite, iter) == cbToWrite) {
+ int vrc = VbglR0SfHostReqWriteEmbedded(pSuperInfo->map.root, pReq, sf_r->Handle.hHost,
+ offFile, (uint32_t)cbToWrite);
+ sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
+ if (RT_SUCCESS(vrc)) {
+ cbRet = pReq->Parms.cb32Write.u.value32;
+ AssertStmt(cbRet <= (ssize_t)cbToWrite, cbRet = cbToWrite);
+ vbsf_reg_write_sync_page_cache(mapping, offFile, (uint32_t)cbRet, pReq->abData,
+ NULL /*papSrcPages*/, 0 /*offSrcPage0*/, 0 /*cSrcPages*/);
+ kio->ki_pos = offFile += cbRet;
+ if (offFile > i_size_read(inode))
+ i_size_write(inode, offFile);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+ if ((size_t)cbRet < cbToWrite)
+ iov_iter_revert(iter, cbToWrite - cbRet);
+# endif
+ } else
+ cbRet = -EPROTO;
+ sf_i->force_restat = 1; /* mtime (and size) may have changed */
+ } else
+ cbRet = -EFAULT;
+ VbglR0PhysHeapFree(pReq);
+ SFLOGFLOW(("vbsf_reg_write_iter: returns %#zx (%zd)\n", cbRet, cbRet));
+ return cbRet;
+ }
+ VbglR0PhysHeapFree(pReq);
+ }
+ }
+
+ /*
+ * Otherwise do the page locking thing.
+ */
+ return vbsf_reg_write_iter_locking(kio, iter, cbToWrite, offFile, pSuperInfo, sf_r, inode, sf_i, mapping);
}
+#endif /* >= 2.6.19 */
+
/**
- * Write to a regular file.
+ * Used by vbsf_reg_open() and vbsf_inode_atomic_open() to convert the
+ * Linux open flags into shared folders create flags.
*
- * @param file the file
- * @param buf the buffer
- * @param size length of the buffer
- * @param off offset within the file
- * @returns the number of written bytes on success, Linux error code otherwise
+ * @returns shared folders create flags.
+ * @param fLnxOpen The linux O_XXX flags to convert.
+ * @param pfHandle Pointer to vbsf_handle::fFlags.
+ * @param pszCaller Caller, for logging purposes.
*/
-static ssize_t sf_reg_write(struct file *file, const char *buf, size_t size,
- loff_t * off)
-{
- int err;
- void *tmp;
- RTCCPHYS tmp_phys;
- size_t tmp_size;
- size_t left = size;
- ssize_t total_bytes_written = 0;
- struct inode *inode = GET_F_DENTRY(file)->d_inode;
- struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
- struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
- struct sf_reg_info *sf_r = file->private_data;
- loff_t pos;
-
- TRACE();
- BUG_ON(!sf_i);
- BUG_ON(!sf_g);
- BUG_ON(!sf_r);
-
- if (!S_ISREG(inode->i_mode)) {
- LogFunc(("write to non regular file %d\n", inode->i_mode));
- return -EINVAL;
- }
-
- pos = *off;
- if (file->f_flags & O_APPEND) {
- pos = inode->i_size;
- *off = pos;
- }
-
- /** @todo XXX Check write permission according to inode->i_mode! */
-
- if (!size)
- return 0;
-
- tmp =
- alloc_bounce_buffer(&tmp_size, &tmp_phys, size,
- __PRETTY_FUNCTION__);
- if (!tmp)
- return -ENOMEM;
-
- while (left) {
- uint32_t to_write, nwritten;
-
- to_write = tmp_size;
- if (to_write > left)
- to_write = (uint32_t) left;
-
- nwritten = to_write;
-
- if (copy_from_user(tmp, buf, to_write)) {
- err = -EFAULT;
- goto fail;
- }
-
- err =
- VbglR0SfWritePhysCont(&client_handle, &sf_g->map,
- sf_r->handle, pos, &nwritten,
- tmp_phys);
- err = RT_FAILURE(err) ? -EPROTO : 0;
- if (err)
- goto fail;
-
- pos += nwritten;
- left -= nwritten;
- buf += nwritten;
- total_bytes_written += nwritten;
- if (nwritten != to_write)
- break;
- }
-
- *off += total_bytes_written;
- if (*off > inode->i_size)
- inode->i_size = *off;
-
- sf_i->force_restat = 1;
- free_bounce_buffer(tmp);
- return total_bytes_written;
-
- fail:
- free_bounce_buffer(tmp);
- return err;
+uint32_t vbsf_linux_oflags_to_vbox(unsigned fLnxOpen, uint32_t *pfHandle, const char *pszCaller)
+{
+ uint32_t fVBoxFlags = SHFL_CF_ACCESS_DENYNONE;
+
+ /*
+ * Disposition.
+ */
+ if (fLnxOpen & O_CREAT) {
+ Log(("%s: O_CREAT set\n", pszCaller));
+ fVBoxFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
+ if (fLnxOpen & O_EXCL) {
+ Log(("%s: O_EXCL set\n", pszCaller));
+ fVBoxFlags |= SHFL_CF_ACT_FAIL_IF_EXISTS;
+ } else if (fLnxOpen & O_TRUNC) {
+ Log(("%s: O_TRUNC set\n", pszCaller));
+ fVBoxFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
+ } else
+ fVBoxFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
+ } else {
+ fVBoxFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
+ if (fLnxOpen & O_TRUNC) {
+ Log(("%s: O_TRUNC set\n", pszCaller));
+ fVBoxFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
+ }
+ }
+
+ /*
+ * Access.
+ */
+ switch (fLnxOpen & O_ACCMODE) {
+ case O_RDONLY:
+ fVBoxFlags |= SHFL_CF_ACCESS_READ;
+ *pfHandle |= VBSF_HANDLE_F_READ;
+ break;
+
+ case O_WRONLY:
+ fVBoxFlags |= SHFL_CF_ACCESS_WRITE;
+ *pfHandle |= VBSF_HANDLE_F_WRITE;
+ break;
+
+ case O_RDWR:
+ fVBoxFlags |= SHFL_CF_ACCESS_READWRITE;
+ *pfHandle |= VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE;
+ break;
+
+ default:
+ BUG();
+ }
+
+ if (fLnxOpen & O_APPEND) {
+ Log(("%s: O_APPEND set\n", pszCaller));
+ fVBoxFlags |= SHFL_CF_ACCESS_APPEND;
+ *pfHandle |= VBSF_HANDLE_F_APPEND;
+ }
+
+ /*
+ * Only directories?
+ */
+ if (fLnxOpen & O_DIRECTORY) {
+ Log(("%s: O_DIRECTORY set\n", pszCaller));
+ fVBoxFlags |= SHFL_CF_DIRECTORY;
+ }
+
+ return fVBoxFlags;
}
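
As an illustration of the mapping (values worked out from the code above, not taken from the patch), creating a file for appending would go like this:

    uint32_t fHandle = VBSF_HANDLE_F_FILE | VBSF_HANDLE_F_MAGIC;
    uint32_t fCreate = vbsf_linux_oflags_to_vbox(O_CREAT | O_WRONLY | O_APPEND,
                                                 &fHandle, __FUNCTION__);
    /* fCreate == SHFL_CF_ACCESS_DENYNONE | SHFL_CF_ACT_CREATE_IF_NEW
     *          | SHFL_CF_ACT_OPEN_IF_EXISTS | SHFL_CF_ACCESS_WRITE
     *          | SHFL_CF_ACCESS_APPEND;
     * fHandle additionally gains VBSF_HANDLE_F_WRITE | VBSF_HANDLE_F_APPEND. */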
+
/**
* Open a regular file.
*
@@ -431,121 +2980,115 @@ static ssize_t sf_reg_write(struct file *file, const char *buf, size_t size,
* @param file the file
* @returns 0 on success, Linux error code otherwise
*/
-static int sf_reg_open(struct inode *inode, struct file *file)
-{
- int rc, rc_linux = 0;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
- struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
- struct sf_reg_info *sf_r;
- SHFLCREATEPARMS params;
-
- TRACE();
- BUG_ON(!sf_g);
- BUG_ON(!sf_i);
-
- LogFunc(("open %s\n", sf_i->path->String.utf8));
-
- sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
- if (!sf_r) {
- LogRelFunc(("could not allocate reg info\n"));
- return -ENOMEM;
- }
-
- /* Already open? */
- if (sf_i->handle != SHFL_HANDLE_NIL) {
- /*
- * This inode was created with sf_create_aux(). Check the CreateFlags:
- * O_CREAT, O_TRUNC: inherent true (file was just created). Not sure
- * about the access flags (SHFL_CF_ACCESS_*).
- */
- sf_i->force_restat = 1;
- sf_r->handle = sf_i->handle;
- sf_i->handle = SHFL_HANDLE_NIL;
- sf_i->file = file;
- file->private_data = sf_r;
- return 0;
- }
-
- RT_ZERO(params);
- params.Handle = SHFL_HANDLE_NIL;
- /* We check the value of params.Handle afterwards to find out if
- * the call succeeded or failed, as the API does not seem to cleanly
- * distinguish error and informational messages.
- *
- * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
- * make the shared folders host service use our fMode parameter */
-
- if (file->f_flags & O_CREAT) {
- LogFunc(("O_CREAT set\n"));
- params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
- /* We ignore O_EXCL, as the Linux kernel seems to call create
- beforehand itself, so O_EXCL should always fail. */
- if (file->f_flags & O_TRUNC) {
- LogFunc(("O_TRUNC set\n"));
- params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
- } else
- params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
- } else {
- params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
- if (file->f_flags & O_TRUNC) {
- LogFunc(("O_TRUNC set\n"));
- params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
- }
- }
-
- switch (file->f_flags & O_ACCMODE) {
- case O_RDONLY:
- params.CreateFlags |= SHFL_CF_ACCESS_READ;
- break;
-
- case O_WRONLY:
- params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
- break;
-
- case O_RDWR:
- params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
- break;
-
- default:
- BUG();
- }
-
- if (file->f_flags & O_APPEND) {
- LogFunc(("O_APPEND set\n"));
- params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
- }
-
- params.Info.Attr.fMode = inode->i_mode;
- LogFunc(("sf_reg_open: calling VbglR0SfCreate, file %s, flags=%#x, %#x\n", sf_i->path->String.utf8, file->f_flags, params.CreateFlags));
- rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, ¶ms);
- if (RT_FAILURE(rc)) {
- LogFunc(("VbglR0SfCreate failed flags=%d,%#x rc=%Rrc\n",
- file->f_flags, params.CreateFlags, rc));
- kfree(sf_r);
- return -RTErrConvertToErrno(rc);
- }
-
- if (SHFL_HANDLE_NIL == params.Handle) {
- switch (params.Result) {
- case SHFL_PATH_NOT_FOUND:
- case SHFL_FILE_NOT_FOUND:
- rc_linux = -ENOENT;
- break;
- case SHFL_FILE_EXISTS:
- rc_linux = -EEXIST;
- break;
- default:
- break;
- }
- }
-
- sf_i->force_restat = 1;
- sf_r->handle = params.Handle;
- sf_i->file = file;
- file->private_data = sf_r;
- return rc_linux;
+static int vbsf_reg_open(struct inode *inode, struct file *file)
+{
+ int rc, rc_linux = 0;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
+ struct dentry *dentry = VBSF_GET_F_DENTRY(file);
+ struct vbsf_reg_info *sf_r;
+ VBOXSFCREATEREQ *pReq;
+
+ SFLOGFLOW(("vbsf_reg_open: inode=%p file=%p flags=%#x %s\n", inode, file, file->f_flags, sf_i ? sf_i->path->String.ach : NULL));
+ Assert(pSuperInfo);
+ Assert(sf_i);
+
+ sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
+ if (!sf_r) {
+ LogRelFunc(("could not allocate reg info\n"));
+ return -ENOMEM;
+ }
+
+ RTListInit(&sf_r->Handle.Entry);
+ sf_r->Handle.cRefs = 1;
+ sf_r->Handle.fFlags = VBSF_HANDLE_F_FILE | VBSF_HANDLE_F_MAGIC;
+ sf_r->Handle.hHost = SHFL_HANDLE_NIL;
+
+ /* Already open? */
+ if (sf_i->handle != SHFL_HANDLE_NIL) {
+ /*
+ * This inode was created with vbsf_create_worker(). Check the CreateFlags:
+ * O_CREAT, O_TRUNC: inherent true (file was just created). Not sure
+ * about the access flags (SHFL_CF_ACCESS_*).
+ */
+ sf_i->force_restat = 1;
+ sf_r->Handle.hHost = sf_i->handle;
+ sf_i->handle = SHFL_HANDLE_NIL;
+ file->private_data = sf_r;
+
+ sf_r->Handle.fFlags |= VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE; /** @todo fix */
+ vbsf_handle_append(sf_i, &sf_r->Handle);
+ SFLOGFLOW(("vbsf_reg_open: returns 0 (#1) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost));
+ return 0;
+ }
+
+ pReq = (VBOXSFCREATEREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq) + sf_i->path->u16Size);
+ if (!pReq) {
+ kfree(sf_r);
+ LogRelFunc(("Failed to allocate a VBOXSFCREATEREQ buffer!\n"));
+ return -ENOMEM;
+ }
+ memcpy(&pReq->StrPath, sf_i->path, SHFLSTRING_HEADER_SIZE + sf_i->path->u16Size);
+ RT_ZERO(pReq->CreateParms);
+ pReq->CreateParms.Handle = SHFL_HANDLE_NIL;
+
+ /* We check the value of pReq->CreateParms.Handle afterwards to
+ * find out if the call succeeded or failed, as the API does not seem
+ * to cleanly distinguish error and informational messages.
+ *
+ * Furthermore, we must set pReq->CreateParms.Handle to SHFL_HANDLE_NIL
+ * to make the shared folders host service use our fMode parameter */
+
+ /* We ignore O_EXCL, as the Linux kernel seems to call create
+ beforehand itself, so O_EXCL should always fail. */
+ pReq->CreateParms.CreateFlags = vbsf_linux_oflags_to_vbox(file->f_flags & ~O_EXCL, &sf_r->Handle.fFlags, __FUNCTION__);
+ pReq->CreateParms.Info.Attr.fMode = inode->i_mode;
+ LogFunc(("vbsf_reg_open: calling VbglR0SfHostReqCreate, file %s, flags=%#x, %#x\n",
+ sf_i->path->String.utf8, file->f_flags, pReq->CreateParms.CreateFlags));
+ rc = VbglR0SfHostReqCreate(pSuperInfo->map.root, pReq);
+ if (RT_FAILURE(rc)) {
+ LogFunc(("VbglR0SfHostReqCreate failed flags=%d,%#x rc=%Rrc\n", file->f_flags, pReq->CreateParms.CreateFlags, rc));
+ kfree(sf_r);
+ VbglR0PhysHeapFree(pReq);
+ return -RTErrConvertToErrno(rc);
+ }
+
+ if (pReq->CreateParms.Handle != SHFL_HANDLE_NIL) {
+ vbsf_dentry_chain_increase_ttl(dentry);
+ rc_linux = 0;
+ } else {
+ switch (pReq->CreateParms.Result) {
+ case SHFL_PATH_NOT_FOUND:
+ rc_linux = -ENOENT;
+ break;
+ case SHFL_FILE_NOT_FOUND:
+ /** @todo sf_dentry_increase_parent_ttl(file->f_dentry); if we can trust it. */
+ rc_linux = -ENOENT;
+ break;
+ case SHFL_FILE_EXISTS:
+ vbsf_dentry_chain_increase_ttl(dentry);
+ rc_linux = -EEXIST;
+ break;
+ default:
+ vbsf_dentry_chain_increase_parent_ttl(dentry);
+ rc_linux = 0;
+ break;
+ }
+ }
+
+/** @todo update the inode here, pReq carries the latest stats! Very helpful
+ * for detecting host side changes. */
+
+ sf_i->force_restat = 1; /** @todo Why?!? */
+ sf_r->Handle.hHost = pReq->CreateParms.Handle;
+ file->private_data = sf_r;
+ vbsf_handle_append(sf_i, &sf_r->Handle);
+ VbglR0PhysHeapFree(pReq);
+ SFLOGFLOW(("vbsf_reg_open: returns 0 (#2) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost));
+ return rc_linux;
}
+
/**
* Close a regular file.
*
@@ -553,324 +3096,675 @@ static int sf_reg_open(struct inode *inode, struct file *file)
* @param file the file
* @returns 0 on success, Linux error code otherwise
*/
-static int sf_reg_release(struct inode *inode, struct file *file)
+static int vbsf_reg_release(struct inode *inode, struct file *file)
{
- int rc;
- struct sf_reg_info *sf_r;
- struct sf_glob_info *sf_g;
- struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
-
- TRACE();
- sf_g = GET_GLOB_INFO(inode->i_sb);
- sf_r = file->private_data;
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
+ struct vbsf_reg_info *sf_r = file->private_data;
+
+ SFLOGFLOW(("vbsf_reg_release: inode=%p file=%p\n", inode, file));
+ if (sf_r) {
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ struct address_space *mapping = inode->i_mapping;
+ Assert(pSuperInfo);
+
+	/* If we're closing the last handle for this inode, make sure we flush
+	   the mapping or we'll end up in vbsf_writepage without a handle. */
+ if ( mapping
+ && mapping->nrpages > 0
+ /** @todo && last writable handle */ ) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
+ if (filemap_fdatawrite(mapping) != -EIO)
+#else
+ if ( filemap_fdatasync(mapping) == 0
+ && fsync_inode_data_buffers(inode) == 0)
+#endif
+ filemap_fdatawait(inode->i_mapping);
+ }
- BUG_ON(!sf_g);
- BUG_ON(!sf_r);
+ /* Release sf_r, closing the handle if we're the last user. */
+ file->private_data = NULL;
+ vbsf_handle_release(&sf_r->Handle, pSuperInfo, "vbsf_reg_release");
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
- /* See the smbfs source (file.c). mmap in particular can cause data to be
- * written to the file after it is closed, which we can't cope with. We
- * copy and paste the body of filemap_write_and_wait() here as it was not
- * defined before 2.6.6 and not exported until quite a bit later. */
- /* filemap_write_and_wait(inode->i_mapping); */
- if (inode->i_mapping->nrpages
- && filemap_fdatawrite(inode->i_mapping) != -EIO)
- filemap_fdatawait(inode->i_mapping);
-#endif
- rc = VbglR0SfClose(&client_handle, &sf_g->map, sf_r->handle);
- if (RT_FAILURE(rc))
- LogFunc(("VbglR0SfClose failed rc=%Rrc\n", rc));
-
- kfree(sf_r);
- sf_i->file = NULL;
- sf_i->handle = SHFL_HANDLE_NIL;
- file->private_data = NULL;
- return 0;
+ sf_i->handle = SHFL_HANDLE_NIL;
+ }
+ return 0;
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
-static int sf_reg_fault(struct vm_fault *vmf)
-#elif LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
-static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-static struct page *sf_reg_nopage(struct vm_area_struct *vma,
- unsigned long vaddr, int *type)
-# define SET_TYPE(t) *type = (t)
-#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
-static struct page *sf_reg_nopage(struct vm_area_struct *vma,
- unsigned long vaddr, int unused)
-# define SET_TYPE(t)
-#endif
-{
- struct page *page;
- char *buf;
- loff_t off;
- uint32_t nread = PAGE_SIZE;
- int err;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
- struct vm_area_struct *vma = vmf->vma;
-#endif
- struct file *file = vma->vm_file;
- struct inode *inode = GET_F_DENTRY(file)->d_inode;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
- struct sf_reg_info *sf_r = file->private_data;
-
- TRACE();
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
- if (vmf->pgoff > vma->vm_end)
- return VM_FAULT_SIGBUS;
-#else
- if (vaddr > vma->vm_end) {
- SET_TYPE(VM_FAULT_SIGBUS);
- return NOPAGE_SIGBUS;
- }
-#endif
-
- /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead()
- * which works on virtual addresses. On Linux cannot reliably determine the
- * physical address for high memory, see rtR0MemObjNativeLockKernel(). */
- page = alloc_page(GFP_USER);
- if (!page) {
- LogRelFunc(("failed to allocate page\n"));
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
- return VM_FAULT_OOM;
-#else
- SET_TYPE(VM_FAULT_OOM);
- return NOPAGE_OOM;
-#endif
- }
- buf = kmap(page);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
- off = (vmf->pgoff << PAGE_SHIFT);
-#else
- off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
-#endif
- err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
- if (err) {
- kunmap(page);
- put_page(page);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
- return VM_FAULT_SIGBUS;
-#else
- SET_TYPE(VM_FAULT_SIGBUS);
- return NOPAGE_SIGBUS;
-#endif
- }
+/**
+ * Wrapper around generic/default seek function that ensures that we've got
+ * the up-to-date file size when doing anything relative to EOF.
+ *
+ * The issue is that the host may extend the file while we weren't looking and
+ * if the caller wishes to append data, it may end up overwriting existing data
+ * if we operate with a stale size.  So, we always retrieve the file size on
+ * EOF-relative seeks.
+ */
+static loff_t vbsf_reg_llseek(struct file *file, loff_t off, int whence)
+{
+ SFLOGFLOW(("vbsf_reg_llseek: file=%p off=%lld whence=%d\n", file, off, whence));
- BUG_ON(nread > PAGE_SIZE);
- if (!nread) {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
- clear_user_page(page_address(page), vmf->pgoff, page);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
- clear_user_page(page_address(page), vaddr, page);
-#else
- clear_user_page(page_address(page), vaddr);
+ switch (whence) {
+#ifdef SEEK_HOLE
+ case SEEK_HOLE:
+ case SEEK_DATA:
#endif
- } else
- memset(buf + nread, 0, PAGE_SIZE - nread);
-
- flush_dcache_page(page);
- kunmap(page);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
- vmf->page = page;
- return 0;
+ case SEEK_END: {
+ struct vbsf_reg_info *sf_r = file->private_data;
+ int rc = vbsf_inode_revalidate_with_handle(VBSF_GET_F_DENTRY(file), sf_r->Handle.hHost,
+ true /*fForce*/, false /*fInodeLocked*/);
+ if (rc == 0)
+ break;
+ return rc;
+ }
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8)
+ return generic_file_llseek(file, off, whence);
#else
- SET_TYPE(VM_FAULT_MAJOR);
- return page;
+ return default_llseek(file, off, whence);
#endif
}
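
The guest-visible effect is that the classic append pattern below sees the host-side size (illustrative userspace sketch; fd, buf and len are assumed):

    off_t off = lseek(fd, 0, SEEK_END); /* revalidated against the host size */
    ssize_t cb = write(fd, buf, len);   /* lands at the true EOF, not a stale one */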
-static struct vm_operations_struct sf_vma_ops = {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
- .fault = sf_reg_fault
-#else
- .nopage = sf_reg_nopage
-#endif
-};
-static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma)
+/**
+ * Flush region of file - chiefly mmap/msync.
+ *
+ * We cannot use the noop_fsync / simple_sync_file here as that means
+ * msync(,,MS_SYNC) will return before the data hits the host, thereby
+ * causing coherency issues with O_DIRECT access to the same file as
+ * well as any host interaction with the file.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+static int vbsf_reg_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- TRACE();
- if (vma->vm_flags & VM_SHARED) {
- LogFunc(("shared mmapping not available\n"));
- return -EINVAL;
- }
-
- vma->vm_ops = &sf_vma_ops;
- return 0;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ return __generic_file_fsync(file, start, end, datasync);
+# else
+ return generic_file_fsync(file, start, end, datasync);
+# endif
}
-
-struct file_operations sf_reg_fops = {
- .read = sf_reg_read,
- .open = sf_reg_open,
- .write = sf_reg_write,
- .release = sf_reg_release,
- .mmap = sf_reg_mmap,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
-/** @todo This code is known to cause caching of data which should not be
- * cached. Investigate. */
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
- .splice_read = sf_splice_read,
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+static int vbsf_reg_fsync(struct file *file, int datasync)
+{
+ return generic_file_fsync(file, datasync);
+}
+#else /* < 2.6.35 */
+static int vbsf_reg_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+ return simple_fsync(file, dentry, datasync);
# else
- .sendfile = generic_file_sendfile,
+ int rc;
+ struct inode *inode = dentry->d_inode;
+ AssertReturn(inode, -EINVAL);
+
+ /** @todo What about file_fsync()? (<= 2.5.11) */
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
+ rc = sync_mapping_buffers(inode->i_mapping);
+ if ( rc == 0
+ && (inode->i_state & I_DIRTY)
+ && ((inode->i_state & I_DIRTY_DATASYNC) || !datasync)
+ ) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 0
+ };
+ rc = sync_inode(inode, &wbc);
+ }
+# else /* < 2.5.12 */
+ /** @todo
+	 * Something is buggy here or in the 2.4.21-27.EL kernel I'm testing on.
+ *
+ * In theory we shouldn't need to do anything here, since msync will call
+ * writepage() on each dirty page and we write them out synchronously. So, the
+ * problem is elsewhere... Doesn't happen all the time either. Sigh.
+ */
+ rc = fsync_inode_buffers(inode);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
+ if (rc == 0 && datasync)
+ rc = fsync_inode_data_buffers(inode);
+# endif
+
+# endif /* < 2.5.12 */
+ return rc;
# endif
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
+}
+#endif /* < 2.6.35 */
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+/**
+ * Copy a data block from one file to another on the host side.
+ */
+static ssize_t vbsf_reg_copy_file_range(struct file *pFileSrc, loff_t offSrc, struct file *pFileDst, loff_t offDst,
+ size_t cbRange, unsigned int fFlags)
+{
+ ssize_t cbRet;
+ if (g_uSfLastFunction >= SHFL_FN_COPY_FILE_PART) {
+ struct inode *pInodeSrc = pFileSrc->f_inode;
+ struct vbsf_inode_info *pInodeInfoSrc = VBSF_GET_INODE_INFO(pInodeSrc);
+ struct vbsf_super_info *pSuperInfoSrc = VBSF_GET_SUPER_INFO(pInodeSrc->i_sb);
+ struct vbsf_reg_info *pFileInfoSrc = (struct vbsf_reg_info *)pFileSrc->private_data;
+		struct inode *pInodeDst = pFileDst->f_inode;
+ struct vbsf_inode_info *pInodeInfoDst = VBSF_GET_INODE_INFO(pInodeDst);
+ struct vbsf_super_info *pSuperInfoDst = VBSF_GET_SUPER_INFO(pInodeDst->i_sb);
+ struct vbsf_reg_info *pFileInfoDst = (struct vbsf_reg_info *)pFileDst->private_data;
+ VBOXSFCOPYFILEPARTREQ *pReq;
+
+ /*
+ * Some extra validation.
+ */
+ AssertPtrReturn(pInodeInfoSrc, -EOPNOTSUPP);
+ Assert(pInodeInfoSrc->u32Magic == SF_INODE_INFO_MAGIC);
+ AssertPtrReturn(pInodeInfoDst, -EOPNOTSUPP);
+ Assert(pInodeInfoDst->u32Magic == SF_INODE_INFO_MAGIC);
+
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+ if (!S_ISREG(pInodeSrc->i_mode) || !S_ISREG(pInodeDst->i_mode))
+ return S_ISDIR(pInodeSrc->i_mode) || S_ISDIR(pInodeDst->i_mode) ? -EISDIR : -EINVAL;
# endif
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
- .fsync = noop_fsync,
-# else
- .fsync = simple_sync_file,
+
+ /*
+ * Allocate the request and issue it.
+ */
+ pReq = (VBOXSFCOPYFILEPARTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq) {
+ int vrc = VbglR0SfHostReqCopyFilePart(pSuperInfoSrc->map.root, pFileInfoSrc->Handle.hHost, offSrc,
+ pSuperInfoDst->map.root, pFileInfoDst->Handle.hHost, offDst,
+ cbRange, 0 /*fFlags*/, pReq);
+ if (RT_SUCCESS(vrc))
+ cbRet = pReq->Parms.cb64ToCopy.u.value64;
+ else if (vrc == VERR_NOT_IMPLEMENTED)
+ cbRet = -EOPNOTSUPP;
+ else
+ cbRet = -RTErrConvertToErrno(vrc);
+
+ VbglR0PhysHeapFree(pReq);
+ } else
+ cbRet = -ENOMEM;
+ } else {
+ cbRet = -EOPNOTSUPP;
+ }
+ SFLOGFLOW(("vbsf_reg_copy_file_range: returns %zd\n", cbRet));
+ return cbRet;
+}
+#endif /* >= 4.5 */
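
From userspace this surfaces through the regular syscall (illustrative sketch; the descriptors and size are assumed):

    loff_t offIn = 0, offOut = 0;
    ssize_t cbCopied = copy_file_range(fdSrc, &offIn, fdDst, &offOut, cbFile, 0);
    /* When the host offers SHFL_FN_COPY_FILE_PART, the bytes are copied
     * host-side and never transit the guest. */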
+
+
+#ifdef SFLOG_ENABLED
+/*
+ * This is just for logging page faults and such.
+ */
+
+/** Pointer to the ops that generic_file_mmap returns the first time it's called. */
+static struct vm_operations_struct const *g_pGenericFileVmOps = NULL;
+/** Merge of g_LoggingVmOpsTemplate and g_pGenericFileVmOps. */
+static struct vm_operations_struct g_LoggingVmOps;
+
+
+/* Generic page fault callback: */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+static vm_fault_t vbsf_vmlog_fault(struct vm_fault *vmf)
+{
+ vm_fault_t rc;
+ SFLOGFLOW(("vbsf_vmlog_fault: vmf=%p flags=%#x addr=%p\n", vmf, vmf->flags, vmf->address));
+ rc = g_pGenericFileVmOps->fault(vmf);
+ SFLOGFLOW(("vbsf_vmlog_fault: returns %d\n", rc));
+ return rc;
+}
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
+static int vbsf_vmlog_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ int rc;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ SFLOGFLOW(("vbsf_vmlog_fault: vma=%p vmf=%p flags=%#x addr=%p\n", vma, vmf, vmf->flags, vmf->address));
+# else
+ SFLOGFLOW(("vbsf_vmlog_fault: vma=%p vmf=%p flags=%#x addr=%p\n", vma, vmf, vmf->flags, vmf->virtual_address));
+# endif
+ rc = g_pGenericFileVmOps->fault(vma, vmf);
+ SFLOGFLOW(("vbsf_vmlog_fault: returns %d\n", rc));
+ return rc;
+}
# endif
- .llseek = generic_file_llseek,
-#endif
-};
-struct inode_operations sf_reg_iops = {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
- .revalidate = sf_inode_revalidate
-#else
- .getattr = sf_getattr,
- .setattr = sf_setattr
-#endif
-};
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+/* Special/generic page fault handler: */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)
+static struct page *vbsf_vmlog_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
+{
+ struct page *page;
+ SFLOGFLOW(("vbsf_vmlog_nopage: vma=%p address=%p type=%p:{%#x}\n", vma, address, type, type ? *type : 0));
+ page = g_pGenericFileVmOps->nopage(vma, address, type);
+ SFLOGFLOW(("vbsf_vmlog_nopage: returns %p\n", page));
+ return page;
+}
+# else
+static struct page *vbsf_vmlog_nopage(struct vm_area_struct *vma, unsigned long address, int write_access_or_unused)
+{
+ struct page *page;
+ SFLOGFLOW(("vbsf_vmlog_nopage: vma=%p address=%p wau=%d\n", vma, address, write_access_or_unused));
+ page = g_pGenericFileVmOps->nopage(vma, address, write_access_or_unused);
+ SFLOGFLOW(("vbsf_vmlog_nopage: returns %p\n", page));
+ return page;
+}
+# endif /* < 2.6.26 */
+
-static int sf_readpage(struct file *file, struct page *page)
+/* Special page fault callback for making something writable: */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+static vm_fault_t vbsf_vmlog_page_mkwrite(struct vm_fault *vmf)
+{
+ vm_fault_t rc;
+ SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vmf=%p flags=%#x addr=%p\n", vmf, vmf->flags, vmf->address));
+ rc = g_pGenericFileVmOps->page_mkwrite(vmf);
+ SFLOGFLOW(("vbsf_vmlog_page_mkwrite: returns %d\n", rc));
+ return rc;
+}
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
+static int vbsf_vmlog_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct inode *inode = GET_F_DENTRY(file)->d_inode;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
- struct sf_reg_info *sf_r = file->private_data;
- uint32_t nread = PAGE_SIZE;
- char *buf;
- loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
- int ret;
+ int rc;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vma=%p vmf=%p flags=%#x addr=%p\n", vma, vmf, vmf->flags, vmf->address));
+# else
+ SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vma=%p vmf=%p flags=%#x addr=%p\n", vma, vmf, vmf->flags, vmf->virtual_address));
+# endif
+ rc = g_pGenericFileVmOps->page_mkwrite(vma, vmf);
+ SFLOGFLOW(("vbsf_vmlog_page_mkwrite: returns %d\n", rc));
+ return rc;
+}
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
+static int vbsf_vmlog_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+{
+ int rc;
+ SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vma=%p page=%p\n", vma, page));
+ rc = g_pGenericFileVmOps->page_mkwrite(vma, page);
+ SFLOGFLOW(("vbsf_vmlog_page_mkwrite: returns %d\n", rc));
+ return rc;
+}
+# endif
- TRACE();
- buf = kmap(page);
- ret = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
- if (ret) {
- kunmap(page);
- if (PageLocked(page))
- unlock_page(page);
- return ret;
- }
- BUG_ON(nread > PAGE_SIZE);
- memset(&buf[nread], 0, PAGE_SIZE - nread);
- flush_dcache_page(page);
- kunmap(page);
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
+/* Special page fault callback for mapping pages: */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static void vbsf_vmlog_map_pages(struct vm_fault *vmf, pgoff_t start, pgoff_t end)
+{
+ SFLOGFLOW(("vbsf_vmlog_map_pages: vmf=%p (flags=%#x addr=%p) start=%p end=%p\n", vmf, vmf->flags, vmf->address, start, end));
+ g_pGenericFileVmOps->map_pages(vmf, start, end);
+ SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n"));
+}
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+static void vbsf_vmlog_map_pages(struct fault_env *fenv, pgoff_t start, pgoff_t end)
+{
+ SFLOGFLOW(("vbsf_vmlog_map_pages: fenv=%p (flags=%#x addr=%p) start=%p end=%p\n", fenv, fenv->flags, fenv->address, start, end));
+ g_pGenericFileVmOps->map_pages(fenv, start, end);
+ SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n"));
+}
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+static void vbsf_vmlog_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ SFLOGFLOW(("vbsf_vmlog_map_pages: vma=%p vmf=%p (flags=%#x addr=%p)\n", vma, vmf, vmf->flags, vmf->virtual_address));
+ g_pGenericFileVmOps->map_pages(vma, vmf);
+ SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n"));
}
+# endif
+
+
+/** Overload template. */
+static struct vm_operations_struct const g_LoggingVmOpsTemplate = {
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
+ .fault = vbsf_vmlog_fault,
+# endif
+# if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 25)
+ .nopage = vbsf_vmlog_nopage,
+# endif
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
+ .page_mkwrite = vbsf_vmlog_page_mkwrite,
+# endif
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ .map_pages = vbsf_vmlog_map_pages,
+# endif
+};
-static int sf_writepage(struct page *page, struct writeback_control *wbc)
+/** file_operations::mmap wrapper for logging purposes. */
+extern int vbsf_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
- struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
- struct file *file = sf_i->file;
- struct sf_reg_info *sf_r = file->private_data;
- char *buf;
- uint32_t nwritten = PAGE_SIZE;
- int end_index = inode->i_size >> PAGE_SHIFT;
- loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
- int err;
+ int rc;
+ SFLOGFLOW(("vbsf_reg_mmap: file=%p vma=%p\n", file, vma));
+ rc = generic_file_mmap(file, vma);
+ if (rc == 0) {
+ /* Merge the ops and template the first time thru (there's a race here). */
+ if (g_pGenericFileVmOps == NULL) {
+ uintptr_t const *puSrc1 = (uintptr_t *)vma->vm_ops;
+ uintptr_t const *puSrc2 = (uintptr_t *)&g_LoggingVmOpsTemplate;
+ uintptr_t volatile *puDst = (uintptr_t *)&g_LoggingVmOps;
+ size_t cbLeft = sizeof(g_LoggingVmOps) / sizeof(*puDst);
+ while (cbLeft-- > 0) {
+ *puDst = *puSrc2 && *puSrc1 ? *puSrc2 : *puSrc1;
+ puSrc1++;
+ puSrc2++;
+ puDst++;
+ }
+ g_pGenericFileVmOps = vma->vm_ops;
+ vma->vm_ops = &g_LoggingVmOps;
+ } else if (g_pGenericFileVmOps == vma->vm_ops)
+ vma->vm_ops = &g_LoggingVmOps;
+ else
+ SFLOGFLOW(("vbsf_reg_mmap: Warning: vm_ops=%p, expected %p!\n", vma->vm_ops, g_pGenericFileVmOps));
+ }
+ SFLOGFLOW(("vbsf_reg_mmap: returns %d\n", rc));
+ return rc;
+}
+
+#endif /* SFLOG_ENABLED */
- TRACE();
- if (page->index >= end_index)
- nwritten = inode->i_size & (PAGE_SIZE - 1);
+/**
+ * File operations for regular files.
+ *
+ * Note on splice_read/splice_write/sendfile:
+ * - Splice was introduced in 2.6.17. The generic_file_splice_read/write
+ * methods go thru the page cache, which is undesirable and is why we
+ * need to cook our own versions of the code as long as we cannot track
+ * host-side writes and correctly invalidate the guest page-cache.
+ * - Sendfile was reimplemented using splice in 2.6.23.
+ * - The default_file_splice_read/write no-page-cache fallback functions
+ * were introduced in 2.6.31.  The write one works in page units.
+ * - Since linux 3.16 there is iter_file_splice_write that uses iter_write.
+ * - Since linux 4.9 the generic_file_splice_read function started using
+ * read_iter.
+ */
+struct file_operations vbsf_reg_fops = {
+ .open = vbsf_reg_open,
+ .read = vbsf_reg_read,
+ .write = vbsf_reg_write,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ .read_iter = vbsf_reg_read_iter,
+ .write_iter = vbsf_reg_write_iter,
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+ .aio_read = vbsf_reg_aio_read,
+ .aio_write = vbsf_reg_aio_write,
+#endif
+ .release = vbsf_reg_release,
+#ifdef SFLOG_ENABLED
+ .mmap = vbsf_reg_mmap,
+#else
+ .mmap = generic_file_mmap,
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ .splice_read = vbsf_splice_read,
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ .splice_write = iter_file_splice_write,
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17)
+ .splice_write = vbsf_splice_write,
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 30) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
+ .sendfile = vbsf_reg_sendfile,
+#endif
+ .llseek = vbsf_reg_llseek,
+ .fsync = vbsf_reg_fsync,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+ .copy_file_range = vbsf_reg_copy_file_range,
+#endif
+};
- buf = kmap(page);
- err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off);
- if (err < 0) {
- ClearPageUptodate(page);
- goto out;
- }
+/**
+ * Inode operations for regular files.
+ */
+struct inode_operations vbsf_reg_iops = {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 18)
+ .getattr = vbsf_inode_getattr,
+#else
+ .revalidate = vbsf_inode_revalidate,
+#endif
+ .setattr = vbsf_inode_setattr,
+};
- if (off > inode->i_size)
- inode->i_size = off;
- if (PageError(page))
- ClearPageError(page);
- err = 0;
- out:
- kunmap(page);
+/*********************************************************************************************************************************
+* Address Space Operations on Regular Files (for mmap, sendfile, direct I/O) *
+*********************************************************************************************************************************/
- unlock_page(page);
- return err;
+/**
+ * Used to read the content of a page into the page cache.
+ *
+ * Needed for mmap and reads+writes when the file is mmapped in a
+ * shared+writeable fashion.
+ */
+static int vbsf_readpage(struct file *file, struct page *page)
+{
+ struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
+ int err;
+
+ SFLOGFLOW(("vbsf_readpage: inode=%p file=%p page=%p off=%#llx\n", inode, file, page, (uint64_t)page->index << PAGE_SHIFT));
+ Assert(PageLocked(page));
+
+ if (PageUptodate(page)) {
+ unlock_page(page);
+ return 0;
+ }
+
+ if (!is_bad_inode(inode)) {
+ VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq) {
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ struct vbsf_reg_info *sf_r = file->private_data;
+ uint32_t cbRead;
+ int vrc;
+
+ pReq->PgLst.offFirstPage = 0;
+ pReq->PgLst.aPages[0] = page_to_phys(page);
+ vrc = VbglR0SfHostReqReadPgLst(pSuperInfo->map.root,
+ pReq,
+ sf_r->Handle.hHost,
+ (uint64_t)page->index << PAGE_SHIFT,
+ PAGE_SIZE,
+ 1 /*cPages*/);
+
+ cbRead = pReq->Parms.cb32Read.u.value32;
+ AssertStmt(cbRead <= PAGE_SIZE, cbRead = PAGE_SIZE);
+ VbglR0PhysHeapFree(pReq);
+
+ if (RT_SUCCESS(vrc)) {
+ if (cbRead == PAGE_SIZE) {
+ /* likely */
+ } else {
+ uint8_t *pbMapped = (uint8_t *)kmap(page);
+ RT_BZERO(&pbMapped[cbRead], PAGE_SIZE - cbRead);
+ kunmap(page);
+ /** @todo truncate the inode file size? */
+ }
+
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ return 0;
+ }
+ err = -RTErrConvertToErrno(vrc);
+ } else
+ err = -ENOMEM;
+ } else
+ err = -EIO;
+ SetPageError(page);
+ unlock_page(page);
+ return err;
}
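
A quick worked example of the offset arithmetic above (illustrative, 4K pages):

    /* page->index counts PAGE_SIZE units, so index 3 with PAGE_SHIFT == 12
     * reads host file bytes 0x3000..0x3fff; a short read of 0x200 bytes
     * leaves 0xe00 bytes to be zero-filled by the RT_BZERO above. */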
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
-int sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
- unsigned len, unsigned flags, struct page **pagep,
- void **fsdata)
+/**
+ * Used to write out the content of a dirty page cache page to the host file.
+ *
+ * Needed for mmap and writes when the file is mmapped in a shared+writeable
+ * fashion.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 52)
+static int vbsf_writepage(struct page *page, struct writeback_control *wbc)
+#else
+static int vbsf_writepage(struct page *page)
+#endif
{
- TRACE();
-
- return simple_write_begin(file, mapping, pos, len, flags, pagep,
- fsdata);
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = mapping->host;
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
+ struct vbsf_handle *pHandle = vbsf_handle_find(sf_i, VBSF_HANDLE_F_WRITE, VBSF_HANDLE_F_APPEND);
+ int err;
+
+ SFLOGFLOW(("vbsf_writepage: inode=%p page=%p off=%#llx pHandle=%p (%#llx)\n",
+ inode, page, (uint64_t)page->index << PAGE_SHIFT, pHandle, pHandle ? pHandle->hHost : 0));
+
+ if (pHandle) {
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(inode->i_sb);
+ VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq) {
+ uint64_t const cbFile = i_size_read(inode);
+ uint64_t const offInFile = (uint64_t)page->index << PAGE_SHIFT;
+ uint32_t const cbToWrite = page->index != (cbFile >> PAGE_SHIFT) ? PAGE_SIZE
+ : (uint32_t)cbFile & (uint32_t)PAGE_OFFSET_MASK;
+ int vrc;
+
+ pReq->PgLst.offFirstPage = 0;
+ pReq->PgLst.aPages[0] = page_to_phys(page);
+ vrc = VbglR0SfHostReqWritePgLst(pSuperInfo->map.root,
+ pReq,
+ pHandle->hHost,
+ offInFile,
+ cbToWrite,
+ 1 /*cPages*/);
+ sf_i->ModificationTimeAtOurLastWrite = sf_i->ModificationTime;
+ AssertMsgStmt(pReq->Parms.cb32Write.u.value32 == cbToWrite || RT_FAILURE(vrc), /* lazy bird */
+ ("%#x vs %#x\n", pReq->Parms.cb32Write, cbToWrite),
+ vrc = VERR_WRITE_ERROR);
+ VbglR0PhysHeapFree(pReq);
+
+ if (RT_SUCCESS(vrc)) {
+ /* Update the inode if we've extended the file. */
+ /** @todo is this necessary given the cbToWrite calc above? */
+ uint64_t const offEndOfWrite = offInFile + cbToWrite;
+ if ( offEndOfWrite > cbFile
+ && offEndOfWrite > i_size_read(inode))
+ i_size_write(inode, offEndOfWrite);
+
+ /* Update and unlock the page. */
+ if (PageError(page))
+ ClearPageError(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+
+ vbsf_handle_release(pHandle, pSuperInfo, "vbsf_writepage");
+ return 0;
+ }
+
+ /*
+ * We failed.
+ */
+ err = -EIO;
+ } else
+ err = -ENOMEM;
+ vbsf_handle_release(pHandle, pSuperInfo, "vbsf_writepage");
+ } else {
+ /** @todo we could re-open the file here and deal with this... */
+ static uint64_t volatile s_cCalls = 0;
+ if (s_cCalls++ < 16)
+ printk("vbsf_writepage: no writable handle for %s..\n", sf_i->path->String.ach);
+ err = -EIO;
+ }
+ SetPageError(page);
+ unlock_page(page);
+ return err;
}
-int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
- unsigned len, unsigned copied, struct page *page, void *fsdata)
-{
- struct inode *inode = mapping->host;
- struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
- struct sf_reg_info *sf_r = file->private_data;
- void *buf;
- unsigned from = pos & (PAGE_SIZE - 1);
- uint32_t nwritten = len;
- int err;
- TRACE();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+/**
+ * Called when writing thru the page cache (which we shouldn't be doing).
+ */
+int vbsf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
+ unsigned len, unsigned flags, struct page **pagep, void **fsdata)
+{
+ /** @todo r=bird: We shouldn't ever get here, should we? Because we don't use
+ * the page cache for any writes AFAIK. We could just as well use
+ * simple_write_begin & simple_write_end here if we think we really
+ * need to have non-NULL function pointers in the table... */
+ static uint64_t volatile s_cCalls = 0;
+ if (s_cCalls++ < 16) {
+ printk("vboxsf: Unexpected call to vbsf_write_begin(pos=%#llx len=%#x flags=%#x)! Please report.\n",
+ (unsigned long long)pos, len, flags);
+ RTLogBackdoorPrintf("vboxsf: Unexpected call to vbsf_write_begin(pos=%#llx len=%#x flags=%#x)! Please report.\n",
+ (unsigned long long)pos, len, flags);
+# ifdef WARN_ON
+ WARN_ON(1);
+# endif
+ }
+ return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
+}
+#endif /* KERNEL_VERSION >= 2.6.24 */
- buf = kmap(page);
- err =
- sf_reg_write_aux(__func__, sf_g, sf_r, buf + from, &nwritten, pos);
- kunmap(page);
- if (err >= 0) {
- if (!PageUptodate(page) && nwritten == PAGE_SIZE)
- SetPageUptodate(page);
-
- pos += nwritten;
- if (pos > inode->i_size)
- inode->i_size = pos;
- }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
- unlock_page(page);
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
- put_page(page);
-# else
- page_cache_release(page);
-# endif
+# ifdef VBOX_UEK
+# undef iov_iter /* HACK ALERT! Don't put anything needing vbsf_iov_iter after this fun! */
+# endif
- return nwritten;
+/**
+ * This is needed to make open accept O_DIRECT, as well as to deal with direct
+ * I/O requests that we don't intercept earlier.
+ */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+static ssize_t vbsf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+static ssize_t vbsf_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) || defined(VBOX_UEK)
+static ssize_t vbsf_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 6)
+static ssize_t vbsf_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 55)
+static int vbsf_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
+static int vbsf_direct_IO(int rw, struct file *file, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 35)
+static int vbsf_direct_IO(int rw, struct inode *inode, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 26)
+static int vbsf_direct_IO(int rw, struct inode *inode, char *buf, loff_t offset, size_t count)
+# elif LINUX_VERSION_CODE == KERNEL_VERSION(2, 4, 21) && defined(I_NEW) /* RHEL3 Frankenkernel. */
+static int vbsf_direct_IO(int rw, struct file *file, struct kiobuf *buf, unsigned long whatever1, int whatever2)
+# else
+static int vbsf_direct_IO(int rw, struct inode *inode, struct kiobuf *buf, unsigned long whatever1, int whatever2)
+# endif
+{
+ TRACE();
+ return -EINVAL;
}
-# endif /* KERNEL_VERSION >= 2.6.24 */
+#endif
-struct address_space_operations sf_reg_aops = {
- .readpage = sf_readpage,
- .writepage = sf_writepage,
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
- .write_begin = sf_write_begin,
- .write_end = sf_write_end,
-# else
- .prepare_write = simple_prepare_write,
- .commit_write = simple_commit_write,
-# endif
+/**
+ * Address space (for the page cache) operations for regular files.
+ *
+ * @todo the FsPerf touch/flush (mmap) test fails on 4.4.0 (ubuntu 16.04 lts).
+ */
+struct address_space_operations vbsf_reg_aops = {
+ .readpage = vbsf_readpage,
+ .writepage = vbsf_writepage,
+ /** @todo Need .writepages if we want msync performance... */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
+ .set_page_dirty = __set_page_dirty_buffers,
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+ .write_begin = vbsf_write_begin,
+ .write_end = simple_write_end,
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 45)
+ .prepare_write = simple_prepare_write,
+ .commit_write = simple_commit_write,
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
+ .direct_IO = vbsf_direct_IO,
+#endif
};
-#endif /* LINUX_VERSION_CODE >= 2.6.0 */
-
diff --git a/ubuntu/vbox/vboxsf/revision-generated.h b/ubuntu/vbox/vboxsf/revision-generated.h
index a787df83ff85..fbe696a898a3 100644
--- a/ubuntu/vbox/vboxsf/revision-generated.h
+++ b/ubuntu/vbox/vboxsf/revision-generated.h
@@ -1 +1 @@
-#define VBOX_SVN_REV 128164
+#define VBOX_SVN_REV 129722
diff --git a/ubuntu/vbox/vboxsf/utils.c b/ubuntu/vbox/vboxsf/utils.c
index d14bbb31e669..e2256f8bde2c 100644
--- a/ubuntu/vbox/vboxsf/utils.c
+++ b/ubuntu/vbox/vboxsf/utils.c
@@ -33,851 +33,1215 @@
#include "vfsmod.h"
#include <iprt/asm.h>
#include <iprt/err.h>
-#include <linux/nfs_fs.h>
#include <linux/vfs.h>
-/* #define USE_VMALLOC */
-/*
- * sf_reg_aops and sf_backing_dev_info are just quick implementations to make
- * sendfile work. For more information have a look at
- *
- * http://us1.samba.org/samba/ftp/cifs-cvs/ols2006-fs-tutorial-smf.odp
- *
- * and the sample implementation
- *
- * http://pserver.samba.org/samba/ftp/cifs-cvs/samplefs.tar.gz
- */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
-static void sf_ftime_from_timespec(time_t * time, RTTIMESPEC * ts)
+int vbsf_nlscpy(struct vbsf_super_info *pSuperInfo, char *name, size_t name_bound_len,
+ const unsigned char *utf8_name, size_t utf8_len)
{
- int64_t t = RTTimeSpecGetNano(ts);
+ Assert(name_bound_len > 1);
+ Assert(RTStrNLen(utf8_name, utf8_len) == utf8_len);
- do_div(t, 1000000000);
- *time = t;
-}
+ if (pSuperInfo->nls) {
+ const char *in = utf8_name;
+ size_t in_bound_len = utf8_len;
+ char *out = name;
+ size_t out_bound_len = name_bound_len - 1;
+ size_t out_len = 0;
-static void sf_timespec_from_ftime(RTTIMESPEC * ts, time_t * time)
-{
- int64_t t = 1000000000 * *time;
- RTTimeSpecSetNano(ts, t);
-}
-#else /* >= 2.6.0 */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
-static void sf_ftime_from_timespec(struct timespec *tv, RTTIMESPEC *ts)
+ while (in_bound_len) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+ unicode_t uni;
+ int cbInEnc = utf8_to_utf32(in, in_bound_len, &uni);
#else
-static void sf_ftime_from_timespec(struct timespec64 *tv, RTTIMESPEC *ts)
+ linux_wchar_t uni;
+ int cbInEnc = utf8_mbtowc(&uni, in, in_bound_len);
#endif
-{
- int64_t t = RTTimeSpecGetNano(ts);
- int64_t nsec;
-
- nsec = do_div(t, 1000000000);
- tv->tv_sec = t;
- tv->tv_nsec = nsec;
+ if (cbInEnc >= 0) {
+ int cbOutEnc = pSuperInfo->nls->uni2char(uni, out, out_bound_len);
+ if (cbOutEnc >= 0) {
+ /*SFLOG3(("vbsf_nlscpy: cbOutEnc=%d cbInEnc=%d uni=%#x in_bound_len=%u\n", cbOutEnc, cbInEnc, uni, in_bound_len));*/
+ out += cbOutEnc;
+ out_bound_len -= cbOutEnc;
+ out_len += cbOutEnc;
+
+ in += cbInEnc;
+ in_bound_len -= cbInEnc;
+ } else {
+ SFLOG(("vbsf_nlscpy: nls->uni2char failed with %d on %#x (pos %u in '%s'), out_bound_len=%u\n",
+ cbOutEnc, uni, in - (const char *)utf8_name, (const char *)utf8_name, (unsigned)out_bound_len));
+ return cbOutEnc;
+ }
+ } else {
+ SFLOG(("vbsf_nlscpy: utf8_to_utf32/utf8_mbtowc failed with %d on %x (pos %u in '%s'), in_bound_len=%u!\n",
+ cbInEnc, *in, in - (const char *)utf8_name, (const char *)utf8_name, (unsigned)in_bound_len));
+ return -EINVAL;
+ }
+ }
+
+ *out = '\0';
+ } else {
+ if (utf8_len + 1 > name_bound_len)
+ return -ENAMETOOLONG;
+
+ memcpy(name, utf8_name, utf8_len + 1);
+ }
+ return 0;
}
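
An illustrative call (the buffer name and input variables are assumed), e.g. when converting a host-side UTF-8 name for a directory listing:

    char szName[NAME_MAX + 1];
    int rc = vbsf_nlscpy(pSuperInfo, szName, sizeof(szName), utf8_name, utf8_len);
    /* rc == 0 on success; szName is then zero-terminated in the mount's
     * NLS charset. */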
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
-static void sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec *tv)
+
+/**
+ * Converts the given NLS string to a host one, kmalloc'ing
+ * the output buffer (use kfree on result).
+ */
+int vbsf_nls_to_shflstring(struct vbsf_super_info *pSuperInfo, const char *pszNls, PSHFLSTRING *ppString)
+{
+ int rc;
+ size_t const cchNls = strlen(pszNls);
+ PSHFLSTRING pString = NULL;
+ if (pSuperInfo->nls) {
+ /*
+		 * NLS -> UTF-8 w/ SHFL string header.
+ */
+ /* Calc length first: */
+ size_t cchUtf8 = 0;
+ size_t offNls = 0;
+ while (offNls < cchNls) {
+ linux_wchar_t uc; /* Note! We renamed the type due to clashes. */
+ int const cbNlsCodepoint = pSuperInfo->nls->char2uni(&pszNls[offNls], cchNls - offNls, &uc);
+ if (cbNlsCodepoint >= 0) {
+ char achTmp[16];
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+ int cbUtf8Codepoint = utf32_to_utf8(uc, achTmp, sizeof(achTmp));
#else
-static void sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec64 *tv)
+ int cbUtf8Codepoint = utf8_wctomb(achTmp, uc, sizeof(achTmp));
#endif
-{
- int64_t t = (int64_t) tv->tv_nsec + (int64_t) tv->tv_sec * 1000000000;
- RTTimeSpecSetNano(ts, t);
+ if (cbUtf8Codepoint > 0) {
+ cchUtf8 += cbUtf8Codepoint;
+ offNls += cbNlsCodepoint;
+ } else {
+ Log(("vbsf_nls_to_shflstring: nls->uni2char(%#x) failed: %d\n", uc, cbUtf8Codepoint));
+ return -EINVAL;
+ }
+ } else {
+ Log(("vbsf_nls_to_shflstring: nls->char2uni(%.*Rhxs) failed: %d\n",
+ RT_MIN(8, cchNls - offNls), &pszNls[offNls], cbNlsCodepoint));
+ return -EINVAL;
+ }
+ }
+ if (cchUtf8 + 1 < _64K) {
+ /* Allocate: */
+ pString = (PSHFLSTRING)kmalloc(SHFLSTRING_HEADER_SIZE + cchUtf8 + 1, GFP_KERNEL);
+ if (pString) {
+ char *pchDst = pString->String.ach;
+ pString->u16Length = (uint16_t)cchUtf8;
+ pString->u16Size = (uint16_t)(cchUtf8 + 1);
+
+ /* Do the conversion (cchUtf8 is counted down): */
+ rc = 0;
+ offNls = 0;
+ while (offNls < cchNls) {
+ linux_wchar_t uc; /* Note! We renamed the type due to clashes. */
+ int const cbNlsCodepoint = pSuperInfo->nls->char2uni(&pszNls[offNls], cchNls - offNls, &uc);
+ if (cbNlsCodepoint >= 0) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+ int cbUtf8Codepoint = utf32_to_utf8(uc, pchDst, cchUtf8);
+#else
+ int cbUtf8Codepoint = utf8_wctomb(pchDst, uc, cchUtf8);
+#endif
+ if (cbUtf8Codepoint > 0) {
+ AssertBreakStmt(cbUtf8Codepoint <= cchUtf8, rc = -EINVAL);
+ cchUtf8 -= cbUtf8Codepoint;
+ pchDst += cbUtf8Codepoint;
+ offNls += cbNlsCodepoint;
+ } else {
+ Log(("vbsf_nls_to_shflstring: nls->uni2char(%#x) failed! %d, cchUtf8=%zu\n",
+ uc, cbUtf8Codepoint, cchUtf8));
+ rc = -EINVAL;
+ break;
+ }
+ } else {
+ Log(("vbsf_nls_to_shflstring: nls->char2uni(%.*Rhxs) failed! %d\n",
+ RT_MIN(8, cchNls - offNls), &pszNls[offNls], cbNlsCodepoint));
+ rc = -EINVAL;
+ break;
+ }
+ }
+ if (rc == 0) {
+ /*
+ * Succeeded. Just terminate the string and we're good.
+ */
+ Assert(pchDst - pString->String.ach == pString->u16Length);
+ *pchDst = '\0';
+ } else {
+ kfree(pString);
+ pString = NULL;
+ }
+ } else {
+ Log(("vbsf_nls_to_shflstring: failed to allocate %u bytes\n", SHFLSTRING_HEADER_SIZE + cchUtf8 + 1));
+ rc = -ENOMEM;
+ }
+ } else {
+ Log(("vbsf_nls_to_shflstring: too long: %zu bytes (%zu nls bytes)\n", cchUtf8, cchNls));
+ rc = -ENAMETOOLONG;
+ }
+ } else {
+ /*
+		 * UTF-8 -> UTF-8 w/ SHFL string header.
+ */
+ if (cchNls + 1 < _64K) {
+ pString = (PSHFLSTRING)kmalloc(SHFLSTRING_HEADER_SIZE + cchNls + 1, GFP_KERNEL);
+ if (pString) {
+ pString->u16Length = (uint16_t)cchNls;
+ pString->u16Size = (uint16_t)(cchNls + 1);
+ memcpy(pString->String.ach, pszNls, cchNls);
+ pString->String.ach[cchNls] = '\0';
+ rc = 0;
+ } else {
+ Log(("vbsf_nls_to_shflstring: failed to allocate %u bytes\n", SHFLSTRING_HEADER_SIZE + cchNls + 1));
+ rc = -ENOMEM;
+ }
+ } else {
+ Log(("vbsf_nls_to_shflstring: too long: %zu bytes\n", cchNls));
+ rc = -ENAMETOOLONG;
+ }
+ }
+ *ppString = pString;
+ return rc;
}
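
Illustrative usage (the path literal is assumed), matching the kfree contract noted above:

    PSHFLSTRING pPath = NULL;
    int rc = vbsf_nls_to_shflstring(pSuperInfo, "dir/file.txt", &pPath);
    if (rc == 0) {
        /* ... hand pPath to a host request ... */
        kfree(pPath);
    }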
-#endif /* >= 2.6.0 */
-/* set [inode] attributes based on [info], uid/gid based on [sf_g] */
-void sf_init_inode(struct sf_glob_info *sf_g, struct inode *inode,
- PSHFLFSOBJINFO info)
+
+/**
+ * Convert from VBox to linux time.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+DECLINLINE(void) vbsf_time_to_linux(time_t *pLinuxDst, PCRTTIMESPEC pVBoxSrc)
{
- PSHFLFSOBJATTR attr;
- int mode;
+ int64_t t = RTTimeSpecGetNano(pVBoxSrc);
+ do_div(t, RT_NS_1SEC);
+ *pLinuxDst = t;
+}
+#else /* >= 2.6.0 */
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
+DECLINLINE(void) vbsf_time_to_linux(struct timespec *pLinuxDst, PCRTTIMESPEC pVBoxSrc)
+# else
+DECLINLINE(void) vbsf_time_to_linux(struct timespec64 *pLinuxDst, PCRTTIMESPEC pVBoxSrc)
+# endif
+{
+ int64_t t = RTTimeSpecGetNano(pVBoxSrc);
+ pLinuxDst->tv_nsec = do_div(t, RT_NS_1SEC);
+ pLinuxDst->tv_sec = t;
+}
+#endif /* >= 2.6.0 */
- TRACE();
- attr = &info->Attr;
+/**
+ * Convert from linux to VBox time.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+DECLINLINE(void) vbsf_time_to_vbox(PRTTIMESPEC pVBoxDst, time_t *pLinuxSrc)
+{
+ RTTimeSpecSetNano(pVBoxDst, RT_NS_1SEC_64 * *pLinuxSrc);
+}
+#else /* >= 2.6.0 */
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
+DECLINLINE(void) vbsf_time_to_vbox(PRTTIMESPEC pVBoxDst, struct timespec const *pLinuxSrc)
+# else
+DECLINLINE(void) vbsf_time_to_vbox(PRTTIMESPEC pVBoxDst, struct timespec64 const *pLinuxSrc)
+# endif
+{
+ RTTimeSpecSetNano(pVBoxDst, pLinuxSrc->tv_nsec + pLinuxSrc->tv_sec * (int64_t)RT_NS_1SEC);
+}
+#endif /* >= 2.6.0 */
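
The two helpers above are plain 64-bit nanosecond arithmetic: do_div() divides t in place and returns the remainder, yielding tv_sec and tv_nsec in one step. A userspace restatement of the same math (the sample timestamp is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    #define NS_PER_SEC 1000000000LL   /* RT_NS_1SEC */

    int main(void)
    {
        int64_t ns   = 1555592209123456789LL; /* example timestamp, ns since epoch */
        int64_t sec  = ns / NS_PER_SEC;       /* what do_div() leaves behind in t */
        int64_t nsec = ns % NS_PER_SEC;       /* what do_div() returns */
        printf("tv_sec=%lld tv_nsec=%lld\n", (long long)sec, (long long)nsec);
        /* vbsf_time_to_vbox() is the inverse: */
        return (sec * NS_PER_SEC + nsec == ns) ? 0 : 1;
    }
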
-#define mode_set(r) attr->fMode & (RTFS_UNIX_##r) ? (S_##r) : 0;
- mode = mode_set(IRUSR);
- mode |= mode_set(IWUSR);
- mode |= mode_set(IXUSR);
- mode |= mode_set(IRGRP);
- mode |= mode_set(IWGRP);
- mode |= mode_set(IXGRP);
+/**
+ * Converts VBox access permissions to Linux ones (mode & 0777).
+ *
+ * @note Currently identical.
+ * @sa sf_access_permissions_to_vbox
+ */
+DECLINLINE(int) sf_access_permissions_to_linux(uint32_t fAttr)
+{
+ /* Access bits should be the same: */
+ AssertCompile(RTFS_UNIX_IRUSR == S_IRUSR);
+ AssertCompile(RTFS_UNIX_IWUSR == S_IWUSR);
+ AssertCompile(RTFS_UNIX_IXUSR == S_IXUSR);
+ AssertCompile(RTFS_UNIX_IRGRP == S_IRGRP);
+ AssertCompile(RTFS_UNIX_IWGRP == S_IWGRP);
+ AssertCompile(RTFS_UNIX_IXGRP == S_IXGRP);
+ AssertCompile(RTFS_UNIX_IROTH == S_IROTH);
+ AssertCompile(RTFS_UNIX_IWOTH == S_IWOTH);
+ AssertCompile(RTFS_UNIX_IXOTH == S_IXOTH);
+
+ return fAttr & RTFS_UNIX_ALL_ACCESS_PERMS;
+}
- mode |= mode_set(IROTH);
- mode |= mode_set(IWOTH);
- mode |= mode_set(IXOTH);
-#undef mode_set
+/**
+ * Produce the Linux mode mask, given VBox, mount options and file type.
+ */
+DECLINLINE(int) sf_file_mode_to_linux(uint32_t fVBoxMode, int fFixedMode, int fClearMask, int fType)
+{
+ int fLnxMode = sf_access_permissions_to_linux(fVBoxMode);
+ if (fFixedMode != ~0)
+ fLnxMode = fFixedMode & 0777;
+ fLnxMode &= ~fClearMask;
+ fLnxMode |= fType;
+ return fLnxMode;
+}
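
To make the precedence concrete: a fixed fmode/dmode (anything but -1) overrides the host permissions entirely, and the fmask/dmask clear bits apply afterwards in either case. A standalone sketch of the same logic (values are examples):

    #include <stdio.h>
    #include <sys/stat.h>

    /* Same shape as sf_file_mode_to_linux(); host perms already map 1:1. */
    static int file_mode_to_linux(int fHostPerms, int fFixedMode, int fClearMask, int fType)
    {
        int fMode = fHostPerms & 0777;
        if (fFixedMode != ~0)        /* -1 means "no fixed fmode/dmode given" */
            fMode = fFixedMode & 0777;
        fMode &= ~fClearMask;
        fMode |= fType;
        return fMode;
    }

    int main(void)
    {
        /* Host says 0777; mount used fmode=0640,fmask=027: result is 0640 | S_IFREG. */
        printf("%o\n", file_mode_to_linux(0777, 0640, 027, S_IFREG));
        return 0;
    }
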
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
- inode->i_mapping->a_ops = &sf_reg_aops;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
- /* XXX Was this ever necessary? */
- inode->i_mapping->backing_dev_info = &sf_g->bdi;
-#endif
-#endif
- if (RTFS_IS_DIRECTORY(attr->fMode)) {
- inode->i_mode = sf_g->dmode != ~0 ? (sf_g->dmode & 0777) : mode;
- inode->i_mode &= ~sf_g->dmask;
- inode->i_mode |= S_IFDIR;
- inode->i_op = &sf_dir_iops;
- inode->i_fop = &sf_dir_fops;
- /* XXX: this probably should be set to the number of entries
- in the directory plus two (. ..) */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
- set_nlink(inode, 1);
-#else
- inode->i_nlink = 1;
-#endif
- }
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
- else if (RTFS_IS_SYMLINK(attr->fMode)) {
- inode->i_mode = sf_g->fmode != ~0 ? (sf_g->fmode & 0777) : mode;
- inode->i_mode &= ~sf_g->fmask;
- inode->i_mode |= S_IFLNK;
- inode->i_op = &sf_lnk_iops;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
- set_nlink(inode, 1);
-#else
- inode->i_nlink = 1;
-#endif
- }
-#endif
- else {
- inode->i_mode = sf_g->fmode != ~0 ? (sf_g->fmode & 0777) : mode;
- inode->i_mode &= ~sf_g->fmask;
- inode->i_mode |= S_IFREG;
- inode->i_op = &sf_reg_iops;
- inode->i_fop = &sf_reg_fops;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
- set_nlink(inode, 1);
-#else
- inode->i_nlink = 1;
+/**
+ * Initializes the @a inode attributes based on @a pObjInfo and @a pSuperInfo
+ * options.
+ */
+void vbsf_init_inode(struct inode *inode, struct vbsf_inode_info *sf_i, PSHFLFSOBJINFO pObjInfo,
+ struct vbsf_super_info *pSuperInfo)
+{
+ PCSHFLFSOBJATTR pAttr = &pObjInfo->Attr;
+
+ TRACE();
+
+ sf_i->ts_up_to_date = jiffies;
+ sf_i->force_restat = 0;
+
+ if (RTFS_IS_DIRECTORY(pAttr->fMode)) {
+ inode->i_mode = sf_file_mode_to_linux(pAttr->fMode, pSuperInfo->dmode, pSuperInfo->dmask, S_IFDIR);
+ inode->i_op = &vbsf_dir_iops;
+ inode->i_fop = &vbsf_dir_fops;
+
+ /* XXX: this probably should be set to the number of entries
+ in the directory plus two (. ..) */
+ set_nlink(inode, 1);
+ }
+ else if (RTFS_IS_SYMLINK(pAttr->fMode)) {
+ /** @todo r=bird: Aren't System V symlinks w/o any mode mask? IIRC there is
+ * no lchmod on Linux. */
+ inode->i_mode = sf_file_mode_to_linux(pAttr->fMode, pSuperInfo->fmode, pSuperInfo->fmask, S_IFLNK);
+ inode->i_op = &vbsf_lnk_iops;
+ set_nlink(inode, 1);
+ } else {
+ inode->i_mode = sf_file_mode_to_linux(pAttr->fMode, pSuperInfo->fmode, pSuperInfo->fmask, S_IFREG);
+ inode->i_op = &vbsf_reg_iops;
+ inode->i_fop = &vbsf_reg_fops;
+ inode->i_mapping->a_ops = &vbsf_reg_aops;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 17) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
+ inode->i_mapping->backing_dev_info = &pSuperInfo->bdi; /* This is needed for mmap. */
#endif
- }
+ set_nlink(inode, 1);
+ }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
- inode->i_uid = make_kuid(current_user_ns(), sf_g->uid);
- inode->i_gid = make_kgid(current_user_ns(), sf_g->gid);
+ inode->i_uid = make_kuid(current_user_ns(), pSuperInfo->uid);
+ inode->i_gid = make_kgid(current_user_ns(), pSuperInfo->gid);
#else
- inode->i_uid = sf_g->uid;
- inode->i_gid = sf_g->gid;
+ inode->i_uid = pSuperInfo->uid;
+ inode->i_gid = pSuperInfo->gid;
#endif
- inode->i_size = info->cbObject;
+ inode->i_size = pObjInfo->cbObject;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && !defined(KERNEL_FC6)
- inode->i_blksize = 4096;
+ inode->i_blksize = 4096;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 11)
- inode->i_blkbits = 12;
+ inode->i_blkbits = 12;
#endif
- /* i_blocks always in units of 512 bytes! */
- inode->i_blocks = (info->cbAllocated + 511) / 512;
-
- sf_ftime_from_timespec(&inode->i_atime, &info->AccessTime);
- sf_ftime_from_timespec(&inode->i_ctime, &info->ChangeTime);
- sf_ftime_from_timespec(&inode->i_mtime, &info->ModificationTime);
+ /* i_blocks always in units of 512 bytes! */
+ inode->i_blocks = (pObjInfo->cbAllocated + 511) / 512;
+
+ vbsf_time_to_linux(&inode->i_atime, &pObjInfo->AccessTime);
+ vbsf_time_to_linux(&inode->i_ctime, &pObjInfo->ChangeTime);
+ vbsf_time_to_linux(&inode->i_mtime, &pObjInfo->ModificationTime);
+ sf_i->BirthTime = pObjInfo->BirthTime;
+ sf_i->ModificationTime = pObjInfo->ModificationTime;
+ RTTimeSpecSetSeconds(&sf_i->ModificationTimeAtOurLastWrite, 0);
}
-int sf_stat(const char *caller, struct sf_glob_info *sf_g,
- SHFLSTRING * path, PSHFLFSOBJINFO result, int ok_to_fail)
-{
- int rc;
- SHFLCREATEPARMS params;
- NOREF(caller);
-
- TRACE();
-
- RT_ZERO(params);
- params.Handle = SHFL_HANDLE_NIL;
- params.CreateFlags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW;
- LogFunc(("sf_stat: calling VbglR0SfCreate, file %s, flags %#x\n",
- path->String.utf8, params.CreateFlags));
- rc = VbglR0SfCreate(&client_handle, &sf_g->map, path, ¶ms);
- if (rc == VERR_INVALID_NAME) {
- /* this can happen for names like 'foo*' on a Windows host */
- return -ENOENT;
- }
- if (RT_FAILURE(rc)) {
- LogFunc(("VbglR0SfCreate(%s) failed. caller=%s, rc=%Rrc\n",
- path->String.utf8, rc, caller));
- return -EPROTO;
- }
- if (params.Result != SHFL_FILE_EXISTS) {
- if (!ok_to_fail)
- LogFunc(("VbglR0SfCreate(%s) file does not exist. caller=%s, result=%d\n", path->String.utf8, params.Result, caller));
- return -ENOENT;
- }
-
- *result = params.Info;
- return 0;
-}
-/* this is called directly as iop on 2.4, indirectly as dop
- [sf_dentry_revalidate] on 2.4/2.6, indirectly as iop through
- [sf_getattr] on 2.6. the job is to find out whether dentry/inode is
- still valid. the test is failed if [dentry] does not have an inode
- or [sf_stat] is unsuccessful, otherwise we return success and
- update inode attributes */
-int sf_inode_revalidate(struct dentry *dentry)
+/**
+ * Update the inode with new object info from the host.
+ *
+ * Called by vbsf_inode_revalidate_worker() and vbsf_inode_revalidate_with_handle().
+ */
+void vbsf_update_inode(struct inode *pInode, struct vbsf_inode_info *pInodeInfo, PSHFLFSOBJINFO pObjInfo,
+ struct vbsf_super_info *pSuperInfo, bool fInodeLocked, unsigned fSetAttrs)
{
- int err;
- struct sf_glob_info *sf_g;
- struct sf_inode_info *sf_i;
- SHFLFSOBJINFO info;
-
- TRACE();
- if (!dentry || !dentry->d_inode) {
- LogFunc(("no dentry(%p) or inode(%p)\n", dentry,
- dentry->d_inode));
- return -EINVAL;
- }
-
- sf_g = GET_GLOB_INFO(dentry->d_inode->i_sb);
- sf_i = GET_INODE_INFO(dentry->d_inode);
-
-#if 0
- printk("%s called by %p:%p\n",
- sf_i->path->String.utf8,
- __builtin_return_address(0), __builtin_return_address(1));
-#endif
-
- BUG_ON(!sf_g);
- BUG_ON(!sf_i);
-
- if (!sf_i->force_restat) {
- if (jiffies - dentry->d_time < sf_g->ttl)
- return 0;
- }
-
- err = sf_stat(__func__, sf_g, sf_i->path, &info, 1);
- if (err)
- return err;
+ PCSHFLFSOBJATTR pAttr = &pObjInfo->Attr;
+ int fMode;
- dentry->d_time = jiffies;
- sf_init_inode(sf_g, dentry->d_inode, &info);
- return 0;
-}
+ TRACE();
-/* this is called during name resolution/lookup to check if the
- [dentry] in the cache is still valid. the job is handled by
- [sf_inode_revalidate] */
-static int
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
-sf_dentry_revalidate(struct dentry *dentry, unsigned flags)
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-sf_dentry_revalidate(struct dentry *dentry, struct nameidata *nd)
-#else
-sf_dentry_revalidate(struct dentry *dentry, int flags)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+ if (!fInodeLocked)
+ inode_lock(pInode);
#endif
-{
- TRACE();
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
- /* see Documentation/filesystems/vfs.txt */
- if (nd && nd->flags & LOOKUP_RCU)
- return -ECHILD;
+ /*
+ * Calc new mode mask and update it if it changed.
+ */
+ if (RTFS_IS_DIRECTORY(pAttr->fMode))
+ fMode = sf_file_mode_to_linux(pAttr->fMode, pSuperInfo->dmode, pSuperInfo->dmask, S_IFDIR);
+ else if (RTFS_IS_SYMLINK(pAttr->fMode))
+ /** @todo r=bird: Aren't System V symlinks w/o any mode mask? IIRC there is
+ * no lchmod on Linux. */
+ fMode = sf_file_mode_to_linux(pAttr->fMode, pSuperInfo->fmode, pSuperInfo->fmask, S_IFLNK);
+ else
+ fMode = sf_file_mode_to_linux(pAttr->fMode, pSuperInfo->fmode, pSuperInfo->fmask, S_IFREG);
+
+ if (fMode == pInode->i_mode) {
+ /* likely */
+ } else {
+ if ((fMode & S_IFMT) == (pInode->i_mode & S_IFMT))
+ pInode->i_mode = fMode;
+ else {
+ SFLOGFLOW(("vbsf_update_inode: Changed from %o to %o (%s)\n",
+ pInode->i_mode & S_IFMT, fMode & S_IFMT, pInodeInfo->path->String.ach));
+ /** @todo we probably need to be more drastic... */
+ vbsf_init_inode(pInode, pInodeInfo, pObjInfo, pSuperInfo);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+ if (!fInodeLocked)
+ inode_unlock(pInode);
#endif
-
- if (sf_inode_revalidate(dentry))
- return 0;
-
- return 1;
-}
-
-/* on 2.6 this is a proxy for [sf_inode_revalidate] which (as a side
- effect) updates inode attributes for [dentry] (given that [dentry]
- has inode at all) from these new attributes we derive [kstat] via
- [generic_fillattr] */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
-int sf_getattr(const struct path *path, struct kstat *kstat, u32 request_mask,
- unsigned int flags)
+ return;
+ }
+ }
+
+ /*
+ * Update the sizes.
+ * Note! i_blocks is always in units of 512 bytes!
+ */
+ pInode->i_blocks = (pObjInfo->cbAllocated + 511) / 512;
+ i_size_write(pInode, pObjInfo->cbObject);
+
+ /*
+ * Update the timestamps.
+ */
+ vbsf_time_to_linux(&pInode->i_atime, &pObjInfo->AccessTime);
+ vbsf_time_to_linux(&pInode->i_ctime, &pObjInfo->ChangeTime);
+ vbsf_time_to_linux(&pInode->i_mtime, &pObjInfo->ModificationTime);
+ pInodeInfo->BirthTime = pObjInfo->BirthTime;
+
+ /*
+ * Mark it as up to date.
+ * Best to do this before we start with any expensive map invalidation.
+ */
+ pInodeInfo->ts_up_to_date = jiffies;
+ pInodeInfo->force_restat = 0;
+
+ /*
+ * If the modification time changed, we may have to invalidate the page
+ * cache pages associated with this inode if we suspect the change was
+ * made by the host. How suspicious we are depends on the cache mode.
+ *
+ * Note! The invalidate_inode_pages() call is pretty weak. It will _not_
+ * touch pages that are already mapped into an address space, but it
+ * will help if the file isn't currently mmap'ed or if we're in read
+ * or read/write caching mode.
+ */
+ if (!RTTimeSpecIsEqual(&pInodeInfo->ModificationTime, &pObjInfo->ModificationTime)) {
+ if (RTFS_IS_FILE(pAttr->fMode)) {
+ if (!(fSetAttrs & (ATTR_MTIME | ATTR_SIZE))) {
+ bool fInvalidate;
+ if (pSuperInfo->enmCacheMode == kVbsfCacheMode_None) {
+ fInvalidate = true; /* No-caching: always invalidate. */
+ } else {
+ if (RTTimeSpecIsEqual(&pInodeInfo->ModificationTimeAtOurLastWrite, &pInodeInfo->ModificationTime)) {
+ fInvalidate = false; /* Could be our write, so don't invalidate anything */
+ RTTimeSpecSetSeconds(&pInodeInfo->ModificationTimeAtOurLastWrite, 0);
+ } else {
+ /*RTLogBackdoorPrintf("vbsf_update_inode: Invalidating the mapping %s - %RU64 vs %RU64 vs %RU64 - %#x\n",
+ pInodeInfo->path->String.ach,
+ RTTimeSpecGetNano(&pInodeInfo->ModificationTimeAtOurLastWrite),
+ RTTimeSpecGetNano(&pInodeInfo->ModificationTime),
+ RTTimeSpecGetNano(&pObjInfo->ModificationTime), fSetAttrs);*/
+ fInvalidate = true; /* We haven't modified the file recently, so probably a host update. */
+ }
+ }
+ pInodeInfo->ModificationTime = pObjInfo->ModificationTime;
+
+ if (fInvalidate) {
+ struct address_space *mapping = pInode->i_mapping;
+ if (mapping && mapping->nrpages > 0) {
+ SFLOGFLOW(("vbsf_update_inode: Invalidating the mapping %s (%#x)\n", pInodeInfo->path->String.ach, fSetAttrs));
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
+ invalidate_mapping_pages(mapping, 0, ~(pgoff_t)0);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
+ invalidate_inode_pages(mapping);
#else
-int sf_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *kstat)
+ invalidate_inode_pages(pInode);
#endif
-{
- int err;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
- struct dentry *dentry = path->dentry;
+ }
+ }
+ } else {
+ RTTimeSpecSetSeconds(&pInodeInfo->ModificationTimeAtOurLastWrite, 0);
+ pInodeInfo->ModificationTime = pObjInfo->ModificationTime;
+ }
+ } else
+ pInodeInfo->ModificationTime = pObjInfo->ModificationTime;
+ }
+
+ /*
+ * Done.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+ if (!fInodeLocked)
+ inode_unlock(pInode);
#endif
-
- TRACE();
- err = sf_inode_revalidate(dentry);
- if (err)
- return err;
-
- generic_fillattr(dentry->d_inode, kstat);
- return 0;
}
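
Condensing the invalidation policy above: in no-caching mode the mapping is always dropped; otherwise it is kept only when the new mtime matches the one recorded after our own last write. A userspace restatement (types simplified; this mirrors, not replaces, the driver logic):

    #include <stdbool.h>
    #include <stdint.h>

    enum cache_mode { CACHE_NONE, CACHE_STRICT, CACHE_READ, CACHE_READWRITE };

    /* Should the page cache for this inode be invalidated after an mtime change? */
    static bool should_invalidate(enum cache_mode mode,
                                  int64_t mtime_at_our_last_write,
                                  int64_t cached_mtime)
    {
        if (mode == CACHE_NONE)
            return true;                           /* no caching: always drop */
        /* If the cached mtime still matches what we saw after our own last
           write, the change was presumably ours and the cache is coherent. */
        return mtime_at_our_last_write != cached_mtime;
    }
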
-int sf_setattr(struct dentry *dentry, struct iattr *iattr)
-{
- struct sf_glob_info *sf_g;
- struct sf_inode_info *sf_i;
- SHFLCREATEPARMS params;
- SHFLFSOBJINFO info;
- uint32_t cbBuffer;
- int rc, err;
-
- TRACE();
-
- sf_g = GET_GLOB_INFO(dentry->d_inode->i_sb);
- sf_i = GET_INODE_INFO(dentry->d_inode);
- err = 0;
-
- RT_ZERO(params);
- params.Handle = SHFL_HANDLE_NIL;
- params.CreateFlags = SHFL_CF_ACT_OPEN_IF_EXISTS
- | SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_ATTR_WRITE;
-
- /* this is at least required for Posix hosts */
- if (iattr->ia_valid & ATTR_SIZE)
- params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
-
- rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, ¶ms);
- if (RT_FAILURE(rc)) {
- LogFunc(("VbglR0SfCreate(%s) failed rc=%Rrc\n",
- sf_i->path->String.utf8, rc));
- err = -RTErrConvertToErrno(rc);
- goto fail2;
- }
- if (params.Result != SHFL_FILE_EXISTS) {
- LogFunc(("file %s does not exist\n", sf_i->path->String.utf8));
- err = -ENOENT;
- goto fail1;
- }
-
- /* Setting the file size and setting the other attributes has to be
- * handled separately, see implementation of vbsfSetFSInfo() in
- * vbsf.cpp */
- if (iattr->ia_valid & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME)) {
-#define mode_set(r) ((iattr->ia_mode & (S_##r)) ? RTFS_UNIX_##r : 0)
-
- RT_ZERO(info);
- if (iattr->ia_valid & ATTR_MODE) {
- info.Attr.fMode = mode_set(IRUSR);
- info.Attr.fMode |= mode_set(IWUSR);
- info.Attr.fMode |= mode_set(IXUSR);
- info.Attr.fMode |= mode_set(IRGRP);
- info.Attr.fMode |= mode_set(IWGRP);
- info.Attr.fMode |= mode_set(IXGRP);
- info.Attr.fMode |= mode_set(IROTH);
- info.Attr.fMode |= mode_set(IWOTH);
- info.Attr.fMode |= mode_set(IXOTH);
-
- if (iattr->ia_mode & S_IFDIR)
- info.Attr.fMode |= RTFS_TYPE_DIRECTORY;
- else
- info.Attr.fMode |= RTFS_TYPE_FILE;
- }
-
- if (iattr->ia_valid & ATTR_ATIME)
- sf_timespec_from_ftime(&info.AccessTime,
- &iattr->ia_atime);
- if (iattr->ia_valid & ATTR_MTIME)
- sf_timespec_from_ftime(&info.ModificationTime,
- &iattr->ia_mtime);
- /* ignore ctime (inode change time) as it can't be set from userland anyway */
-
- cbBuffer = sizeof(info);
- rc = VbglR0SfFsInfo(&client_handle, &sf_g->map, params.Handle,
- SHFL_INFO_SET | SHFL_INFO_FILE, &cbBuffer,
- (PSHFLDIRINFO) & info);
- if (RT_FAILURE(rc)) {
- LogFunc(("VbglR0SfFsInfo(%s, FILE) failed rc=%Rrc\n",
- sf_i->path->String.utf8, rc));
- err = -RTErrConvertToErrno(rc);
- goto fail1;
- }
- }
-
- if (iattr->ia_valid & ATTR_SIZE) {
- RT_ZERO(info);
- info.cbObject = iattr->ia_size;
- cbBuffer = sizeof(info);
- rc = VbglR0SfFsInfo(&client_handle, &sf_g->map, params.Handle,
- SHFL_INFO_SET | SHFL_INFO_SIZE, &cbBuffer,
- (PSHFLDIRINFO) & info);
- if (RT_FAILURE(rc)) {
- LogFunc(("VbglR0SfFsInfo(%s, SIZE) failed rc=%Rrc\n",
- sf_i->path->String.utf8, rc));
- err = -RTErrConvertToErrno(rc);
- goto fail1;
- }
- }
-
- rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle);
- if (RT_FAILURE(rc))
- LogFunc(("VbglR0SfClose(%s) failed rc=%Rrc\n",
- sf_i->path->String.utf8, rc));
-
- return sf_inode_revalidate(dentry);
-
- fail1:
- rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle);
- if (RT_FAILURE(rc))
- LogFunc(("VbglR0SfClose(%s) failed rc=%Rrc\n",
- sf_i->path->String.utf8, rc));
- fail2:
- return err;
+/** @note Currently only used for the root directory during (re-)mount. */
+int vbsf_stat(const char *caller, struct vbsf_super_info *pSuperInfo, SHFLSTRING *path, PSHFLFSOBJINFO result, int ok_to_fail)
+{
+ int rc;
+ VBOXSFCREATEREQ *pReq;
+ NOREF(caller);
+
+ TRACE();
+
+ pReq = (VBOXSFCREATEREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq) + path->u16Size);
+ if (pReq) {
+ RT_ZERO(*pReq);
+ memcpy(&pReq->StrPath, path, SHFLSTRING_HEADER_SIZE + path->u16Size);
+ pReq->CreateParms.Handle = SHFL_HANDLE_NIL;
+ pReq->CreateParms.CreateFlags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW;
+
+ LogFunc(("Calling VbglR0SfHostReqCreate on %s\n", path->String.utf8));
+ rc = VbglR0SfHostReqCreate(pSuperInfo->map.root, pReq);
+ if (RT_SUCCESS(rc)) {
+ if (pReq->CreateParms.Result == SHFL_FILE_EXISTS) {
+ *result = pReq->CreateParms.Info;
+ rc = 0;
+ } else {
+ if (!ok_to_fail)
+ LogFunc(("VbglR0SfHostReqCreate on %s: file does not exist: %d (caller=%s)\n",
+ path->String.utf8, pReq->CreateParms.Result, caller));
+ rc = -ENOENT;
+ }
+ } else if (rc == VERR_INVALID_NAME) {
+ rc = -ENOENT; /* this can happen for names like 'foo*' on a Windows host */
+ } else {
+ LogFunc(("VbglR0SfHostReqCreate failed on %s: %Rrc (caller=%s)\n", path->String.utf8, rc, caller));
+ rc = -EPROTO;
+ }
+ VbglR0PhysHeapFree(pReq);
+ }
+ else
+ rc = -ENOMEM;
+ return rc;
}
-#endif /* >= 2.6.0 */
-static int sf_make_path(const char *caller, struct sf_inode_info *sf_i,
- const char *d_name, size_t d_len, SHFLSTRING ** result)
+
+/**
+ * Revalidate an inode, inner worker.
+ *
+ * @sa vbsf_inode_revalidate()
+ */
+int vbsf_inode_revalidate_worker(struct dentry *dentry, bool fForced, bool fInodeLocked)
{
- size_t path_len, shflstring_len;
- SHFLSTRING *tmp;
- uint16_t p_len;
- uint8_t *p_name;
- int fRoot = 0;
-
- TRACE();
- p_len = sf_i->path->u16Length;
- p_name = sf_i->path->String.utf8;
-
- if (p_len == 1 && *p_name == '/') {
- path_len = d_len + 1;
- fRoot = 1;
- } else {
- /* lengths of constituents plus terminating zero plus slash */
- path_len = p_len + d_len + 2;
- if (path_len > 0xffff) {
- LogFunc(("path too long. caller=%s, path_len=%zu\n",
- caller, path_len));
- return -ENAMETOOLONG;
- }
- }
-
- shflstring_len = offsetof(SHFLSTRING, String.utf8) + path_len;
- tmp = kmalloc(shflstring_len, GFP_KERNEL);
- if (!tmp) {
- LogRelFunc(("kmalloc failed, caller=%s\n", caller));
- return -ENOMEM;
- }
- tmp->u16Length = path_len - 1;
- tmp->u16Size = path_len;
-
- if (fRoot)
- memcpy(&tmp->String.utf8[0], d_name, d_len + 1);
- else {
- memcpy(&tmp->String.utf8[0], p_name, p_len);
- tmp->String.utf8[p_len] = '/';
- memcpy(&tmp->String.utf8[p_len + 1], d_name, d_len);
- tmp->String.utf8[p_len + 1 + d_len] = '\0';
- }
-
- *result = tmp;
- return 0;
+ int rc;
+ struct inode *pInode = dentry ? dentry->d_inode : NULL;
+ if (pInode) {
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(pInode);
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(pInode->i_sb);
+ AssertReturn(sf_i, -EINVAL);
+ AssertReturn(pSuperInfo, -EINVAL);
+
+ /*
+ * Can we get away without any action here?
+ */
+ if ( !fForced
+ && !sf_i->force_restat
+ && jiffies - sf_i->ts_up_to_date < pSuperInfo->cJiffiesInodeTTL)
+ rc = 0;
+ else {
+ /*
+ * No, we have to query the file info from the host.
+ * Try to get a handle we can query; any kind of handle will do here.
+ */
+ struct vbsf_handle *pHandle = vbsf_handle_find(sf_i, 0, 0);
+ if (pHandle) {
+ /* Query thru pHandle. */
+ VBOXSFOBJINFOREQ *pReq = (VBOXSFOBJINFOREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq) {
+ RT_ZERO(*pReq);
+ rc = VbglR0SfHostReqQueryObjInfo(pSuperInfo->map.root, pReq, pHandle->hHost);
+ if (RT_SUCCESS(rc)) {
+ /*
+ * Reset the TTL and copy the info over into the inode structure.
+ */
+ vbsf_update_inode(pInode, sf_i, &pReq->ObjInfo, pSuperInfo, fInodeLocked, 0 /*fSetAttrs*/);
+ } else if (rc == VERR_INVALID_HANDLE) {
+ rc = -ENOENT; /* Restore.*/
+ } else {
+ LogFunc(("VbglR0SfHostReqQueryObjInfo failed on %#RX64: %Rrc\n", pHandle->hHost, rc));
+ rc = -RTErrConvertToErrno(rc);
+ }
+ VbglR0PhysHeapFree(pReq);
+ } else
+ rc = -ENOMEM;
+ vbsf_handle_release(pHandle, pSuperInfo, "vbsf_inode_revalidate_worker");
+
+ } else {
+ /* Query via path. */
+ SHFLSTRING *pPath = sf_i->path;
+ VBOXSFCREATEREQ *pReq = (VBOXSFCREATEREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq) + pPath->u16Size);
+ if (pReq) {
+ RT_ZERO(*pReq);
+ memcpy(&pReq->StrPath, pPath, SHFLSTRING_HEADER_SIZE + pPath->u16Size);
+ pReq->CreateParms.Handle = SHFL_HANDLE_NIL;
+ pReq->CreateParms.CreateFlags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW;
+
+ rc = VbglR0SfHostReqCreate(pSuperInfo->map.root, pReq);
+ if (RT_SUCCESS(rc)) {
+ if (pReq->CreateParms.Result == SHFL_FILE_EXISTS) {
+ /*
+ * Reset the TTL and copy the info over into the inode structure.
+ */
+ vbsf_update_inode(pInode, sf_i, &pReq->CreateParms.Info, pSuperInfo, fInodeLocked, 0 /*fSetAttrs*/);
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+ } else if (rc == VERR_INVALID_NAME) {
+ rc = -ENOENT; /* this can happen for names like 'foo*' on a Windows host */
+ } else {
+ LogFunc(("VbglR0SfHostReqCreate failed on %s: %Rrc\n", pPath->String.ach, rc));
+ rc = -EPROTO;
+ }
+ VbglR0PhysHeapFree(pReq);
+ }
+ else
+ rc = -ENOMEM;
+ }
+ }
+ } else {
+ LogFunc(("no dentry(%p) or inode(%p)\n", dentry, pInode));
+ rc = -EINVAL;
+ }
+ return rc;
}
+
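The freshness test above, jiffies - ts_up_to_date < cJiffiesInodeTTL, is the standard wrap-safe idiom (cf. the kernel's time_before() macros): unsigned subtraction gives the true age even across a counter overflow. Illustrated with a 32-bit counter:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 32-bit counter for illustration; ts was recorded just before wrap. */
        uint32_t ts_up_to_date = 0xfffffff0u;
        uint32_t jiffies_now   = 0x00000010u;  /* 0x20 ticks later, post-wrap */
        uint32_t ttl           = 0x100u;

        uint32_t age = jiffies_now - ts_up_to_date;   /* wraps back to 0x20 */
        printf("age=%#x, still fresh: %d\n", age, age < ttl);  /* prints 1 */
        return 0;
    }
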
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 18)
/**
- * [dentry] contains string encoded in coding system that corresponds
- * to [sf_g]->nls, we must convert it to UTF8 here and pass down to
- * [sf_make_path] which will allocate SHFLSTRING and fill it in
+ * Revalidate an inode for 2.4.
+ *
+ * This is called in the stat(), lstat() and readlink() code paths. In the stat
+ * cases the caller will use the result afterwards to produce the stat data.
+ *
+ * @note 2.4.x has a getattr() inode operation too, but it is not used.
*/
-int sf_path_from_dentry(const char *caller, struct sf_glob_info *sf_g,
- struct sf_inode_info *sf_i, struct dentry *dentry,
- SHFLSTRING ** result)
+int vbsf_inode_revalidate(struct dentry *dentry)
{
- int err;
- const char *d_name;
- size_t d_len;
- const char *name;
- size_t len = 0;
-
- TRACE();
- d_name = dentry->d_name.name;
- d_len = dentry->d_name.len;
-
- if (sf_g->nls) {
- size_t in_len, i, out_bound_len;
- const char *in;
- char *out;
-
- in = d_name;
- in_len = d_len;
-
- out_bound_len = PATH_MAX;
- out = kmalloc(out_bound_len, GFP_KERNEL);
- name = out;
-
- for (i = 0; i < d_len; ++i) {
- /* We renamed the linux kernel wchar_t type to linux_wchar_t in
- the-linux-kernel.h, as it conflicts with the C++ type of that name. */
- linux_wchar_t uni;
- int nb;
-
- nb = sf_g->nls->char2uni(in, in_len, &uni);
- if (nb < 0) {
- LogFunc(("nls->char2uni failed %x %d\n",
- *in, in_len));
- err = -EINVAL;
- goto fail1;
- }
- in_len -= nb;
- in += nb;
+ /*
+ * We pretend the inode is locked here, as 2.4.x does not have inode level locking.
+ */
+ return vbsf_inode_revalidate_worker(dentry, false /*fForced*/, true /*fInodeLocked*/);
+}
+#endif /* < 2.5.18 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
- nb = utf32_to_utf8(uni, out, out_bound_len);
-#else
- nb = utf8_wctomb(out, uni, out_bound_len);
-#endif
- if (nb < 0) {
- LogFunc(("nls->uni2char failed %x %d\n",
- uni, out_bound_len));
- err = -EINVAL;
- goto fail1;
- }
- out_bound_len -= nb;
- out += nb;
- len += nb;
- }
- if (len >= PATH_MAX - 1) {
- err = -ENAMETOOLONG;
- goto fail1;
- }
-
- LogFunc(("result(%d) = %.*s\n", len, len, name));
- *out = 0;
- } else {
- name = d_name;
- len = d_len;
- }
-
- err = sf_make_path(caller, sf_i, name, len, result);
- if (name != d_name)
- kfree(name);
-
- return err;
- fail1:
- kfree(name);
- return err;
+/**
+ * Similar to vbsf_inode_revalidate_worker(), but uses the associated host file
+ * handle, as that is quite a bit faster.
+ */
+int vbsf_inode_revalidate_with_handle(struct dentry *dentry, SHFLHANDLE hHostFile, bool fForced, bool fInodeLocked)
+{
+ int err;
+ struct inode *pInode = dentry ? dentry->d_inode : NULL;
+ if (!pInode) {
+ LogFunc(("no dentry(%p) or inode(%p)\n", dentry, pInode));
+ err = -EINVAL;
+ } else {
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(pInode);
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(pInode->i_sb);
+ AssertReturn(sf_i, -EINVAL);
+ AssertReturn(pSuperInfo, -EINVAL);
+
+ /*
+ * Can we get away without any action here?
+ */
+ if ( !fForced
+ && !sf_i->force_restat
+ && jiffies - sf_i->ts_up_to_date < pSuperInfo->cJiffiesInodeTTL)
+ err = 0;
+ else {
+ /*
+ * No, we have to query the file info from the host.
+ */
+ VBOXSFOBJINFOREQ *pReq = (VBOXSFOBJINFOREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq) {
+ RT_ZERO(*pReq);
+ err = VbglR0SfHostReqQueryObjInfo(pSuperInfo->map.root, pReq, hHostFile);
+ if (RT_SUCCESS(err)) {
+ /*
+ * Reset the TTL and copy the info over into the inode structure.
+ */
+ vbsf_update_inode(pInode, sf_i, &pReq->ObjInfo, pSuperInfo, fInodeLocked, 0 /*fSetAttrs*/);
+ } else {
+ LogFunc(("VbglR0SfHostReqQueryObjInfo failed on %#RX64: %Rrc\n", hHostFile, err));
+ err = -RTErrConvertToErrno(err);
+ }
+ VbglR0PhysHeapFree(pReq);
+ } else
+ err = -ENOMEM;
+ }
+ }
+ return err;
}
-int sf_nlscpy(struct sf_glob_info *sf_g,
- char *name, size_t name_bound_len,
- const unsigned char *utf8_name, size_t utf8_len)
-{
- if (sf_g->nls) {
- const char *in;
- char *out;
- size_t out_len;
- size_t out_bound_len;
- size_t in_bound_len;
-
- in = utf8_name;
- in_bound_len = utf8_len;
-
- out = name;
- out_len = 0;
- out_bound_len = name_bound_len;
-
- while (in_bound_len) {
- int nb;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
- unicode_t uni;
- nb = utf8_to_utf32(in, in_bound_len, &uni);
-#else
- linux_wchar_t uni;
+/* On 2.6+ this is a proxy for vbsf_inode_revalidate_worker(), which (as a side
+   effect) updates the inode attributes for [dentry] (given that [dentry]
+   has an inode at all); from these new attributes we derive [kstat] via
+   [generic_fillattr]. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 18)
- nb = utf8_mbtowc(&uni, in, in_bound_len);
-#endif
- if (nb < 0) {
- LogFunc(("utf8_mbtowc failed(%s) %x:%d\n",
- (const char *)utf8_name, *in,
- in_bound_len));
- return -EINVAL;
- }
- in += nb;
- in_bound_len -= nb;
-
- nb = sf_g->nls->uni2char(uni, out, out_bound_len);
- if (nb < 0) {
- LogFunc(("nls->uni2char failed(%s) %x:%d\n",
- utf8_name, uni, out_bound_len));
- return nb;
- }
- out += nb;
- out_bound_len -= nb;
- out_len += nb;
- }
-
- *out = 0;
- } else {
- if (utf8_len + 1 > name_bound_len)
- return -ENAMETOOLONG;
-
- memcpy(name, utf8_name, utf8_len + 1);
- }
- return 0;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+int vbsf_inode_getattr(const struct path *path, struct kstat *kstat, u32 request_mask, unsigned int flags)
+# else
+int vbsf_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *kstat)
+# endif
+{
+ int rc;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+ struct dentry *dentry = path->dentry;
+# endif
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+ SFLOGFLOW(("vbsf_inode_getattr: dentry=%p request_mask=%#x flags=%#x\n", dentry, request_mask, flags));
+# else
+ SFLOGFLOW(("vbsf_inode_getattr: dentry=%p\n", dentry));
+# endif
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+ /*
+ * With the introduction of statx() userland can control whether we
+ * update the inode information or not.
+ */
+ switch (flags & AT_STATX_SYNC_TYPE) {
+ default:
+ rc = vbsf_inode_revalidate_worker(dentry, false /*fForced*/, false /*fInodeLocked*/);
+ break;
+
+ case AT_STATX_FORCE_SYNC:
+ rc = vbsf_inode_revalidate_worker(dentry, true /*fForced*/, false /*fInodeLocked*/);
+ break;
+
+ case AT_STATX_DONT_SYNC:
+ rc = 0;
+ break;
+ }
+# else
+ rc = vbsf_inode_revalidate_worker(dentry, false /*fForced*/, false /*fInodeLocked*/);
+# endif
+ if (rc == 0) {
+ /* Do generic filling in of info. */
+ generic_fillattr(dentry->d_inode, kstat);
+
+ /* Add birth time. */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+ if (dentry->d_inode) {
+ struct vbsf_inode_info *pInodeInfo = VBSF_GET_INODE_INFO(dentry->d_inode);
+ if (pInodeInfo) {
+ vbsf_time_to_linux(&kstat->btime, &pInodeInfo->BirthTime);
+ kstat->result_mask |= STATX_BTIME;
+ }
+ }
+# endif
+
+ /*
+ * FsPerf shows the following numbers for sequential file access against
+ * a tmpfs folder on an AMD 1950X host running debian buster/sid:
+ *
+ * block size = r128600 ----- r128755 -----
+ * reads reads writes
+ * 4096 KB = 2254 MB/s 4953 MB/s 3668 MB/s
+ * 2048 KB = 2368 MB/s 4908 MB/s 3541 MB/s
+ * 1024 KB = 2208 MB/s 4011 MB/s 3291 MB/s
+ * 512 KB = 1908 MB/s 3399 MB/s 2721 MB/s
+ * 256 KB = 1625 MB/s 2679 MB/s 2251 MB/s
+ * 128 KB = 1413 MB/s 1967 MB/s 1684 MB/s
+ * 64 KB = 1152 MB/s 1409 MB/s 1265 MB/s
+ * 32 KB = 726 MB/s 815 MB/s 783 MB/s
+ * 16 KB = 683 MB/s 475 MB/s
+ * 8 KB = 294 MB/s 286 MB/s
+ * 4 KB = 145 MB/s 156 MB/s 149 MB/s
+ *
+ */
+ if (S_ISREG(kstat->mode))
+ kstat->blksize = _1M;
+ else if (S_ISDIR(kstat->mode))
+ /** @todo this may need more tuning after we rewrite the directory handling. */
+ kstat->blksize = _16K;
+ }
+ return rc;
}
+#endif /* >= 2.5.18 */
+
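Seen from userland, the AT_STATX_SYNC_TYPE handling above lets callers choose between cached and host-fresh attributes. A hedged example against an arbitrary vboxsf mount (the path is made up; statx(2) needs kernel 4.11+ and glibc 2.28+):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        struct statx stx;

        /* Force a host query regardless of the inode TTL... */
        if (statx(AT_FDCWD, "/mnt/shared/file.txt", AT_STATX_FORCE_SYNC,
                  STATX_BASIC_STATS | STATX_BTIME, &stx) == 0) {
            printf("size=%llu\n", (unsigned long long)stx.stx_size);
            if (stx.stx_mask & STATX_BTIME)  /* vboxsf fills in the birth time */
                printf("btime=%lld\n", (long long)stx.stx_btime.tv_sec);
        }

        /* ...or take whatever is cached without revalidating. */
        statx(AT_FDCWD, "/mnt/shared/file.txt", AT_STATX_DONT_SYNC,
              STATX_BASIC_STATS, &stx);
        return 0;
    }
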
-static struct sf_dir_buf *sf_dir_buf_alloc(void)
+/**
+ * Modify inode attributes.
+ */
+int vbsf_inode_setattr(struct dentry *dentry, struct iattr *iattr)
{
- struct sf_dir_buf *b;
-
- TRACE();
- b = kmalloc(sizeof(*b), GFP_KERNEL);
- if (!b) {
- LogRelFunc(("could not alloc directory buffer\n"));
- return NULL;
- }
-#ifdef USE_VMALLOC
- b->buf = vmalloc(DIR_BUFFER_SIZE);
+ struct inode *pInode = dentry->d_inode;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(pInode->i_sb);
+ struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(pInode);
+ int vrc;
+ int rc;
+
+ SFLOGFLOW(("vbsf_inode_setattr: dentry=%p inode=%p ia_valid=%#x %s\n",
+ dentry, pInode, iattr->ia_valid, sf_i ? sf_i->path->String.ach : NULL));
+ AssertReturn(sf_i, -EINVAL);
+
+ /*
+ * Need to check whether the caller is allowed to modify the attributes or not.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ rc = setattr_prepare(dentry, iattr);
#else
- b->buf = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL);
+ rc = inode_change_ok(pInode, iattr);
+#endif
+ if (rc == 0) {
+ /*
+ * Don't modify MTIME and CTIME for open(O_TRUNC) and ftruncate, those
+ * operations will set those timestamps automatically. Saves a host call.
+ */
+ unsigned fAttrs = iattr->ia_valid;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 15)
+ fAttrs &= ~ATTR_FILE;
#endif
- if (!b->buf) {
- kfree(b);
- LogRelFunc(("could not alloc directory buffer storage\n"));
- return NULL;
- }
-
- INIT_LIST_HEAD(&b->head);
- b->cEntries = 0;
- b->cbUsed = 0;
- b->cbFree = DIR_BUFFER_SIZE;
- return b;
+ if ( fAttrs == (ATTR_SIZE | ATTR_MTIME | ATTR_CTIME)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+ || (fAttrs & (ATTR_OPEN | ATTR_SIZE)) == (ATTR_OPEN | ATTR_SIZE)
+#endif
+ )
+ fAttrs &= ~(ATTR_MTIME | ATTR_CTIME);
+
+ /*
+ * We only implement a handful of attributes, so ignore any attempts
+ * at setting bits we don't support.
+ */
+ if (fAttrs & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE)) {
+ /*
+ * Try to find a handle which allows us to modify the attributes, otherwise
+ * open the file/dir/whatever.
+ */
+ union SetAttrReqs
+ {
+ VBOXSFCREATEREQ Create;
+ VBOXSFOBJINFOREQ Info;
+ VBOXSFSETFILESIZEREQ SetSize;
+ VBOXSFCLOSEREQ Close;
+ } *pReq;
+ size_t cbReq;
+ SHFLHANDLE hHostFile;
+ /** @todo ATTR_FILE (2.6.15+) could be helpful here if we like. */
+ struct vbsf_handle *pHandle = fAttrs & ATTR_SIZE
+ ? vbsf_handle_find(sf_i, VBSF_HANDLE_F_WRITE, 0)
+ : vbsf_handle_find(sf_i, 0, 0);
+ if (pHandle) {
+ hHostFile = pHandle->hHost;
+ cbReq = RT_MAX(sizeof(VBOXSFOBJINFOREQ), sizeof(VBOXSFSETFILESIZEREQ));
+ pReq = (union SetAttrReqs *)VbglR0PhysHeapAlloc(cbReq);
+ if (pReq) {
+ /* likely */
+ } else
+ rc = -ENOMEM;
+ } else {
+ hHostFile = SHFL_HANDLE_NIL;
+ cbReq = RT_MAX(sizeof(pReq->Info), sizeof(pReq->Create) + SHFLSTRING_HEADER_SIZE + sf_i->path->u16Size);
+ pReq = (union SetAttrReqs *)VbglR0PhysHeapAlloc(cbReq);
+ if (pReq) {
+ RT_ZERO(pReq->Create.CreateParms);
+ pReq->Create.CreateParms.Handle = SHFL_HANDLE_NIL;
+ pReq->Create.CreateParms.CreateFlags = SHFL_CF_ACT_OPEN_IF_EXISTS
+ | SHFL_CF_ACT_FAIL_IF_NEW
+ | SHFL_CF_ACCESS_ATTR_WRITE;
+ if (fAttrs & ATTR_SIZE)
+ pReq->Create.CreateParms.CreateFlags |= SHFL_CF_ACCESS_WRITE;
+ memcpy(&pReq->Create.StrPath, sf_i->path, SHFLSTRING_HEADER_SIZE + sf_i->path->u16Size);
+ vrc = VbglR0SfHostReqCreate(pSuperInfo->map.root, &pReq->Create);
+ if (RT_SUCCESS(vrc)) {
+ if (pReq->Create.CreateParms.Result == SHFL_FILE_EXISTS) {
+ hHostFile = pReq->Create.CreateParms.Handle;
+ Assert(hHostFile != SHFL_HANDLE_NIL);
+ vbsf_dentry_chain_increase_ttl(dentry);
+ } else {
+ LogFunc(("file %s does not exist\n", sf_i->path->String.utf8));
+ vbsf_dentry_invalidate_ttl(dentry);
+ sf_i->force_restat = true;
+ rc = -ENOENT;
+ }
+ } else {
+ rc = -RTErrConvertToErrno(vrc);
+ LogFunc(("VbglR0SfCreate(%s) failed vrc=%Rrc rc=%d\n", sf_i->path->String.ach, vrc, rc));
+ }
+ } else
+ rc = -ENOMEM;
+ }
+ if (rc == 0) {
+ /*
+ * Set mode and/or timestamps.
+ */
+ if (fAttrs & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME | ATTR_CTIME)) {
+ /* Fill in the attributes. Start by setting all to zero
+ since the host will ignore zeroed fields. */
+ RT_ZERO(pReq->Info.ObjInfo);
+
+ if (fAttrs & ATTR_MODE) {
+ pReq->Info.ObjInfo.Attr.fMode = sf_access_permissions_to_vbox(iattr->ia_mode);
+ if (iattr->ia_mode & S_IFDIR)
+ pReq->Info.ObjInfo.Attr.fMode |= RTFS_TYPE_DIRECTORY;
+ else if (iattr->ia_mode & S_IFLNK)
+ pReq->Info.ObjInfo.Attr.fMode |= RTFS_TYPE_SYMLINK;
+ else
+ pReq->Info.ObjInfo.Attr.fMode |= RTFS_TYPE_FILE;
+ }
+ if (fAttrs & ATTR_ATIME)
+ vbsf_time_to_vbox(&pReq->Info.ObjInfo.AccessTime, &iattr->ia_atime);
+ if (fAttrs & ATTR_MTIME)
+ vbsf_time_to_vbox(&pReq->Info.ObjInfo.ModificationTime, &iattr->ia_mtime);
+ if (fAttrs & ATTR_CTIME)
+ vbsf_time_to_vbox(&pReq->Info.ObjInfo.ChangeTime, &iattr->ia_ctime);
+
+ /* Make the change. */
+ vrc = VbglR0SfHostReqSetObjInfo(pSuperInfo->map.root, &pReq->Info, hHostFile);
+ if (RT_SUCCESS(vrc)) {
+ vbsf_update_inode(pInode, sf_i, &pReq->Info.ObjInfo, pSuperInfo, true /*fLocked*/, fAttrs);
+ } else {
+ rc = -RTErrConvertToErrno(vrc);
+ LogFunc(("VbglR0SfHostReqSetObjInfo(%s) failed vrc=%Rrc rc=%d\n", sf_i->path->String.ach, vrc, rc));
+ }
+ }
+
+ /*
+ * Change the file size.
+ * Note! The old API is more convenient here as it gives us up-to-date
+ * inode info back.
+ */
+ if ((fAttrs & ATTR_SIZE) && rc == 0) {
+ /*vrc = VbglR0SfHostReqSetFileSize(pSuperInfo->map.root, &pReq->SetSize, hHostFile, iattr->ia_size);
+ if (RT_SUCCESS(vrc)) {
+ i_size_write(pInode, iattr->ia_size);
+ } else if (vrc == VERR_NOT_IMPLEMENTED)*/ {
+ /* Fallback for pre 6.0 hosts: */
+ RT_ZERO(pReq->Info.ObjInfo);
+ pReq->Info.ObjInfo.cbObject = iattr->ia_size;
+ vrc = VbglR0SfHostReqSetFileSizeOld(pSuperInfo->map.root, &pReq->Info, hHostFile);
+ if (RT_SUCCESS(vrc))
+ vbsf_update_inode(pInode, sf_i, &pReq->Info.ObjInfo, pSuperInfo, true /*fLocked*/, fAttrs);
+ }
+ if (RT_SUCCESS(vrc)) {
+ /** @todo there is potentially more to be done here if there are mappings of
+ * the lovely file. */
+ } else {
+ rc = -RTErrConvertToErrno(vrc);
+ LogFunc(("VbglR0SfHostReqSetFileSize(%s, %#llx) failed vrc=%Rrc rc=%d\n",
+ sf_i->path->String.ach, (unsigned long long)iattr->ia_size, vrc, rc));
+ }
+ }
+
+ /*
+ * Clean up.
+ */
+ if (!pHandle) {
+ vrc = VbglR0SfHostReqClose(pSuperInfo->map.root, &pReq->Close, hHostFile);
+ if (RT_FAILURE(vrc))
+ LogFunc(("VbglR0SfHostReqClose(%s [%#llx]) failed vrc=%Rrc\n", sf_i->path->String.utf8, hHostFile, vrc));
+ }
+ }
+ if (pReq)
+ VbglR0PhysHeapFree(pReq);
+ if (pHandle)
+ vbsf_handle_release(pHandle, pSuperInfo, "vbsf_inode_setattr");
+ } else
+ SFLOGFLOW(("vbsf_inode_setattr: Nothing to do here: %#x (was %#x).\n", fAttrs, iattr->ia_valid));
+ }
+ return rc;
}
-static void sf_dir_buf_free(struct sf_dir_buf *b)
-{
- BUG_ON(!b || !b->buf);
- TRACE();
- list_del(&b->head);
-#ifdef USE_VMALLOC
- vfree(b->buf);
-#else
- kfree(b->buf);
-#endif
- kfree(b);
+static int vbsf_make_path(const char *caller, struct vbsf_inode_info *sf_i,
+ const char *d_name, size_t d_len, SHFLSTRING **result)
+{
+ size_t path_len, shflstring_len;
+ SHFLSTRING *tmp;
+ uint16_t p_len;
+ uint8_t *p_name;
+ int fRoot = 0;
+
+ TRACE();
+ p_len = sf_i->path->u16Length;
+ p_name = sf_i->path->String.utf8;
+
+ if (p_len == 1 && *p_name == '/') {
+ path_len = d_len + 1;
+ fRoot = 1;
+ } else {
+ /* lengths of constituents plus terminating zero plus slash */
+ path_len = p_len + d_len + 2;
+ if (path_len > 0xffff) {
+ LogFunc(("path too long. caller=%s, path_len=%zu\n",
+ caller, path_len));
+ return -ENAMETOOLONG;
+ }
+ }
+
+ shflstring_len = offsetof(SHFLSTRING, String.utf8) + path_len;
+ tmp = kmalloc(shflstring_len, GFP_KERNEL);
+ if (!tmp) {
+ LogRelFunc(("kmalloc failed, caller=%s\n", caller));
+ return -ENOMEM;
+ }
+ tmp->u16Length = path_len - 1;
+ tmp->u16Size = path_len;
+
+ if (fRoot)
+ memcpy(&tmp->String.utf8[0], d_name, d_len + 1);
+ else {
+ memcpy(&tmp->String.utf8[0], p_name, p_len);
+ tmp->String.utf8[p_len] = '/';
+ memcpy(&tmp->String.utf8[p_len + 1], d_name, d_len);
+ tmp->String.utf8[p_len + 1 + d_len] = '\0';
+ }
+
+ *result = tmp;
+ return 0;
}
+
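A worked example of the length bookkeeping in vbsf_make_path(): joining parent "/foo" (p_len 4) with entry "bar" (d_len 3) needs p_len + d_len + 2 = 9 bytes for the slash plus terminator, and the root-parent special case avoids a double slash. The non-root branch, standalone:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *parent = "/foo";          /* sf_i->path, u16Length == 4 */
        const char *name   = "bar";           /* dentry name, d_len == 3 */
        size_t p_len = strlen(parent), d_len = strlen(name);
        size_t path_len = p_len + d_len + 2;  /* slash + terminator, == 9 */
        char buf[32];

        memcpy(buf, parent, p_len);
        buf[p_len] = '/';
        memcpy(buf + p_len + 1, name, d_len);
        buf[p_len + 1 + d_len] = '\0';
        printf("%s (%zu bytes incl. NUL)\n", buf, path_len); /* /foo/bar (9 ...) */
        return 0;
    }
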
/**
- * Free the directory buffer.
+ * [dentry] contains a string encoded in the coding system that corresponds
+ * to [pSuperInfo]->nls; we must convert it to UTF-8 here and pass it down to
+ * [vbsf_make_path], which will allocate a SHFLSTRING and fill it in.
*/
-void sf_dir_info_free(struct sf_dir_info *p)
+int vbsf_path_from_dentry(struct vbsf_super_info *pSuperInfo, struct vbsf_inode_info *sf_i, struct dentry *dentry,
+ SHFLSTRING **result, const char *caller)
{
- struct list_head *list, *pos, *tmp;
+ int err;
+ const char *d_name;
+ size_t d_len;
+ const char *name;
+ size_t len = 0;
+
+ TRACE();
+ d_name = dentry->d_name.name;
+ d_len = dentry->d_name.len;
+
+ if (pSuperInfo->nls) {
+ size_t in_len, i, out_bound_len;
+ const char *in;
+ char *out;
+
+ in = d_name;
+ in_len = d_len;
+
+ out_bound_len = PATH_MAX;
+ out = kmalloc(out_bound_len, GFP_KERNEL);
+ name = out;
+
+ for (i = 0; i < d_len; ++i) {
+ /* We renamed the linux kernel wchar_t type to linux_wchar_t in
+ the-linux-kernel.h, as it conflicts with the C++ type of that name. */
+ linux_wchar_t uni;
+ int nb;
+
+ nb = pSuperInfo->nls->char2uni(in, in_len, &uni);
+ if (nb < 0) {
+ LogFunc(("nls->char2uni failed %x %d\n",
+ *in, in_len));
+ err = -EINVAL;
+ goto fail1;
+ }
+ in_len -= nb;
+ in += nb;
- TRACE();
- list = &p->info_list;
- list_for_each_safe(pos, tmp, list) {
- struct sf_dir_buf *b;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+ nb = utf32_to_utf8(uni, out, out_bound_len);
+#else
+ nb = utf8_wctomb(out, uni, out_bound_len);
+#endif
+ if (nb < 0) {
+ LogFunc(("nls->uni2char failed %x %d\n",
+ uni, out_bound_len));
+ err = -EINVAL;
+ goto fail1;
+ }
+ out_bound_len -= nb;
+ out += nb;
+ len += nb;
+ }
+ if (len >= PATH_MAX - 1) {
+ err = -ENAMETOOLONG;
+ goto fail1;
+ }
+
+ LogFunc(("result(%d) = %.*s\n", len, len, name));
+ *out = 0;
+ } else {
+ name = d_name;
+ len = d_len;
+ }
+
+ err = vbsf_make_path(caller, sf_i, name, len, result);
+ if (name != d_name)
+ kfree(name);
+
+ return err;
- b = list_entry(pos, struct sf_dir_buf, head);
- sf_dir_buf_free(b);
- }
- kfree(p);
+ fail1:
+ kfree(name);
+ return err;
}
-/**
- * Empty (but not free) the directory buffer.
- */
-void sf_dir_info_empty(struct sf_dir_info *p)
-{
- struct list_head *list, *pos, *tmp;
- TRACE();
- list = &p->info_list;
- list_for_each_safe(pos, tmp, list) {
- struct sf_dir_buf *b;
- b = list_entry(pos, struct sf_dir_buf, head);
- b->cEntries = 0;
- b->cbUsed = 0;
- b->cbFree = DIR_BUFFER_SIZE;
- }
-}
/**
- * Create a new directory buffer descriptor.
+ * This is called during name resolution/lookup to check if the @a dentry in the
+ * cache is still valid. The actual validation job is handled by
+ * vbsf_inode_revalidate_worker().
+ *
+ * @note Caller holds no relevant locks, just a dentry reference.
*/
-struct sf_dir_info *sf_dir_info_alloc(void)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+static int vbsf_dentry_revalidate(struct dentry *dentry, unsigned flags)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+static int vbsf_dentry_revalidate(struct dentry *dentry, struct nameidata *nd)
+#else
+static int vbsf_dentry_revalidate(struct dentry *dentry, int flags)
+#endif
{
- struct sf_dir_info *p;
-
- TRACE();
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (!p) {
- LogRelFunc(("could not alloc directory info\n"));
- return NULL;
- }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
+ int const flags = nd ? nd->flags : 0;
+#endif
- INIT_LIST_HEAD(&p->info_list);
- return p;
+ int rc;
+
+ Assert(dentry);
+ SFLOGFLOW(("vbsf_dentry_revalidate: %p %#x %s\n", dentry, flags,
+ dentry->d_inode ? VBSF_GET_INODE_INFO(dentry->d_inode)->path->String.ach : "<negative>"));
+
+ /*
+ * See Documentation/filesystems/vfs.txt why we skip LOOKUP_RCU.
+ *
+ * Also recommended: https://lwn.net/Articles/649115/
+ * https://lwn.net/Articles/649729/
+ * https://lwn.net/Articles/650786/
+ *
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+ if (flags & LOOKUP_RCU) {
+ rc = -ECHILD;
+ SFLOGFLOW(("vbsf_dentry_revalidate: RCU -> -ECHILD\n"));
+ } else
+#endif
+ {
+ /*
+ * Do we have an inode or not? If not it's probably a negative cache
+ * entry, otherwise most likely a positive one.
+ */
+ struct inode *pInode = dentry->d_inode;
+ if (pInode) {
+ /*
+ * Positive entry.
+ *
+ * Note! We're more aggressive here than other remote file systems;
+ * current (4.19) CIFS will, for instance, revalidate the inode
+ * and ignore the dentry timestamp for positive entries.
+ */
+ unsigned long const cJiffiesAge = jiffies - vbsf_dentry_get_update_jiffies(dentry);
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(dentry->d_sb);
+ if (cJiffiesAge < pSuperInfo->cJiffiesDirCacheTTL) {
+ SFLOGFLOW(("vbsf_dentry_revalidate: age: %lu vs. TTL %lu -> 1\n", cJiffiesAge, pSuperInfo->cJiffiesDirCacheTTL));
+ rc = 1;
+ } else if (!vbsf_inode_revalidate_worker(dentry, true /*fForced*/, false /*fInodeLocked*/)) {
+ vbsf_dentry_set_update_jiffies(dentry, jiffies);
+ SFLOGFLOW(("vbsf_dentry_revalidate: age: %lu vs. TTL %lu -> reval -> 1\n", cJiffiesAge, pSuperInfo->cJiffiesDirCacheTTL));
+ rc = 1;
+ } else {
+ SFLOGFLOW(("vbsf_dentry_revalidate: age: %lu vs. TTL %lu -> reval -> 0\n", cJiffiesAge, pSuperInfo->cJiffiesDirCacheTTL));
+ rc = 0;
+ }
+ } else {
+ /*
+ * Negative entry.
+ *
+ * Invalidate dentries for open and renames here as we'll revalidate
+ * these when taking the actual action (also good for case preservation
+ * if we do case-insensitive mounts against Windows + Mac hosts at some
+ * later point).
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+ if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 75)
+ if (flags & LOOKUP_CREATE)
+#else
+ if (0)
+#endif
+ {
+ SFLOGFLOW(("vbsf_dentry_revalidate: negative: create or rename target -> 0\n"));
+ rc = 0;
+ } else {
+ /* Can we skip revalidation based on TTL? */
+ unsigned long const cJiffiesAge = jiffies - vbsf_dentry_get_update_jiffies(dentry);
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(dentry->d_sb);
+ if (cJiffiesAge < pSuperInfo->cJiffiesDirCacheTTL) {
+ SFLOGFLOW(("vbsf_dentry_revalidate: negative: age: %lu vs. TTL %lu -> 1\n", cJiffiesAge, pSuperInfo->cJiffiesDirCacheTTL));
+ rc = 1;
+ } else {
+ /* We could revalidate it here, but we could instead just
+ have the caller kick it out. */
+ /** @todo stat the direntry and see if it exists now. */
+ SFLOGFLOW(("vbsf_dentry_revalidate: negative: age: %lu vs. TTL %lu -> 0\n", cJiffiesAge, pSuperInfo->cJiffiesDirCacheTTL));
+ rc = 0;
+ }
+ }
+ }
+ }
+ return rc;
}
-/**
- * Search for an empty directory content buffer.
- */
-static struct sf_dir_buf *sf_get_empty_dir_buf(struct sf_dir_info *sf_d)
-{
- struct list_head *list, *pos;
-
- list = &sf_d->info_list;
- list_for_each(pos, list) {
- struct sf_dir_buf *b;
-
- b = list_entry(pos, struct sf_dir_buf, head);
- if (!b)
- return NULL;
- else {
- if (b->cbUsed == 0)
- return b;
- }
- }
-
- return NULL;
-}
+#ifdef SFLOG_ENABLED
-int sf_dir_read_all(struct sf_glob_info *sf_g, struct sf_inode_info *sf_i,
- struct sf_dir_info *sf_d, SHFLHANDLE handle)
+/** For logging purposes only. */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+static int vbsf_dentry_delete(const struct dentry *pDirEntry)
+# else
+static int vbsf_dentry_delete(struct dentry *pDirEntry)
+# endif
{
- int err;
- SHFLSTRING *mask;
- struct sf_dir_buf *b;
-
- TRACE();
- err = sf_make_path(__func__, sf_i, "*", 1, &mask);
- if (err)
- goto fail0;
-
- for (;;) {
- int rc;
- void *buf;
- uint32_t cbSize;
- uint32_t cEntries;
-
- b = sf_get_empty_dir_buf(sf_d);
- if (!b) {
- b = sf_dir_buf_alloc();
- if (!b) {
- err = -ENOMEM;
- LogRelFunc(("could not alloc directory buffer\n"));
- goto fail1;
- }
- list_add(&b->head, &sf_d->info_list);
- }
-
- buf = b->buf;
- cbSize = b->cbFree;
-
- rc = VbglR0SfDirInfo(&client_handle, &sf_g->map, handle, mask,
- 0, 0, &cbSize, buf, &cEntries);
- switch (rc) {
- case VINF_SUCCESS:
- RT_FALL_THRU();
- case VERR_NO_MORE_FILES:
- break;
- case VERR_NO_TRANSLATION:
- LogFunc(("host could not translate entry\n"));
- /* XXX */
- break;
- default:
- err = -RTErrConvertToErrno(rc);
- LogFunc(("VbglR0SfDirInfo failed rc=%Rrc\n", rc));
- goto fail1;
- }
-
- b->cEntries += cEntries;
- b->cbFree -= cbSize;
- b->cbUsed += cbSize;
-
- if (RT_FAILURE(rc))
- break;
- }
- err = 0;
-
- fail1:
- kfree(mask);
-
- fail0:
- return err;
+ SFLOGFLOW(("vbsf_dentry_delete: %p\n", pDirEntry));
+ return 0;
}
-int sf_get_volume_info(struct super_block *sb, STRUCT_STATFS * stat)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+/** For logging purposes only. */
+static int vbsf_dentry_init(struct dentry *pDirEntry)
{
- struct sf_glob_info *sf_g;
- SHFLVOLINFO SHFLVolumeInfo;
- uint32_t cbBuffer;
- int rc;
-
- sf_g = GET_GLOB_INFO(sb);
- cbBuffer = sizeof(SHFLVolumeInfo);
- rc = VbglR0SfFsInfo(&client_handle, &sf_g->map, 0,
- SHFL_INFO_GET | SHFL_INFO_VOLUME, &cbBuffer,
- (PSHFLDIRINFO) & SHFLVolumeInfo);
- if (RT_FAILURE(rc))
- return -RTErrConvertToErrno(rc);
-
- stat->f_type = NFS_SUPER_MAGIC; /* XXX vboxsf type? */
- stat->f_bsize = SHFLVolumeInfo.ulBytesPerAllocationUnit;
- stat->f_blocks = SHFLVolumeInfo.ullTotalAllocationBytes
- / SHFLVolumeInfo.ulBytesPerAllocationUnit;
- stat->f_bfree = SHFLVolumeInfo.ullAvailableAllocationBytes
- / SHFLVolumeInfo.ulBytesPerAllocationUnit;
- stat->f_bavail = SHFLVolumeInfo.ullAvailableAllocationBytes
- / SHFLVolumeInfo.ulBytesPerAllocationUnit;
- stat->f_files = 1000;
- stat->f_ffree = 1000; /* don't return 0 here since the guest may think
- * that it is not possible to create any more files */
- stat->f_fsid.val[0] = 0;
- stat->f_fsid.val[1] = 0;
- stat->f_namelen = 255;
- return 0;
+ SFLOGFLOW(("vbsf_dentry_init: %p\n", pDirEntry));
+ return 0;
}
+# endif
-struct dentry_operations sf_dentry_ops = {
- .d_revalidate = sf_dentry_revalidate
-};
-
-int sf_init_backing_dev(struct sf_glob_info *sf_g)
-{
- int rc = 0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
- /* Each new shared folder map gets a new uint64_t identifier,
- * allocated in sequence. We ASSUME the sequence will not wrap. */
- static uint64_t s_u64Sequence = 0;
- uint64_t u64CurrentSequence = ASMAtomicIncU64(&s_u64Sequence);
-
- sf_g->bdi.ra_pages = 0; /* No readahead */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)
- sf_g->bdi.capabilities = BDI_CAP_MAP_DIRECT /* MAP_SHARED */
- | BDI_CAP_MAP_COPY /* MAP_PRIVATE */
- | BDI_CAP_READ_MAP /* can be mapped for reading */
- | BDI_CAP_WRITE_MAP /* can be mapped for writing */
- | BDI_CAP_EXEC_MAP; /* can be mapped for execution */
-#endif /* >= 2.6.12 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
- rc = bdi_init(&sf_g->bdi);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
- if (!rc)
- rc = bdi_register(&sf_g->bdi, NULL, "vboxsf-%llu",
- (unsigned long long)u64CurrentSequence);
-#endif /* >= 2.6.26 */
-#endif /* >= 2.6.24 */
-#endif /* >= 2.6.0 && <= 3.19.0 */
- return rc;
-}
+#endif /* SFLOG_ENABLED */
-void sf_done_backing_dev(struct sf_glob_info *sf_g)
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
- bdi_destroy(&sf_g->bdi); /* includes bdi_unregister() */
+/**
+ * Directory entry operations.
+ *
+ * Since 2.6.38 this is used via the super_block::s_d_op member.
+ */
+struct dentry_operations vbsf_dentry_ops = {
+ .d_revalidate = vbsf_dentry_revalidate,
+#ifdef SFLOG_ENABLED
+ .d_delete = vbsf_dentry_delete,
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+ .d_init = vbsf_dentry_init,
+# endif
#endif
-}
+};
+
diff --git a/ubuntu/vbox/vboxsf/vbsfmount.h b/ubuntu/vbox/vboxsf/vbsfmount.h
index c14e5284a537..cc1a05139ae8 100644
--- a/ubuntu/vbox/vboxsf/vbsfmount.h
+++ b/ubuntu/vbox/vboxsf/vbsfmount.h
@@ -37,54 +37,131 @@
/* Linux constrains the size of the data mount argument to PAGE_SIZE - 1. */
#define MAX_HOST_NAME 256
#define MAX_NLS_NAME 32
+#define VBSF_DEFAULT_TTL_MS 200
#define VBSF_MOUNT_SIGNATURE_BYTE_0 '\377'
#define VBSF_MOUNT_SIGNATURE_BYTE_1 '\376'
#define VBSF_MOUNT_SIGNATURE_BYTE_2 '\375'
+/**
+ * VBox Linux Shared Folders VFS caching mode.
+ */
+enum vbsf_cache_mode {
+ /** Use the kernel module's default caching mode (kVbsfCacheMode_Strict). */
+ kVbsfCacheMode_Default = 0,
+ /** No caching, go to the host for everything. This will have some minor
+ * coherency issues for memory mapping with unsynced dirty pages. */
+ kVbsfCacheMode_None,
+ /** No caching, except for files with writable memory mappings.
+ * (Note to future: if we do oplock-like stuff, it goes in here.) */
+ kVbsfCacheMode_Strict,
+ /** Use page cache for reads.
+ * This improves guest performance for read-intensive jobs, like compiling and
+ * building. The flip side is that the guest may not see host modifications in a
+ * timely manner and possibly update files with out-of-date cache information,
+ * as there exists no protocol for the host to notify the guest about file
+ * modifications. */
+ kVbsfCacheMode_Read,
+ /** Use page cache for both reads and writes as far as that's possible.
+ * This is good for guest performance, but the price is that the guest may
+ * ignore host changes and the host may not see guest changes in a timely
+ * manner. */
+ kVbsfCacheMode_ReadWrite,
+ /** End of valid values (exclusive). */
+ kVbsfCacheMode_End,
+ /** Make sure the enum is sizeof(int32_t). */
+ kVbsfCacheMode_32BitHack = 0x7fffffff
+};
+
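A sketch of how a mounter might map an option string onto this enum; the option names here are assumptions for illustration, not necessarily what mount.vboxsf accepts (assumes the enum above is in scope):

    #include <string.h>

    static enum vbsf_cache_mode vbsf_cache_mode_from_str(const char *psz)
    {
        if (strcmp(psz, "none") == 0)      return kVbsfCacheMode_None;
        if (strcmp(psz, "strict") == 0)    return kVbsfCacheMode_Strict;
        if (strcmp(psz, "read") == 0)      return kVbsfCacheMode_Read;
        if (strcmp(psz, "readwrite") == 0) return kVbsfCacheMode_ReadWrite;
        return kVbsfCacheMode_Default;     /* fall back rather than fail */
    }
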
+/**
+ * VBox Linux Shared Folders VFS mount options.
+ */
struct vbsf_mount_info_new {
- /*
- * The old version of the mount_info struct started with a
- * char name[MAX_HOST_NAME] field, where name cannot be '\0'.
- * So the new version of the mount_info struct starts with a
- * nullchar field which is always 0 so that we can detect and
- * reject the old structure being passed.
- */
- char nullchar;
- char signature[3]; /* signature */
- int length; /* length of the whole structure */
- char name[MAX_HOST_NAME]; /* share name */
- char nls_name[MAX_NLS_NAME]; /* name of an I/O charset */
- int uid; /* user ID for all entries, default 0=root */
- int gid; /* group ID for all entries, default 0=root */
- int ttl; /* time to live */
- int dmode; /* mode for directories if != 0xffffffff */
- int fmode; /* mode for regular files if != 0xffffffff */
- int dmask; /* umask applied to directories */
- int fmask; /* umask applied to regular files */
- char tag[32]; /**< Mount tag for VBoxService automounter. @since 6.0 */
+ /**
+ * The old version of the mount_info struct started with a
+ * char name[MAX_HOST_NAME] field, where name cannot be '\0'.
+ * So the new version of the mount_info struct starts with a
+ * nullchar field which is always 0 so that we can detect and
+ * reject the old structure being passed.
+ */
+ char nullchar;
+ /** Signature */
+ char signature[3];
+ /** Length of the whole structure */
+ int length;
+ /** Share name */
+ char name[MAX_HOST_NAME];
+ /** Name of an I/O charset */
+ char nls_name[MAX_NLS_NAME];
+ /** User ID for all entries, default 0=root */
+ int uid;
+ /** Group ID for all entries, default 0=root */
+ int gid;
+ /** Directory entry and inode time to live in milliseconds.
+ * -1 for kernel default, 0 to disable caching.
+ * @sa vbsf_mount_info_new::msDirCacheTTL, vbsf_mount_info_new::msInodeTTL */
+ int ttl;
+ /** Mode for directories if != -1. */
+ int dmode;
+ /** Mode for regular files if != -1. */
+ int fmode;
+ /** umask applied to directories */
+ int dmask;
+ /** umask applied to regular files */
+ int fmask;
+ /** Mount tag for VBoxService automounter.
+ * @since 6.0.0 */
+ char szTag[32];
+ /** Max pages to read & write at a time.
+ * @since 6.0.6 */
+ uint32_t cMaxIoPages;
+ /** The directory content buffer size. Set to 0 for kernel module default.
+ * Larger value reduces the number of host calls on large directories. */
+ uint32_t cbDirBuf;
+ /** The time to live for directory entries (in milliseconds). @a ttl is used
+ * if negative.
+ * @since 6.0.6 */
+ int32_t msDirCacheTTL;
+ /** The time to live for inode information (in milliseconds). @a ttl is used
+ * if negative.
+ * @since 6.0.6 */
+ int32_t msInodeTTL;
+ /** The cache and coherency mode.
+ * @since 6.0.6 */
+ enum vbsf_cache_mode enmCacheMode;
};
+#ifdef AssertCompileSize
+AssertCompileSize(struct vbsf_mount_info_new, 2*4 + MAX_HOST_NAME + MAX_NLS_NAME + 7*4 + 32 + 5*4);
+#endif
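
For illustration, a userspace mounter fills this structure and hands it to mount(2) as the data argument; normally mount.vboxsf does this for you. A hedged sketch (share name and mount point are examples):

    #include <string.h>
    #include <sys/mount.h>
    #include "vbsfmount.h"   /* this header, assumed on the include path */

    int mount_share(void)
    {
        struct vbsf_mount_info_new info;
        memset(&info, 0, sizeof(info));
        info.nullchar     = '\0';    /* rejects the old struct layout */
        info.signature[0] = VBSF_MOUNT_SIGNATURE_BYTE_0;
        info.signature[1] = VBSF_MOUNT_SIGNATURE_BYTE_1;
        info.signature[2] = VBSF_MOUNT_SIGNATURE_BYTE_2;
        info.length       = sizeof(info);
        strncpy(info.name, "myshare", MAX_HOST_NAME - 1);     /* example share */
        info.ttl = info.msDirCacheTTL = info.msInodeTTL = -1; /* kernel defaults */
        info.dmode = info.fmode = -1;                         /* no fixed modes */
        info.enmCacheMode = kVbsfCacheMode_Default;
        return mount("myshare", "/mnt/shared", "vboxsf", 0, &info);
    }
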
+/**
+ * For use with the vbsfmount_complete() helper.
+ */
struct vbsf_mount_opts {
- int uid;
- int gid;
- int ttl;
- int dmode;
- int fmode;
- int dmask;
- int fmask;
- int ronly;
- int sloppy;
- int noexec;
- int nodev;
- int nosuid;
- int remount;
- char nls_name[MAX_NLS_NAME];
- char *convertcp;
+ int ttl;
+ int32_t msDirCacheTTL;
+ int32_t msInodeTTL;
+ uint32_t cMaxIoPages;
+ uint32_t cbDirBuf;
+ enum vbsf_cache_mode enmCacheMode;
+ int uid;
+ int gid;
+ int dmode;
+ int fmode;
+ int dmask;
+ int fmask;
+ int ronly;
+ int sloppy;
+ int noexec;
+ int nodev;
+ int nosuid;
+ int remount;
+ char nls_name[MAX_NLS_NAME];
+ char *convertcp;
};
/** Completes the mount operation by adding the new mount point to mtab if required. */
int vbsfmount_complete(const char *host_name, const char *mount_point,
- unsigned long flags, struct vbsf_mount_opts *opts);
+ unsigned long flags, struct vbsf_mount_opts *opts);
#endif /* !GA_INCLUDED_SRC_linux_sharedfolders_vbsfmount_h */
diff --git a/ubuntu/vbox/vboxsf/version-generated.h b/ubuntu/vbox/vboxsf/version-generated.h
index ff669b300eaa..85722b642c49 100644
--- a/ubuntu/vbox/vboxsf/version-generated.h
+++ b/ubuntu/vbox/vboxsf/version-generated.h
@@ -3,9 +3,9 @@
#define VBOX_VERSION_MAJOR 6
#define VBOX_VERSION_MINOR 0
-#define VBOX_VERSION_BUILD 4
-#define VBOX_VERSION_STRING_RAW "6.0.4"
-#define VBOX_VERSION_STRING "6.0.4_KernelUbuntu"
+#define VBOX_VERSION_BUILD 6
+#define VBOX_VERSION_STRING_RAW "6.0.6"
+#define VBOX_VERSION_STRING "6.0.6_KernelUbuntu"
#define VBOX_API_VERSION_STRING "6_0"
#define VBOX_PRIVATE_BUILD_DESC "Private build by buildd"
diff --git a/ubuntu/vbox/vboxsf/vfsmod.c b/ubuntu/vbox/vboxsf/vfsmod.c
index dd6eb406a0af..b125e938129b 100644
--- a/ubuntu/vbox/vboxsf/vfsmod.c
+++ b/ubuntu/vbox/vboxsf/vfsmod.c
@@ -30,656 +30,1018 @@
/**
* @note Anyone wishing to make changes here might wish to take a look at
- * https://github.com/torvalds/linux/blob/master/Documentation/filesystems/vfs.txt
+ * https://github.com/torvalds/linux/blob/master/Documentation/filesystems/vfs.txt
* which seems to be the closest there is to official documentation on
* writing filesystem drivers for Linux.
+ *
+ * See also: http://us1.samba.org/samba/ftp/cifs-cvs/ols2006-fs-tutorial-smf.odp
*/
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
#include "vfsmod.h"
#include "version-generated.h"
#include "revision-generated.h"
#include "product-generated.h"
-#include "VBoxGuestR0LibInternal.h"
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
# include <uapi/linux/mount.h> /* for MS_REMOUNT */
#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
# include <linux/mount.h>
#endif
#include <linux/seq_file.h>
+#include <linux/vfs.h>
+#include <VBox/err.h>
+#include <iprt/path.h>
-MODULE_DESCRIPTION(VBOX_PRODUCT " VFS Module for Host File System Access");
-MODULE_AUTHOR(VBOX_VENDOR);
-MODULE_LICENSE("GPL and additional rights");
-#ifdef MODULE_ALIAS_FS
-MODULE_ALIAS_FS("vboxsf");
-#endif
-#ifdef MODULE_VERSION
-MODULE_VERSION(VBOX_VERSION_STRING " r" RT_XSTR(VBOX_SVN_REV));
-#endif
-/* globals */
-VBGLSFCLIENT client_handle;
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define VBSF_DEFAULT_MAX_IO_PAGES RT_MIN(_16K / sizeof(RTGCPHYS64) /* => 8MB buffer */, VMMDEV_MAX_HGCM_DATA_SIZE >> PAGE_SHIFT)
+#define VBSF_DEFAULT_DIR_BUF_SIZE _64K
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+VBGLSFCLIENT g_SfClient;
+uint32_t g_fHostFeatures = 0;
+/** Last valid shared folders function number. */
+uint32_t g_uSfLastFunction = SHFL_FN_SET_FILE_SIZE;
+/** Shared folders features. */
+uint64_t g_fSfFeatures = 0;
+
+/** Protects all the vbsf_inode_info::HandleList lists. */
+spinlock_t g_SfHandleLock;
+
+/** The 'follow_symlinks' module parameter.
+ * @todo Figure out how to do this for 2.4.x! */
+static int g_fFollowSymlinks = 0;
-/* forward declarations */
-static struct super_operations sf_super_ops;
+/* forward declaration */
+static struct super_operations g_vbsf_super_ops;
+
+
+
+/**
+ * Copies options from the mount info structure into @a pSuperInfo.
+ *
+ * This is used both by vbsf_super_info_alloc_and_map_it() and
+ * vbsf_remount_fs().
+ */
+static void vbsf_super_info_copy_remount_options(struct vbsf_super_info *pSuperInfo, struct vbsf_mount_info_new *info)
+{
+ pSuperInfo->uid = info->uid;
+ pSuperInfo->gid = info->gid;
+
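+ /* info->length is set by the mount helper to the size of the structure it was built with, so fields added after the original layout may only be read after checking it. */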
+ if ((unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, szTag)) {
+ /* new fields */
+ pSuperInfo->dmode = info->dmode;
+ pSuperInfo->fmode = info->fmode;
+ pSuperInfo->dmask = info->dmask;
+ pSuperInfo->fmask = info->fmask;
+ } else {
+ pSuperInfo->dmode = ~0;
+ pSuperInfo->fmode = ~0;
+ }
+
+ if ((unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, cMaxIoPages)) {
+ AssertCompile(sizeof(pSuperInfo->szTag) >= sizeof(info->szTag));
+ memcpy(pSuperInfo->szTag, info->szTag, sizeof(info->szTag));
+ pSuperInfo->szTag[sizeof(pSuperInfo->szTag) - 1] = '\0';
+ } else {
+ pSuperInfo->szTag[0] = '\0';
+ }
+
+ /* The max number of pages in an I/O request. This must take into
+ account that the physical heap generally grows in 64 KB chunks,
+ so we should not try to push that limit. It also needs to take
+ into account that the host will allocate temporary heap buffers
+ for the I/O bytes we send/receive, so don't push the host heap
+ too hard as we'd have to retry with smaller requests when this
+ happens, which isn't too efficient. */
+ pSuperInfo->cMaxIoPages = VBSF_DEFAULT_MAX_IO_PAGES;
+ if ( (unsigned)info->length >= sizeof(struct vbsf_mount_info_new)
+ && info->cMaxIoPages > 0) {
+ if (info->cMaxIoPages <= VMMDEV_MAX_HGCM_DATA_SIZE >> PAGE_SHIFT)
+ pSuperInfo->cMaxIoPages = RT_MAX(info->cMaxIoPages, 2); /* read_iter/write_iter requires a minimum of 2. */
+ else
+ printk(KERN_WARNING "vboxsf: max I/O page count (%#x) is out of range, using default (%#x) instead.\n",
+ info->cMaxIoPages, pSuperInfo->cMaxIoPages);
+ }
+
+ pSuperInfo->cbDirBuf = VBSF_DEFAULT_DIR_BUF_SIZE;
+ if ( (unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, cbDirBuf)
+ && info->cbDirBuf > 0) {
+ if (info->cbDirBuf <= _16M)
+ pSuperInfo->cbDirBuf = RT_ALIGN_32(info->cbDirBuf, PAGE_SIZE);
+ else
+ printk(KERN_WARNING "vboxsf: max directory buffer size (%#x) is out of range, using default (%#x) instead.\n",
+ info->cMaxIoPages, pSuperInfo->cMaxIoPages);
+ }
+
+ /*
+ * TTLs.
+ */
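+ /* ttl == -1 selects the kernel default, 0 disables caching, and a positive value is the TTL in milliseconds unless overridden by msDirCacheTTL / msInodeTTL below. */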
+ pSuperInfo->msTTL = info->ttl;
+ if (info->ttl > 0)
+ pSuperInfo->cJiffiesDirCacheTTL = msecs_to_jiffies(info->ttl);
+ else if (info->ttl != -1) /* i.e. zero or some other non-default negative value: disable caching */
+ pSuperInfo->cJiffiesDirCacheTTL = pSuperInfo->msTTL = 0;
+ else
+ pSuperInfo->cJiffiesDirCacheTTL = msecs_to_jiffies(VBSF_DEFAULT_TTL_MS);
+ pSuperInfo->cJiffiesInodeTTL = pSuperInfo->cJiffiesDirCacheTTL;
+
+ pSuperInfo->msDirCacheTTL = -1;
+ if ( (unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, msDirCacheTTL)
+ && info->msDirCacheTTL >= 0) {
+ if (info->msDirCacheTTL > 0) {
+ pSuperInfo->msDirCacheTTL = info->msDirCacheTTL;
+ pSuperInfo->cJiffiesDirCacheTTL = msecs_to_jiffies(info->msDirCacheTTL);
+ } else {
+ pSuperInfo->msDirCacheTTL = 0;
+ pSuperInfo->cJiffiesDirCacheTTL = 0;
+ }
+ }
+
+ pSuperInfo->msInodeTTL = -1;
+ if ( (unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, msInodeTTL)
+ && info->msInodeTTL >= 0) {
+ if (info->msInodeTTL > 0) {
+ pSuperInfo->msInodeTTL = info->msInodeTTL;
+ pSuperInfo->cJiffiesInodeTTL = msecs_to_jiffies(info->msInodeTTL);
+ } else {
+ pSuperInfo->msInodeTTL = 0;
+ pSuperInfo->cJiffiesInodeTTL = 0;
+ }
+ }
+
+ /*
+ * Caching.
+ */
+ pSuperInfo->enmCacheMode = kVbsfCacheMode_Strict;
+ if ((unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, enmCacheMode)) {
+ switch (info->enmCacheMode) {
+ case kVbsfCacheMode_Default:
+ case kVbsfCacheMode_Strict:
+ break;
+ case kVbsfCacheMode_None:
+ case kVbsfCacheMode_Read:
+ case kVbsfCacheMode_ReadWrite:
+ pSuperInfo->enmCacheMode = info->enmCacheMode;
+ break;
+ default:
+ printk(KERN_WARNING "vboxsf: cache mode (%#x) is out of range, using default instead.\n", info->enmCacheMode);
+ break;
+ }
+ }
+}
-/* allocate global info, try to map host share */
-static int sf_glob_alloc(struct vbsf_mount_info_new *info,
- struct sf_glob_info **sf_gp)
+/**
+ * Allocate the super info structure and try map the host share.
+ */
+static int vbsf_super_info_alloc_and_map_it(struct vbsf_mount_info_new *info, struct vbsf_super_info **sf_gp)
{
- int err, rc;
- SHFLSTRING *str_name;
- size_t name_len, str_len;
- struct sf_glob_info *sf_g;
-
- TRACE();
- sf_g = kmalloc(sizeof(*sf_g), GFP_KERNEL);
- if (!sf_g) {
- err = -ENOMEM;
- LogRelFunc(("could not allocate memory for global info\n"));
- goto fail0;
- }
-
- RT_ZERO(*sf_g);
-
- if (info->nullchar != '\0'
- || info->signature[0] != VBSF_MOUNT_SIGNATURE_BYTE_0
- || info->signature[1] != VBSF_MOUNT_SIGNATURE_BYTE_1
- || info->signature[2] != VBSF_MOUNT_SIGNATURE_BYTE_2) {
- err = -EINVAL;
- goto fail1;
- }
-
- info->name[sizeof(info->name) - 1] = 0;
- info->nls_name[sizeof(info->nls_name) - 1] = 0;
-
- name_len = strlen(info->name);
- str_len = offsetof(SHFLSTRING, String.utf8) + name_len + 1;
- str_name = kmalloc(str_len, GFP_KERNEL);
- if (!str_name) {
- err = -ENOMEM;
- LogRelFunc(("could not allocate memory for host name\n"));
- goto fail1;
- }
-
- str_name->u16Length = name_len;
- str_name->u16Size = name_len + 1;
- memcpy(str_name->String.utf8, info->name, name_len + 1);
-
-#define _IS_UTF8(_str) \
- (strcmp(_str, "utf8") == 0)
-#define _IS_EMPTY(_str) \
- (strcmp(_str, "") == 0)
-
- /* Check if NLS charset is valid and not points to UTF8 table */
- if (info->nls_name[0]) {
- if (_IS_UTF8(info->nls_name))
- sf_g->nls = NULL;
- else {
- sf_g->nls = load_nls(info->nls_name);
- if (!sf_g->nls) {
- err = -EINVAL;
- LogFunc(("failed to load nls %s\n",
- info->nls_name));
- kfree(str_name);
- goto fail1;
- }
- }
- } else {
+ int rc;
+ SHFLSTRING *str_name;
+ size_t name_len, str_len;
+ struct vbsf_super_info *pSuperInfo;
+
+ TRACE();
+
+ /*
+ * Validate info.
+ */
+ if ( info->nullchar != '\0'
+ || info->signature[0] != VBSF_MOUNT_SIGNATURE_BYTE_0
+ || info->signature[1] != VBSF_MOUNT_SIGNATURE_BYTE_1
+ || info->signature[2] != VBSF_MOUNT_SIGNATURE_BYTE_2) {
+ SFLOGRELBOTH(("vboxsf: Invalid info signature: %#x %#x %#x %#x!\n",
+ info->nullchar, info->signature[0], info->signature[1], info->signature[2]));
+ return -EINVAL;
+ }
+ name_len = RTStrNLen(info->name, sizeof(info->name));
+ if (name_len >= sizeof(info->name)) {
+ SFLOGRELBOTH(("vboxsf: Specified shared folder name is not zero terminated!\n"));
+ return -EINVAL;
+ }
+ if (RTStrNLen(info->nls_name, sizeof(info->nls_name)) >= sizeof(info->nls_name)) {
+ SFLOGRELBOTH(("vboxsf: Specified nls name is not zero terminated!\n"));
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate memory.
+ */
+ str_len = offsetof(SHFLSTRING, String.utf8) + name_len + 1;
+ str_name = (PSHFLSTRING)kmalloc(str_len, GFP_KERNEL);
+ pSuperInfo = (struct vbsf_super_info *)kmalloc(sizeof(*pSuperInfo), GFP_KERNEL);
+ if (pSuperInfo && str_name) {
+ RT_ZERO(*pSuperInfo);
+
+ str_name->u16Length = name_len;
+ str_name->u16Size = name_len + 1;
+ memcpy(str_name->String.utf8, info->name, name_len + 1);
+
+ /*
+ * Init the NLS support, if needed.
+ */
+ rc = 0;
+#define _IS_UTF8(_str) (strcmp(_str, "utf8") == 0)
+#define _IS_EMPTY(_str) (strcmp(_str, "") == 0)
+
+ /* Check that the NLS charset is valid and does not point to the UTF-8 table. */
+ pSuperInfo->fNlsIsUtf8 = true;
+ if (info->nls_name[0]) {
+ if (_IS_UTF8(info->nls_name)) {
+ SFLOGFLOW(("vbsf_super_info_alloc_and_map_it: nls=utf8\n"));
+ pSuperInfo->nls = NULL;
+ } else {
+ pSuperInfo->fNlsIsUtf8 = false;
+ pSuperInfo->nls = load_nls(info->nls_name);
+ if (pSuperInfo->nls) {
+ SFLOGFLOW(("vbsf_super_info_alloc_and_map_it: nls=%s -> %p\n", info->nls_name, pSuperInfo->nls));
+ } else {
+ SFLOGRELBOTH(("vboxsf: Failed to load nls '%s'!\n", info->nls_name));
+ rc = -EINVAL;
+ }
+ }
+ } else {
#ifdef CONFIG_NLS_DEFAULT
- /* If no NLS charset specified, try to load the default
- * one if it's not points to UTF8. */
- if (!_IS_UTF8(CONFIG_NLS_DEFAULT)
- && !_IS_EMPTY(CONFIG_NLS_DEFAULT))
- sf_g->nls = load_nls_default();
- else
- sf_g->nls = NULL;
+ /* If no NLS charset was specified, try to load the default
+ * one unless it points to UTF-8. */
+ if (!_IS_UTF8(CONFIG_NLS_DEFAULT)
+ && !_IS_EMPTY(CONFIG_NLS_DEFAULT)) {
+ pSuperInfo->fNlsIsUtf8 = false;
+ pSuperInfo->nls = load_nls_default();
+ SFLOGFLOW(("vbsf_super_info_alloc_and_map_it: CONFIG_NLS_DEFAULT=%s -> %p\n", CONFIG_NLS_DEFAULT, pSuperInfo->nls));
+ } else {
+ SFLOGFLOW(("vbsf_super_info_alloc_and_map_it: nls=utf8 (default %s)\n", CONFIG_NLS_DEFAULT));
+ pSuperInfo->nls = NULL;
+ }
#else
- sf_g->nls = NULL;
+ SFLOGFLOW(("vbsf_super_info_alloc_and_map_it: nls=utf8 (no default)\n"));
+ pSuperInfo->nls = NULL;
#endif
-
+ }
#undef _IS_UTF8
#undef _IS_EMPTY
- }
-
- rc = VbglR0SfMapFolder(&client_handle, str_name, &sf_g->map);
- kfree(str_name);
-
- if (RT_FAILURE(rc)) {
- err = -EPROTO;
- LogFunc(("VbglR0SfMapFolder failed rc=%d\n", rc));
- goto fail2;
- }
-
- sf_g->ttl = info->ttl;
- sf_g->uid = info->uid;
- sf_g->gid = info->gid;
-
- if ((unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, tag)) {
- /* new fields */
- sf_g->dmode = info->dmode;
- sf_g->fmode = info->fmode;
- sf_g->dmask = info->dmask;
- sf_g->fmask = info->fmask;
- } else {
- sf_g->dmode = ~0;
- sf_g->fmode = ~0;
- }
-
- if ((unsigned)info->length >= sizeof(struct vbsf_mount_info_new)) {
- AssertCompile(sizeof(sf_g->tag) >= sizeof(info->tag));
- memcpy(sf_g->tag, info->tag, sizeof(info->tag));
- sf_g->tag[sizeof(sf_g->tag) - 1] = '\0';
- } else {
- sf_g->tag[0] = '\0';
- }
-
- *sf_gp = sf_g;
- return 0;
-
- fail2:
- if (sf_g->nls)
- unload_nls(sf_g->nls);
-
- fail1:
- kfree(sf_g);
-
- fail0:
- return err;
+ if (rc == 0) {
+ /*
+ * Try mount it.
+ */
+ rc = VbglR0SfHostReqMapFolderWithContigSimple(str_name, virt_to_phys(str_name), RTPATH_DELIMITER,
+ true /*fCaseSensitive*/, &pSuperInfo->map.root);
+ if (RT_SUCCESS(rc)) {
+ kfree(str_name);
+
+ /* The rest is shared with remount. */
+ vbsf_super_info_copy_remount_options(pSuperInfo, info);
+
+ *sf_gp = pSuperInfo;
+ return 0;
+ }
+
+ /*
+ * bail out:
+ */
+ if (rc == VERR_FILE_NOT_FOUND) {
+ LogRel(("vboxsf: SHFL_FN_MAP_FOLDER failed for '%s': share not found\n", info->name));
+ rc = -ENXIO;
+ } else {
+ LogRel(("vboxsf: SHFL_FN_MAP_FOLDER failed for '%s': %Rrc\n", info->name, rc));
+ rc = -EPROTO;
+ }
+ if (pSuperInfo->nls)
+ unload_nls(pSuperInfo->nls);
+ }
+ } else {
+ SFLOGRELBOTH(("vboxsf: Could not allocate memory for super info!\n"));
+ rc = -ENOMEM;
+ }
+ if (str_name)
+ kfree(str_name);
+ if (pSuperInfo)
+ kfree(pSuperInfo);
+ return rc;
}
-/* unmap the share and free global info [sf_g] */
-static void sf_glob_free(struct sf_glob_info *sf_g)
+/* unmap the share and free super info [pSuperInfo] */
+static void vbsf_super_info_free(struct vbsf_super_info *pSuperInfo)
{
- int rc;
+ int rc;
- TRACE();
- rc = VbglR0SfUnmapFolder(&client_handle, &sf_g->map);
- if (RT_FAILURE(rc))
- LogFunc(("VbglR0SfUnmapFolder failed rc=%d\n", rc));
+ TRACE();
+ rc = VbglR0SfHostReqUnmapFolderSimple(pSuperInfo->map.root);
+ if (RT_FAILURE(rc))
+ LogFunc(("VbglR0SfHostReqUnmapFolderSimple failed rc=%Rrc\n", rc));
- if (sf_g->nls)
- unload_nls(sf_g->nls);
+ if (pSuperInfo->nls)
+ unload_nls(pSuperInfo->nls);
- kfree(sf_g);
+ kfree(pSuperInfo);
}
+
/**
- * This is called (by sf_read_super_[24|26] when vfs mounts the fs and
- * wants to read super_block.
- *
- * calls [sf_glob_alloc] to map the folder and allocate global
- * information structure.
- *
- * initializes [sb], initializes root inode and dentry.
- *
- * should respect [flags]
+ * Initialize backing device related matters.
*/
-static int sf_read_super_aux(struct super_block *sb, void *data, int flags)
+static int vbsf_init_backing_dev(struct super_block *sb, struct vbsf_super_info *pSuperInfo)
{
- int err;
- struct dentry *droot;
- struct inode *iroot;
- struct sf_inode_info *sf_i;
- struct sf_glob_info *sf_g;
- SHFLFSOBJINFO fsinfo;
- struct vbsf_mount_info_new *info;
- bool fInodePut = true;
-
- TRACE();
- if (!data) {
- LogFunc(("no mount info specified\n"));
- return -EINVAL;
- }
-
- info = data;
-
- if (flags & MS_REMOUNT) {
- LogFunc(("remounting is not supported\n"));
- return -ENOSYS;
- }
-
- err = sf_glob_alloc(info, &sf_g);
- if (err)
- goto fail0;
-
- sf_i = kmalloc(sizeof(*sf_i), GFP_KERNEL);
- if (!sf_i) {
- err = -ENOMEM;
- LogRelFunc(("could not allocate memory for root inode info\n"));
- goto fail1;
- }
-
- sf_i->handle = SHFL_HANDLE_NIL;
- sf_i->path = kmalloc(sizeof(SHFLSTRING) + 1, GFP_KERNEL);
- if (!sf_i->path) {
- err = -ENOMEM;
- LogRelFunc(("could not allocate memory for root inode path\n"));
- goto fail2;
- }
-
- sf_i->path->u16Length = 1;
- sf_i->path->u16Size = 2;
- sf_i->path->String.utf8[0] = '/';
- sf_i->path->String.utf8[1] = 0;
- sf_i->force_reread = 0;
-
- err = sf_stat(__func__, sf_g, sf_i->path, &fsinfo, 0);
- if (err) {
- LogFunc(("could not stat root of share\n"));
- goto fail3;
- }
-
- sb->s_magic = 0xface;
- sb->s_blocksize = 1024;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 3)
- /* Required for seek/sendfile.
- *
- * Must by less than or equal to INT64_MAX despite the fact that the
- * declaration of this variable is unsigned long long. See determination
- * of 'loff_t max' in fs/read_write.c / do_sendfile(). I don't know the
- * correct limit but MAX_LFS_FILESIZE (8TB-1 on 32-bit boxes) takes the
- * page cache into account and is the suggested limit. */
-# if defined MAX_LFS_FILESIZE
- sb->s_maxbytes = MAX_LFS_FILESIZE;
-# else
- sb->s_maxbytes = 0x7fffffffffffffffULL;
+ int rc = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ /* Each new shared folder map gets a new uint64_t identifier,
+ * allocated in sequence. We ASSUME the sequence will not wrap. */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
+ static uint64_t s_u64Sequence = 0;
+ uint64_t idSeqMine = ASMAtomicIncU64(&s_u64Sequence);
# endif
+ struct backing_dev_info *bdi;
+
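+ /* Kernels 4.12+ allocate and register a per-superblock BDI for us via super_setup_bdi_name(); older kernels use the bdi member embedded in the super info, initialized and registered further down. */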
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
+ rc = super_setup_bdi_name(sb, "vboxsf-%llu", (unsigned long long)idSeqMine);
+ if (!rc)
+ bdi = sb->s_bdi;
+ else
+ return rc;
+# else
+ bdi = &pSuperInfo->bdi;
+# endif
+
+ bdi->ra_pages = 0; /* No readahead */
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)
+ bdi->capabilities = 0
+# ifdef BDI_CAP_MAP_DIRECT
+ | BDI_CAP_MAP_DIRECT /* MAP_SHARED */
+# endif
+# ifdef BDI_CAP_MAP_COPY
+ | BDI_CAP_MAP_COPY /* MAP_PRIVATE */
+# endif
+# ifdef BDI_CAP_READ_MAP
+ | BDI_CAP_READ_MAP /* can be mapped for reading */
+# endif
+# ifdef BDI_CAP_WRITE_MAP
+ | BDI_CAP_WRITE_MAP /* can be mapped for writing */
+# endif
+# ifdef BDI_CAP_EXEC_MAP
+ | BDI_CAP_EXEC_MAP /* can be mapped for execution */
+# endif
+# ifdef BDI_CAP_STRICTLIMIT
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) /* Trouble with 3.16.x/debian8. Process stops after dirty page throttling.
+ * Only tested successfully with 4.19. Maybe skip altogether? */
+ | BDI_CAP_STRICTLIMIT;
+# endif
+# endif
+ ;
+# ifdef BDI_CAP_STRICTLIMIT
+ /* Smallest possible amount of dirty pages: 1% of RAM. We set this to
+ try to reduce the amount of data that's out of sync with the host side.
+ Besides, writepages isn't implemented, so flushing is extremely slow.
+ Note! Extremely slow linux 3.0.0 msync doesn't seem to be related to this setting. */
+ bdi_set_max_ratio(bdi, 1);
+# endif
+# endif /* >= 2.6.12 */
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
+ rc = bdi_init(&pSuperInfo->bdi);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
+ if (!rc)
+ rc = bdi_register(&pSuperInfo->bdi, NULL, "vboxsf-%llu", (unsigned long long)idSeqMine);
+# endif /* >= 2.6.26 */
+# endif /* 4.11.0 > version >= 2.6.24 */
+#endif /* >= 2.6.0 */
+ return rc;
+}
+
+
+/**
+ * Undoes what vbsf_init_backing_dev did.
+ */
+static void vbsf_done_backing_dev(struct super_block *sb, struct vbsf_super_info *pSuperInfo)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
+ bdi_destroy(&pSuperInfo->bdi); /* includes bdi_unregister() */
#endif
- sb->s_op = &sf_super_ops;
+}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
- iroot = iget_locked(sb, 0);
-#else
- iroot = iget(sb, 0);
+
+/**
+ * Creates the root inode and attaches it to the super block.
+ *
+ * @returns 0 on success, negative errno on failure.
+ * @param sb The super block.
+ * @param pSuperInfo Our super block info.
+ */
+static int vbsf_create_root_inode(struct super_block *sb, struct vbsf_super_info *pSuperInfo)
+{
+ SHFLFSOBJINFO fsinfo;
+ int rc;
+
+ /*
+ * Allocate and initialize the memory for our inode info structure.
+ */
+ struct vbsf_inode_info *sf_i = kmalloc(sizeof(*sf_i), GFP_KERNEL);
+ SHFLSTRING *path = kmalloc(sizeof(SHFLSTRING) + 1, GFP_KERNEL);
+ if (sf_i && path) {
+ sf_i->handle = SHFL_HANDLE_NIL;
+ sf_i->force_restat = false;
+ RTListInit(&sf_i->HandleList);
+#ifdef VBOX_STRICT
+ sf_i->u32Magic = SF_INODE_INFO_MAGIC;
#endif
- if (!iroot) {
- err = -ENOMEM; /* XXX */
- LogFunc(("could not get root inode\n"));
- goto fail3;
- }
-
- if (sf_init_backing_dev(sf_g)) {
- err = -EINVAL;
- LogFunc(("could not init bdi\n"));
+ sf_i->path = path;
+
+ path->u16Length = 1;
+ path->u16Size = 2;
+ path->String.utf8[0] = '/';
+ path->String.utf8[1] = 0;
+
+ /*
+ * Stat the root directory (for inode info).
+ */
+ rc = vbsf_stat(__func__, pSuperInfo, sf_i->path, &fsinfo, 0);
+ if (rc == 0) {
+ /*
+ * Create the actual inode structure.
+ * Note! 'ls -la' displays '.' and '..' entries with st_ino == 0 when the root inode is numbered 0, so root is #1.
+ */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
- unlock_new_inode(iroot);
+ struct inode *iroot = iget_locked(sb, 1);
+#else
+ struct inode *iroot = iget(sb, 1);
#endif
- goto fail4;
- }
-
- sf_init_inode(sf_g, iroot, &fsinfo);
- SET_INODE_INFO(iroot, sf_i);
+ if (iroot) {
+ vbsf_init_inode(iroot, sf_i, &fsinfo, pSuperInfo);
+ VBSF_SET_INODE_INFO(iroot, sf_i);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
- unlock_new_inode(iroot);
+ unlock_new_inode(iroot);
#endif
+ /*
+ * Now make it a root inode.
+ */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
- droot = d_make_root(iroot);
+ sb->s_root = d_make_root(iroot);
#else
- droot = d_alloc_root(iroot);
-#endif
- if (!droot) {
- err = -ENOMEM; /* XXX */
- LogFunc(("d_alloc_root failed\n"));
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
- fInodePut = false;
+ sb->s_root = d_alloc_root(iroot);
#endif
- goto fail5;
- }
+ if (sb->s_root) {
- sb->s_root = droot;
- SET_GLOB_INFO(sb, sf_g);
- return 0;
+ return 0;
+ }
- fail5:
- sf_done_backing_dev(sf_g);
-
- fail4:
- if (fInodePut)
- iput(iroot);
-
- fail3:
- kfree(sf_i->path);
+ SFLOGRELBOTH(("vboxsf: d_make_root failed!\n"));
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) /* On 3.4.0 and later d_make_root() itself calls iput() on failure. */
+ iput(iroot);
+#endif
+ /* iput() will call vbsf_evict_inode()/vbsf_clear_inode(). */
+ sf_i = NULL;
+ path = NULL;
+
+ rc = -ENOMEM;
+ } else {
+ SFLOGRELBOTH(("vboxsf: failed to allocate root inode!\n"));
+ rc = -ENOMEM;
+ }
+ } else
+ SFLOGRELBOTH(("vboxsf: could not stat root of share: %d\n", rc));
+ } else {
+ SFLOGRELBOTH(("vboxsf: Could not allocate memory for root inode info!\n"));
+ rc = -ENOMEM;
+ }
+ if (sf_i)
+ kfree(sf_i);
+ if (path)
+ kfree(path);
+ return rc;
+}
- fail2:
- kfree(sf_i);
- fail1:
- sf_glob_free(sf_g);
+/**
+ * This is called by vbsf_read_super_24() and vbsf_read_super_26() when vfs mounts
+ * the fs and wants to read super_block.
+ *
+ * Calls vbsf_super_info_alloc_and_map_it() to map the folder and allocate super
+ * information structure.
+ *
+ * Initializes @a sb, initializes root inode and dentry.
+ *
+ * Should respect @a flags.
+ */
+static int vbsf_read_super_aux(struct super_block *sb, void *data, int flags)
+{
+ int rc;
+ struct vbsf_super_info *pSuperInfo;
+
+ TRACE();
+ if (!data) {
+ SFLOGRELBOTH(("vboxsf: No mount data. Is mount.vboxsf installed (typically in /sbin)?\n"));
+ return -EINVAL;
+ }
+
+ if (flags & MS_REMOUNT) {
+ SFLOGRELBOTH(("vboxsf: Remounting is not supported!\n"));
+ return -ENOSYS;
+ }
+
+ /*
+ * Create our super info structure and map the shared folder.
+ */
+ rc = vbsf_super_info_alloc_and_map_it((struct vbsf_mount_info_new *)data, &pSuperInfo);
+ if (rc == 0) {
+ /*
+ * Initialize the super block structure (must be done before
+ * root inode creation).
+ */
+ sb->s_magic = 0xface;
+ sb->s_blocksize = 1024;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 3)
+ /* Required for seek/sendfile (see 'loff_t max' in fs/read_write.c / do_sendfile()). */
+# if defined MAX_LFS_FILESIZE
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+# elif BITS_PER_LONG == 32
+ sb->s_maxbytes = (loff_t)ULONG_MAX << PAGE_SHIFT;
+# else
+ sb->s_maxbytes = INT64_MAX;
+# endif
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+ sb->s_time_gran = 1; /* This might be a little optimistic for Windows hosts, where it should be 100. */
+#endif
+ sb->s_op = &g_vbsf_super_ops;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+ sb->s_d_op = &vbsf_dentry_ops;
+#endif
- fail0:
- return err;
+ /*
+ * Initialize the backing device. This is important for memory mapped
+ * files among other things.
+ */
+ rc = vbsf_init_backing_dev(sb, pSuperInfo);
+ if (rc == 0) {
+ /*
+ * Create the root inode and we're done.
+ */
+ rc = vbsf_create_root_inode(sb, pSuperInfo);
+ if (rc == 0) {
+ VBSF_SET_SUPER_INFO(sb, pSuperInfo);
+ SFLOGFLOW(("vbsf_read_super_aux: returns successfully\n"));
+ return 0;
+ }
+ vbsf_done_backing_dev(sb, pSuperInfo);
+ } else
+ SFLOGRELBOTH(("vboxsf: backing device information initialization failed: %d\n", rc));
+ vbsf_super_info_free(pSuperInfo);
+ }
+ return rc;
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
-static struct super_block *sf_read_super_24(struct super_block *sb, void *data,
- int flags)
-{
- int err;
- TRACE();
- err = sf_read_super_aux(sb, data, flags);
- if (err)
- return NULL;
-
- return sb;
-}
+/**
+ * This is called when vfs is about to destroy the @a inode.
+ *
+ * We must free the inode info structure here.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+static void vbsf_evict_inode(struct inode *inode)
+#else
+static void vbsf_clear_inode(struct inode *inode)
#endif
-
-/* this is called when vfs is about to destroy the [inode]. all
- resources associated with this [inode] must be cleared here */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
-static void sf_clear_inode(struct inode *inode)
{
- struct sf_inode_info *sf_i;
-
- TRACE();
- sf_i = GET_INODE_INFO(inode);
- if (!sf_i)
- return;
+ struct vbsf_inode_info *sf_i;
- BUG_ON(!sf_i->path);
- kfree(sf_i->path);
- kfree(sf_i);
- SET_INODE_INFO(inode, NULL);
-}
-#else /* LINUX_VERSION_CODE >= 2.6.36 */
-static void sf_evict_inode(struct inode *inode)
-{
- struct sf_inode_info *sf_i;
+ TRACE();
- TRACE();
- truncate_inode_pages(&inode->i_data, 0);
+ /*
+ * Flush stuff.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ truncate_inode_pages(&inode->i_data, 0);
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
- clear_inode(inode);
+ clear_inode(inode);
# else
- end_writeback(inode);
+ end_writeback(inode);
# endif
-
- sf_i = GET_INODE_INFO(inode);
- if (!sf_i)
- return;
-
- BUG_ON(!sf_i->path);
- kfree(sf_i->path);
- kfree(sf_i);
- SET_INODE_INFO(inode, NULL);
+#endif
+ /*
+ * Clean up our inode info.
+ */
+ sf_i = VBSF_GET_INODE_INFO(inode);
+ if (sf_i) {
+ VBSF_SET_INODE_INFO(inode, NULL);
+
+ Assert(sf_i->u32Magic == SF_INODE_INFO_MAGIC);
+ BUG_ON(!sf_i->path);
+ kfree(sf_i->path);
+ vbsf_handle_drop_chain(sf_i);
+# ifdef VBOX_STRICT
+ sf_i->u32Magic = SF_INODE_INFO_MAGIC_DEAD;
+# endif
+ kfree(sf_i);
+ }
}
-#endif /* LINUX_VERSION_CODE >= 2.6.36 */
+
/* this is called by vfs when it wants to populate [inode] with data.
the only thing that is known about inode at this point is its index
hence we can't do anything here, and let lookup/whatever with the
job to properly fill then [inode] */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
-static void sf_read_inode(struct inode *inode)
+static void vbsf_read_inode(struct inode *inode)
{
}
#endif
-/* vfs is done with [sb] (umount called) call [sf_glob_free] to unmap
- the folder and free [sf_g] */
-static void sf_put_super(struct super_block *sb)
+
+/* vfs is done with [sb] (umount called) call [vbsf_super_info_free] to unmap
+ the folder and free [pSuperInfo] */
+static void vbsf_put_super(struct super_block *sb)
{
- struct sf_glob_info *sf_g;
+ struct vbsf_super_info *pSuperInfo;
- sf_g = GET_GLOB_INFO(sb);
- BUG_ON(!sf_g);
- sf_done_backing_dev(sf_g);
- sf_glob_free(sf_g);
+ pSuperInfo = VBSF_GET_SUPER_INFO(sb);
+ BUG_ON(!pSuperInfo);
+ vbsf_done_backing_dev(sb, pSuperInfo);
+ vbsf_super_info_free(pSuperInfo);
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
-static int sf_statfs(struct super_block *sb, STRUCT_STATFS * stat)
-{
- return sf_get_volume_info(sb, stat);
-}
+
+/**
+ * Get file system statistics.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
+static int vbsf_statfs(struct dentry *dentry, struct kstatfs *stat)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 73)
+static int vbsf_statfs(struct super_block *sb, struct kstatfs *stat)
#else
-static int sf_statfs(struct dentry *dentry, STRUCT_STATFS * stat)
+static int vbsf_statfs(struct super_block *sb, struct statfs *stat)
+#endif
{
- struct super_block *sb = dentry->d_inode->i_sb;
- return sf_get_volume_info(sb, stat);
-}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
+ struct super_block *sb = dentry->d_inode->i_sb;
+#endif
+ int rc;
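+ /* The request is allocated on the VBGL physical heap so the HGCM transport can hand the host a physically contiguous buffer. */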
+ VBOXSFVOLINFOREQ *pReq = (VBOXSFVOLINFOREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+ if (pReq) {
+ SHFLVOLINFO *pVolInfo = &pReq->VolInfo;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(sb);
+ rc = VbglR0SfHostReqQueryVolInfo(pSuperInfo->map.root, pReq, SHFL_HANDLE_ROOT);
+ if (RT_SUCCESS(rc)) {
+ stat->f_type = UINT32_C(0x786f4256); /* 'VBox' little endian */
+ stat->f_bsize = pVolInfo->ulBytesPerAllocationUnit;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 73)
+ stat->f_frsize = pVolInfo->ulBytesPerAllocationUnit;
+#endif
+ stat->f_blocks = pVolInfo->ullTotalAllocationBytes
+ / pVolInfo->ulBytesPerAllocationUnit;
+ stat->f_bfree = pVolInfo->ullAvailableAllocationBytes
+ / pVolInfo->ulBytesPerAllocationUnit;
+ stat->f_bavail = pVolInfo->ullAvailableAllocationBytes
+ / pVolInfo->ulBytesPerAllocationUnit;
+ stat->f_files = 1000;
+ stat->f_ffree = 1000000; /* don't return 0 here since the guest may think
+ * that it is not possible to create any more files */
+ stat->f_fsid.val[0] = 0;
+ stat->f_fsid.val[1] = 0;
+ stat->f_namelen = 255;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ stat->f_flags = 0; /* not valid */
#endif
+ RT_ZERO(stat->f_spare);
+ rc = 0;
+ } else
+ rc = -RTErrConvertToErrno(rc);
+ VbglR0PhysHeapFree(pReq);
+ } else
+ rc = -ENOMEM;
+ return rc;
+}
-static int sf_remount_fs(struct super_block *sb, int *flags, char *data)
+static int vbsf_remount_fs(struct super_block *sb, int *flags, char *data)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 23)
- struct sf_glob_info *sf_g;
- struct sf_inode_info *sf_i;
- struct inode *iroot;
- SHFLFSOBJINFO fsinfo;
- int err;
-
- sf_g = GET_GLOB_INFO(sb);
- BUG_ON(!sf_g);
- if (data && data[0] != 0) {
- struct vbsf_mount_info_new *info =
- (struct vbsf_mount_info_new *)data;
- if (info->signature[0] == VBSF_MOUNT_SIGNATURE_BYTE_0
- && info->signature[1] == VBSF_MOUNT_SIGNATURE_BYTE_1
- && info->signature[2] == VBSF_MOUNT_SIGNATURE_BYTE_2) {
- sf_g->uid = info->uid;
- sf_g->gid = info->gid;
- sf_g->ttl = info->ttl;
- if ((unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, tag)) {
- sf_g->dmode = info->dmode;
- sf_g->fmode = info->fmode;
- sf_g->dmask = info->dmask;
- sf_g->fmask = info->fmask;
- } else {
- sf_g->dmode = ~0;
- sf_g->fmode = ~0;
- }
- if ((unsigned)info->length >= sizeof(struct vbsf_mount_info_new)) {
- AssertCompile(sizeof(sf_g->tag) >= sizeof(info->tag));
- memcpy(sf_g->tag, info->tag, sizeof(info->tag));
- sf_g->tag[sizeof(sf_g->tag) - 1] = '\0';
- } else {
- sf_g->tag[0] = '\0';
- }
- }
- }
-
- iroot = ilookup(sb, 0);
- if (!iroot)
- return -ENOSYS;
-
- sf_i = GET_INODE_INFO(iroot);
- err = sf_stat(__func__, sf_g, sf_i->path, &fsinfo, 0);
- BUG_ON(err != 0);
- sf_init_inode(sf_g, iroot, &fsinfo);
- /*unlock_new_inode(iroot); */
- return 0;
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(sb);
+ struct vbsf_inode_info *sf_i;
+ struct inode *iroot;
+ SHFLFSOBJINFO fsinfo;
+ int err;
+ Assert(pSuperInfo);
+
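+ /* A remount only re-reads the option block and re-stats the root; the host mapping itself is left untouched. */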
+ if (data && data[0] != 0) {
+ struct vbsf_mount_info_new *info = (struct vbsf_mount_info_new *)data;
+ if ( info->nullchar == '\0'
+ && info->signature[0] == VBSF_MOUNT_SIGNATURE_BYTE_0
+ && info->signature[1] == VBSF_MOUNT_SIGNATURE_BYTE_1
+ && info->signature[2] == VBSF_MOUNT_SIGNATURE_BYTE_2) {
+ vbsf_super_info_copy_remount_options(pSuperInfo, info);
+ }
+ }
+
+ iroot = ilookup(sb, 1); /* The root inode is created as #1, see vbsf_create_root_inode(). */
+ if (!iroot)
+ return -ENOSYS;
+
+ sf_i = VBSF_GET_INODE_INFO(iroot);
+ err = vbsf_stat(__func__, pSuperInfo, sf_i->path, &fsinfo, 0);
+ BUG_ON(err != 0);
+ vbsf_init_inode(iroot, sf_i, &fsinfo, pSuperInfo);
+ /*unlock_new_inode(iroot); */
+ return 0;
#else /* LINUX_VERSION_CODE < 2.4.23 */
- return -ENOSYS;
+ return -ENOSYS;
#endif /* LINUX_VERSION_CODE < 2.4.23 */
}
-/** Show mount options. */
+
+/**
+ * Show mount options.
+ *
+ * This is needed by the VBoxService automounter in order for it to pick up
+ * the the 'szTag' option value it sets on its mount.
+ */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
-static int sf_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int vbsf_show_options(struct seq_file *m, struct vfsmount *mnt)
#else
-static int sf_show_options(struct seq_file *m, struct dentry *root)
+static int vbsf_show_options(struct seq_file *m, struct dentry *root)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
- struct super_block *sb = mnt->mnt_sb;
+ struct super_block *sb = mnt->mnt_sb;
#else
- struct super_block *sb = root->d_sb;
+ struct super_block *sb = root->d_sb;
#endif
- struct sf_glob_info *sf_g = GET_GLOB_INFO(sb);
- if (sf_g) {
- seq_printf(m, ",uid=%u,gid=%u,ttl=%u,dmode=0%o,fmode=0%o,dmask=0%o,fmask=0%o",
- sf_g->uid, sf_g->gid, sf_g->ttl, sf_g->dmode, sf_g->fmode, sf_g->dmask, sf_g->fmask);
- if (sf_g->tag[0] != '\0') {
- seq_puts(m, ",tag=");
- seq_escape(m, sf_g->tag, " \t\n\\");
- }
- }
-
+ struct vbsf_super_info *pSuperInfo = VBSF_GET_SUPER_INFO(sb);
+ if (pSuperInfo) {
+ /* Performance related options: */
+ if (pSuperInfo->msTTL != -1)
+ seq_printf(m, ",ttl=%d", pSuperInfo->msTTL);
+ if (pSuperInfo->msDirCacheTTL >= 0)
+ seq_printf(m, ",dcachettl=%d", pSuperInfo->msDirCacheTTL);
+ if (pSuperInfo->msInodeTTL >= 0)
+ seq_printf(m, ",inodettl=%d", pSuperInfo->msInodeTTL);
+ if (pSuperInfo->cMaxIoPages != VBSF_DEFAULT_MAX_IO_PAGES)
+ seq_printf(m, ",maxiopages=%u", pSuperInfo->cMaxIoPages);
+ if (pSuperInfo->cbDirBuf != VBSF_DEFAULT_DIR_BUF_SIZE)
+ seq_printf(m, ",dirbuf=%u", pSuperInfo->cbDirBuf);
+ switch (pSuperInfo->enmCacheMode) {
+ default: AssertFailed();
+ case kVbsfCacheMode_Strict:
+ break;
+ case kVbsfCacheMode_None: seq_puts(m, ",cache=none"); break;
+ case kVbsfCacheMode_Read: seq_puts(m, ",cache=read"); break;
+ case kVbsfCacheMode_ReadWrite: seq_puts(m, ",cache=readwrite"); break;
+ }
+
+ /* Attributes and NLS: */
+ seq_printf(m, ",iocharset=%s", pSuperInfo->nls ? pSuperInfo->nls->charset : "utf8");
+ seq_printf(m, ",uid=%u,gid=%u", pSuperInfo->uid, pSuperInfo->gid);
+ if (pSuperInfo->dmode != ~0)
+ seq_printf(m, ",dmode=0%o", pSuperInfo->dmode);
+ if (pSuperInfo->fmode != ~0)
+ seq_printf(m, ",fmode=0%o", pSuperInfo->fmode);
+ if (pSuperInfo->dmask != 0)
+ seq_printf(m, ",dmask=0%o", pSuperInfo->dmask);
+ if (pSuperInfo->fmask != 0)
+ seq_printf(m, ",fmask=0%o", pSuperInfo->fmask);
+
+ /* Misc: */
+ if (pSuperInfo->szTag[0] != '\0') {
+ seq_puts(m, ",tag=");
+ seq_escape(m, pSuperInfo->szTag, " \t\n\\");
+ }
+ }
return 0;
}
-static struct super_operations sf_super_ops = {
+
+/**
+ * Super block operations.
+ */
+static struct super_operations g_vbsf_super_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
- .clear_inode = sf_clear_inode,
+ .clear_inode = vbsf_clear_inode,
#else
- .evict_inode = sf_evict_inode,
+ .evict_inode = vbsf_evict_inode,
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
- .read_inode = sf_read_inode,
+ .read_inode = vbsf_read_inode,
#endif
- .put_super = sf_put_super,
- .statfs = sf_statfs,
- .remount_fs = sf_remount_fs,
- .show_options = sf_show_options
+ .put_super = vbsf_put_super,
+ .statfs = vbsf_statfs,
+ .remount_fs = vbsf_remount_fs,
+ .show_options = vbsf_show_options
};
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
-static DECLARE_FSTYPE(vboxsf_fs_type, "vboxsf", sf_read_super_24, 0);
-#else
-static int sf_read_super_26(struct super_block *sb, void *data, int flags)
+
+
+/*********************************************************************************************************************************
+* File system type related stuff. *
+*********************************************************************************************************************************/
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 4)
+
+static int vbsf_read_super_26(struct super_block *sb, void *data, int flags)
{
- int err;
+ int err;
- TRACE();
- err = sf_read_super_aux(sb, data, flags);
- if (err)
- printk(KERN_DEBUG "sf_read_super_aux err=%d\n", err);
+ TRACE();
+ err = vbsf_read_super_aux(sb, data, flags);
+ if (err)
+ printk(KERN_DEBUG "vbsf_read_super_aux err=%d\n", err);
- return err;
+ return err;
}
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
-static struct super_block *sf_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static struct super_block *vbsf_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
- TRACE();
- return get_sb_nodev(fs_type, flags, data, sf_read_super_26);
+ TRACE();
+ return get_sb_nodev(fs_type, flags, data, vbsf_read_super_26);
}
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
-static int sf_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data, struct vfsmount *mnt)
+static int vbsf_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- TRACE();
- return get_sb_nodev(fs_type, flags, data, sf_read_super_26, mnt);
+ TRACE();
+ return get_sb_nodev(fs_type, flags, data, vbsf_read_super_26, mnt);
}
# else /* LINUX_VERSION_CODE >= 2.6.39 */
-static struct dentry *sf_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static struct dentry *sf_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
- TRACE();
- return mount_nodev(fs_type, flags, data, sf_read_super_26);
+ TRACE();
+ return mount_nodev(fs_type, flags, data, vbsf_read_super_26);
}
# endif /* LINUX_VERSION_CODE >= 2.6.39 */
-static struct file_system_type vboxsf_fs_type = {
- .owner = THIS_MODULE,
- .name = "vboxsf",
+/**
+ * File system registration structure.
+ */
+static struct file_system_type g_vboxsf_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "vboxsf",
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
- .get_sb = sf_get_sb,
+ .get_sb = vbsf_get_sb,
# else
- .mount = sf_mount,
+ .mount = sf_mount,
# endif
- .kill_sb = kill_anon_super
+ .kill_sb = kill_anon_super
};
-#endif /* LINUX_VERSION_CODE >= 2.6.0 */
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-static int follow_symlinks = 0;
-module_param(follow_symlinks, int, 0);
-MODULE_PARM_DESC(follow_symlinks,
- "Let host resolve symlinks rather than showing them");
-#endif
+#else /* LINUX_VERSION_CODE < 2.5.4 */
-/* Module initialization/finalization handlers */
-static int __init init(void)
+static struct super_block *vbsf_read_super_24(struct super_block *sb, void *data, int flags)
{
- int rcVBox;
- int rcRet = 0;
- int err;
-
- TRACE();
-
- if (sizeof(struct vbsf_mount_info_new) > PAGE_SIZE) {
- printk(KERN_ERR
- "Mount information structure is too large %lu\n"
- "Must be less than or equal to %lu\n",
- (unsigned long)sizeof(struct vbsf_mount_info_new),
- (unsigned long)PAGE_SIZE);
- return -EINVAL;
- }
-
- err = register_filesystem(&vboxsf_fs_type);
- if (err) {
- LogFunc(("register_filesystem err=%d\n", err));
- return err;
- }
-
- rcVBox = VbglR0HGCMInit();
- if (RT_FAILURE(rcVBox)) {
- LogRelFunc(("VbglR0HGCMInit failed, rc=%d\n", rcVBox));
- rcRet = -EPROTO;
- goto fail0;
- }
-
- rcVBox = VbglR0SfConnect(&client_handle);
- if (RT_FAILURE(rcVBox)) {
- LogRelFunc(("VbglR0SfConnect failed, rc=%d\n", rcVBox));
- rcRet = -EPROTO;
- goto fail1;
- }
-
- rcVBox = VbglR0SfSetUtf8(&client_handle);
- if (RT_FAILURE(rcVBox)) {
- LogRelFunc(("VbglR0SfSetUtf8 failed, rc=%d\n", rcVBox));
- rcRet = -EPROTO;
- goto fail2;
- }
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
- if (!follow_symlinks) {
- rcVBox = VbglR0SfSetSymlinks(&client_handle);
- if (RT_FAILURE(rcVBox)) {
- printk(KERN_WARNING
- "vboxsf: Host unable to show symlinks, rc=%d\n",
- rcVBox);
- }
- }
-#endif
+ int err;
- printk(KERN_DEBUG
- "vboxsf: Successfully loaded version " VBOX_VERSION_STRING
- " (interface " RT_XSTR(VMMDEV_VERSION) ")\n");
+ TRACE();
+ err = vbsf_read_super_aux(sb, data, flags);
+ if (err) {
+ printk(KERN_DEBUG "vbsf_read_super_aux err=%d\n", err);
+ return NULL;
+ }
+
+ return sb;
+}
- return 0;
+static DECLARE_FSTYPE(g_vboxsf_fs_type, "vboxsf", vbsf_read_super_24, 0);
- fail2:
- VbglR0SfDisconnect(&client_handle);
+#endif /* LINUX_VERSION_CODE < 2.5.4 */
- fail1:
- VbglR0HGCMTerminate();
- fail0:
- unregister_filesystem(&vboxsf_fs_type);
- return rcRet;
+
+/*********************************************************************************************************************************
+* Module stuff *
+*********************************************************************************************************************************/
+
+/**
+ * Called on module initialization.
+ */
+static int __init init(void)
+{
+ int rc;
+ SFLOGFLOW(("vboxsf: init\n"));
+
+ /*
+ * Must be paranoid about the vbsf_mount_info_new size.
+ */
+ AssertCompile(sizeof(struct vbsf_mount_info_new) <= PAGE_SIZE);
+ if (sizeof(struct vbsf_mount_info_new) > PAGE_SIZE) {
+ printk(KERN_ERR
+ "vboxsf: Mount information structure is too large %lu\n"
+ "vboxsf: Must be less than or equal to %lu\n",
+ (unsigned long)sizeof(struct vbsf_mount_info_new),
+ (unsigned long)PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /*
+ * Initialize stuff.
+ */
+ spin_lock_init(&g_SfHandleLock);
+ rc = VbglR0SfInit();
+ if (RT_SUCCESS(rc)) {
+ /*
+ * Try connect to the shared folder HGCM service.
+ * It is possible it is not there.
+ */
+ rc = VbglR0SfConnect(&g_SfClient);
+ if (RT_SUCCESS(rc)) {
+ /*
+ * Query host HGCM features and afterwards (must be last) shared folder features.
+ */
+ rc = VbglR0QueryHostFeatures(&g_fHostFeatures);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("vboxsf: VbglR0QueryHostFeatures failed: rc=%Rrc (ignored)\n", rc));
+ g_fHostFeatures = 0;
+ }
+ VbglR0SfHostReqQueryFeaturesSimple(&g_fSfFeatures, &g_uSfLastFunction);
+ LogRel(("vboxsf: g_fHostFeatures=%#x g_fSfFeatures=%#RX64 g_uSfLastFunction=%u\n",
+ g_fHostFeatures, g_fSfFeatures, g_uSfLastFunction));
+
+ /*
+ * Tell the shared folder service about our expectations:
+ * - UTF-8 strings (rather than UTF-16)
+ * - Whether to return or follow (default) symbolic links.
+ */
+ rc = VbglR0SfHostReqSetUtf8Simple();
+ if (RT_SUCCESS(rc)) {
+ if (!g_fFollowSymlinks) {
+ rc = VbglR0SfHostReqSetSymlinksSimple();
+ if (RT_FAILURE(rc))
+ printk(KERN_WARNING "vboxsf: Host unable to enable showing symlinks, rc=%d\n", rc);
+ }
+ /*
+ * Now that we're ready for action, try register the
+ * file system with the kernel.
+ */
+ rc = register_filesystem(&g_vboxsf_fs_type);
+ if (rc == 0) {
+ printk(KERN_INFO "vboxsf: Successfully loaded version " VBOX_VERSION_STRING "\n");
+ return 0;
+ }
+
+ /*
+ * Failed. Bail out.
+ */
+ LogRel(("vboxsf: register_filesystem failed: rc=%d\n", rc));
+ } else {
+ LogRel(("vboxsf: VbglR0SfSetUtf8 failed, rc=%Rrc\n", rc));
+ rc = -EPROTO;
+ }
+ VbglR0SfDisconnect(&g_SfClient);
+ } else {
+ LogRel(("vboxsf: VbglR0SfConnect failed, rc=%Rrc\n", rc));
+ rc = rc == VERR_HGCM_SERVICE_NOT_FOUND ? -EHOSTDOWN : -ECONNREFUSED;
+ }
+ VbglR0SfTerm();
+ } else {
+ LogRel(("vboxsf: VbglR0SfInit failed, rc=%Rrc\n", rc));
+ rc = -EPROTO;
+ }
+ return rc;
}
+
+/**
+ * Called on module finalization.
+ */
static void __exit fini(void)
{
- TRACE();
+ SFLOGFLOW(("vboxsf: fini\n"));
- VbglR0SfDisconnect(&client_handle);
- VbglR0HGCMTerminate();
- unregister_filesystem(&vboxsf_fs_type);
+ unregister_filesystem(&g_vboxsf_fs_type);
+ VbglR0SfDisconnect(&g_SfClient);
+ VbglR0SfTerm();
}
+
+/*
+ * Module parameters.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 52)
+module_param_named(follow_symlinks, g_fFollowSymlinks, int, 0);
+MODULE_PARM_DESC(follow_symlinks,
+ "Let host resolve symlinks rather than showing them");
+#endif
+
+
+/*
+ * Module declaration related bits.
+ */
module_init(init);
module_exit(fini);
-/* C++ hack */
-int __gxx_personality_v0 = 0xdeadbeef;
+MODULE_DESCRIPTION(VBOX_PRODUCT " VFS Module for Host File System Access");
+MODULE_AUTHOR(VBOX_VENDOR);
+MODULE_LICENSE("GPL and additional rights");
+#ifdef MODULE_ALIAS_FS
+MODULE_ALIAS_FS("vboxsf");
+#endif
+#ifdef MODULE_VERSION
+MODULE_VERSION(VBOX_VERSION_STRING " r" RT_XSTR(VBOX_SVN_REV));
+#endif
+
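
Illustration only (hypothetical values, not captured from a real guest): with
vbsf_show_options() above, a share mounted with a few non-default options would
show up in /proc/mounts roughly as

    myshare /media/sf_myshare vboxsf rw,ttl=200,dcachettl=500,iocharset=utf8,uid=1000,gid=1000,dmode=0755,fmode=0644,tag=VBoxAutomounter 0 0

which is how the VBoxService automounter recovers the 'tag=' value it set.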
diff --git a/ubuntu/vbox/vboxsf/vfsmod.h b/ubuntu/vbox/vboxsf/vfsmod.h
index f7b71831c3da..1079ca069dd6 100644
--- a/ubuntu/vbox/vboxsf/vfsmod.h
+++ b/ubuntu/vbox/vboxsf/vfsmod.h
@@ -34,8 +34,15 @@
# pragma once
#endif
+#if 0 /* Enables strict checks. */
+# define RT_STRICT
+# define VBOX_STRICT
+#endif
+
#define LOG_GROUP LOG_GROUP_SHARED_FOLDERS
#include "the-linux-kernel.h"
+#include <iprt/list.h>
+#include <iprt/asm.h>
#include <VBox/log.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
@@ -43,139 +50,407 @@
#endif
#include <VBox/VBoxGuestLibSharedFolders.h>
+#include <VBox/VBoxGuestLibSharedFoldersInline.h>
+#include <iprt/asm.h>
#include "vbsfmount.h"
-#define DIR_BUFFER_SIZE (16*_1K)
-
-/* per-shared folder information */
-struct sf_glob_info {
- VBGLSFMAP map;
- struct nls_table *nls;
- int ttl;
- int uid;
- int gid;
- int dmode;
- int fmode;
- int dmask;
- int fmask;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
- struct backing_dev_info bdi;
+
+/*
+ * Logging wrappers.
+ */
+#if 1
+# define TRACE() LogFunc(("tracepoint\n"))
+# define SFLOG(aArgs) Log(aArgs)
+# define SFLOGFLOW(aArgs) LogFlow(aArgs)
+# define SFLOG2(aArgs) Log2(aArgs)
+# define SFLOG3(aArgs) Log3(aArgs)
+# define SFLOGRELBOTH(aArgs) LogRel(aArgs)
+# ifdef LOG_ENABLED
+# define SFLOG_ENABLED 1
+# endif
+#else
+# define TRACE() RTLogBackdoorPrintf("%s: tracepoint\n", __FUNCTION__)
+# define SFLOG(aArgs) RTLogBackdoorPrintf aArgs
+# define SFLOGFLOW(aArgs) RTLogBackdoorPrintf aArgs
+# define SFLOG2(aArgs) RTLogBackdoorPrintf aArgs
+# define SFLOG3(aArgs) RTLogBackdoorPrintf aArgs
+# define SFLOG_ENABLED 1
+# define SFLOGRELBOTH(aArgs) do { RTLogBackdoorPrintf aArgs; printk aArgs; } while (0)
#endif
- char tag[32]; /**< Mount tag for VBoxService automounter. @since 6.0 */
-};
-/* per-inode information */
-struct sf_inode_info {
- /* which file */
- SHFLSTRING *path;
- /* some information was changed, update data on next revalidate */
- int force_restat;
- /* directory content changed, update the whole directory on next sf_getdent */
- int force_reread;
- /* file structure, only valid between open() and release() */
- struct file *file;
- /* handle valid if a file was created with sf_create_aux until it will
- * be opened with sf_reg_open() */
- SHFLHANDLE handle;
-};
-struct sf_dir_info {
- struct list_head info_list;
-};
+/*
+ * inode compatibility glue.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
-struct sf_dir_buf {
- size_t cEntries;
- size_t cbFree;
- size_t cbUsed;
- void *buf;
- struct list_head head;
-};
+DECLINLINE(loff_t) i_size_read(struct inode *pInode)
+{
+ AssertCompile(sizeof(loff_t) == sizeof(uint64_t));
+ return ASMAtomicReadU64((uint64_t volatile *)&pInode->i_size);
+}
-struct sf_reg_info {
- SHFLHANDLE handle;
-};
+DECLINLINE(void) i_size_write(struct inode *pInode, loff_t cbNew)
+{
+ AssertCompile(sizeof(pInode->i_size) == sizeof(uint64_t));
+ ASMAtomicWriteU64((uint64_t volatile *)&pInode->i_size, cbNew);
+}
-/* globals */
-extern VBGLSFCLIENT client_handle;
-
-/* forward declarations */
-extern struct inode_operations sf_dir_iops;
-extern struct inode_operations sf_lnk_iops;
-extern struct inode_operations sf_reg_iops;
-extern struct file_operations sf_dir_fops;
-extern struct file_operations sf_reg_fops;
-extern struct dentry_operations sf_dentry_ops;
-extern struct address_space_operations sf_reg_aops;
-
-extern void sf_init_inode(struct sf_glob_info *sf_g, struct inode *inode,
- PSHFLFSOBJINFO info);
-extern int sf_stat(const char *caller, struct sf_glob_info *sf_g,
- SHFLSTRING * path, PSHFLFSOBJINFO result, int ok_to_fail);
-extern int sf_inode_revalidate(struct dentry *dentry);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
-extern int sf_getattr(const struct path *path, struct kstat *kstat,
- u32 request_mask, unsigned int query_flags);
-# else
-extern int sf_getattr(struct vfsmount *mnt, struct dentry *dentry,
- struct kstat *kstat);
-# endif
-extern int sf_setattr(struct dentry *dentry, struct iattr *iattr);
-#endif
-extern int sf_path_from_dentry(const char *caller, struct sf_glob_info *sf_g,
- struct sf_inode_info *sf_i,
- struct dentry *dentry, SHFLSTRING ** result);
-extern int sf_nlscpy(struct sf_glob_info *sf_g, char *name,
- size_t name_bound_len, const unsigned char *utf8_name,
- size_t utf8_len);
-extern void sf_dir_info_free(struct sf_dir_info *p);
-extern void sf_dir_info_empty(struct sf_dir_info *p);
-extern struct sf_dir_info *sf_dir_info_alloc(void);
-extern int sf_dir_read_all(struct sf_glob_info *sf_g,
- struct sf_inode_info *sf_i, struct sf_dir_info *sf_d,
- SHFLHANDLE handle);
-extern int sf_init_backing_dev(struct sf_glob_info *sf_g);
-extern void sf_done_backing_dev(struct sf_glob_info *sf_g);
+#endif /* < 2.6.0 */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
-# define STRUCT_STATFS struct statfs
-#else
-# define STRUCT_STATFS struct kstatfs
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) \
+ && (!defined(RHEL_MAJOR) || RHEL_MAJOR != 6)
+DECLINLINE(void) set_nlink(struct inode *pInode, unsigned int cLinks)
+{
+ pInode->i_nlink = cLinks;
+}
#endif
-int sf_get_volume_info(struct super_block *sb, STRUCT_STATFS * stat);
-#ifdef __cplusplus
-# define CMC_API __attribute__ ((cdecl, regparm (0)))
-#else
-# define CMC_API __attribute__ ((regparm (0)))
-#endif
-#define TRACE() LogFunc(("tracepoint\n"))
+/* global variables */
+extern VBGLSFCLIENT g_SfClient;
+extern spinlock_t g_SfHandleLock;
+extern uint32_t g_uSfLastFunction;
+extern uint64_t g_fSfFeatures;
+
+extern struct inode_operations vbsf_dir_iops;
+extern struct inode_operations vbsf_lnk_iops;
+extern struct inode_operations vbsf_reg_iops;
+extern struct file_operations vbsf_dir_fops;
+extern struct file_operations vbsf_reg_fops;
+extern struct dentry_operations vbsf_dentry_ops;
+extern struct address_space_operations vbsf_reg_aops;
+
+
+/**
+ * VBox specific per-mount (shared folder) information.
+ */
+struct vbsf_super_info {
+ VBGLSFMAP map;
+ struct nls_table *nls;
+ /** Set if the NLS table is UTF-8. */
+ bool fNlsIsUtf8;
+ int uid;
+ int gid;
+ int dmode;
+ int fmode;
+ int dmask;
+ int fmask;
+ /** Maximum number of pages to allow in an I/O buffer with the host.
+ * This applies to read and write operations. */
+ uint32_t cMaxIoPages;
+ /** The default directory buffer size. */
+ uint32_t cbDirBuf;
+ /** The time to live for directory entries in jiffies, zero if disabled. */
+ uint32_t cJiffiesDirCacheTTL;
+ /** The time to live for inode information in jiffies, zero if disabled. */
+ uint32_t cJiffiesInodeTTL;
+ /** The cache and coherency mode. */
+ enum vbsf_cache_mode enmCacheMode;
+ /** Mount tag for VBoxService automounter. @since 6.0 */
+ char szTag[32];
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
+ /** The backing device info structure. */
+ struct backing_dev_info bdi;
+#endif
+ /** The mount option value for /proc/mounts. */
+ int32_t msTTL;
+ /** The time to live for directory entries in milliseconds, for /proc/mounts. */
+ int32_t msDirCacheTTL;
+ /** The time to live for inode information in milliseconds, for /proc/mounts. */
+ int32_t msInodeTTL;
+};
/* Following casts are here to prevent assignment of void * to
pointers of arbitrary type */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
-# define GET_GLOB_INFO(sb) ((struct sf_glob_info *) (sb)->u.generic_sbp)
-# define SET_GLOB_INFO(sb, sf_g) (sb)->u.generic_sbp = sf_g
+# define VBSF_GET_SUPER_INFO(sb) ((struct vbsf_super_info *)(sb)->u.generic_sbp)
+# define VBSF_SET_SUPER_INFO(sb, a_pSuperInfo) do { (sb)->u.generic_sbp = a_pSuperInfo; } while (0)
#else
-# define GET_GLOB_INFO(sb) ((struct sf_glob_info *) (sb)->s_fs_info)
-# define SET_GLOB_INFO(sb, sf_g) (sb)->s_fs_info = sf_g
+# define VBSF_GET_SUPER_INFO(sb) ((struct vbsf_super_info *)(sb)->s_fs_info)
+# define VBSF_SET_SUPER_INFO(sb, a_pSuperInfo) do { (sb)->s_fs_info = a_pSuperInfo; } while (0)
+#endif
+
+
+/**
+ * For associating inodes with host handles.
+ *
+ * This is necessary for address_space_operations::vbsf_writepage and allows
+ * optimizing stat, lookups and other operations on open files and directories.
+ */
+struct vbsf_handle {
+ /** List entry (head vbsf_inode_info::HandleList). */
+ RTLISTNODE Entry;
+ /** Host file/whatever handle. */
+ SHFLHANDLE hHost;
+ /** VBSF_HANDLE_F_XXX */
+ uint32_t fFlags;
+ /** Reference counter.
+ * Close the handle and free the structure when it reaches zero. */
+ uint32_t volatile cRefs;
+#ifdef VBOX_STRICT
+ /** For strictness checks. */
+ struct vbsf_inode_info *pInodeInfo;
#endif
+};
+
+/** @name VBSF_HANDLE_F_XXX - Handle summary flags (vbsf_handle::fFlags).
+ * @{ */
+#define VBSF_HANDLE_F_READ UINT32_C(0x00000001)
+#define VBSF_HANDLE_F_WRITE UINT32_C(0x00000002)
+#define VBSF_HANDLE_F_APPEND UINT32_C(0x00000004)
+#define VBSF_HANDLE_F_FILE UINT32_C(0x00000010)
+#define VBSF_HANDLE_F_DIR UINT32_C(0x00000020)
+#define VBSF_HANDLE_F_ON_LIST UINT32_C(0x00000080)
+#define VBSF_HANDLE_F_MAGIC_MASK UINT32_C(0xffffff00)
+#define VBSF_HANDLE_F_MAGIC UINT32_C(0x75030700) /**< Maurice Ravel (1875-03-07). */
+#define VBSF_HANDLE_F_MAGIC_DEAD UINT32_C(0x19371228)
+/** @} */
+
+
+/**
+ * VBox specific per-inode information.
+ */
+struct vbsf_inode_info {
+ /** Which file */
+ SHFLSTRING *path;
+ /** Some information was changed, update data on next revalidate */
+ bool force_restat;
+ /** The timestamp (jiffies) where the inode info was last updated. */
+ unsigned long ts_up_to_date;
+ /** The birth time. */
+ RTTIMESPEC BirthTime;
+
+ /** @name Host modification detection stats.
+ * @{ */
+ /** The raw modification time, for mapping invalidation purposes. */
+ RTTIMESPEC ModificationTime;
+ /** Copy of ModificationTime from the last time we wrote to the file. */
+ RTTIMESPEC ModificationTimeAtOurLastWrite;
+ /** @} */
+
+ /** Handle of a file created by vbsf_create_worker(), kept around until the
+ * file is opened by vbsf_reg_open().
+ * @todo r=bird: figure this one out... */
+ SHFLHANDLE handle;
+
+ /** List of open handles (struct vbsf_handle), protected by g_SfHandleLock. */
+ RTLISTANCHOR HandleList;
+#ifdef VBOX_STRICT
+ uint32_t u32Magic;
+# define SF_INODE_INFO_MAGIC UINT32_C(0x18620822) /**< Claude Debussy */
+# define SF_INODE_INFO_MAGIC_DEAD UINT32_C(0x19180325)
+#endif
+};
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) || defined(KERNEL_FC6)
/* FC6 kernel 2.6.18, vanilla kernel 2.6.19+ */
-# define GET_INODE_INFO(i) ((struct sf_inode_info *) (i)->i_private)
-# define SET_INODE_INFO(i, sf_i) (i)->i_private = sf_i
+# define VBSF_GET_INODE_INFO(i) ((struct vbsf_inode_info *) (i)->i_private)
+# define VBSF_SET_INODE_INFO(i, sf_i) (i)->i_private = sf_i
#else
/* vanilla kernel up to 2.6.18 */
-# define GET_INODE_INFO(i) ((struct sf_inode_info *) (i)->u.generic_ip)
-# define SET_INODE_INFO(i, sf_i) (i)->u.generic_ip = sf_i
+# define VBSF_GET_INODE_INFO(i) ((struct vbsf_inode_info *) (i)->u.generic_ip)
+# define VBSF_SET_INODE_INFO(i, sf_i) (i)->u.generic_ip = sf_i
#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
-# define GET_F_DENTRY(f) (f->f_path.dentry)
+extern void vbsf_init_inode(struct inode *inode, struct vbsf_inode_info *sf_i, PSHFLFSOBJINFO info,
+ struct vbsf_super_info *pSuperInfo);
+extern void vbsf_update_inode(struct inode *pInode, struct vbsf_inode_info *pInodeInfo, PSHFLFSOBJINFO pObjInfo,
+ struct vbsf_super_info *pSuperInfo, bool fInodeLocked, unsigned fSetAttrs);
+extern int vbsf_inode_revalidate_worker(struct dentry *dentry, bool fForced, bool fInodeLocked);
+extern int vbsf_inode_revalidate_with_handle(struct dentry *dentry, SHFLHANDLE hHostFile, bool fForced, bool fInodeLocked);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 18)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+extern int vbsf_inode_getattr(const struct path *path, struct kstat *kstat, u32 request_mask, unsigned int query_flags);
+# else
+extern int vbsf_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *kstat);
+# endif
+#else /* < 2.5.18 */
+extern int vbsf_inode_revalidate(struct dentry *dentry);
+#endif /* < 2.5.18 */
+extern int vbsf_inode_setattr(struct dentry *dentry, struct iattr *iattr);
+
+
+extern void vbsf_handle_drop_chain(struct vbsf_inode_info *pInodeInfo);
+extern struct vbsf_handle *vbsf_handle_find(struct vbsf_inode_info *pInodeInfo, uint32_t fFlagsSet, uint32_t fFlagsClear);
+extern uint32_t vbsf_handle_release_slow(struct vbsf_handle *pHandle, struct vbsf_super_info *pSuperInfo,
+ const char *pszCaller);
+extern void vbsf_handle_append(struct vbsf_inode_info *pInodeInfo, struct vbsf_handle *pHandle);
+
+/**
+ * Releases a handle.
+ *
+ * @returns New reference count.
+ * @param pHandle The handle to release.
+ * @param pSuperInfo The info structure for the shared folder associated
+ * with the handle.
+ * @param pszCaller The caller name (for logging failures).
+ */
+DECLINLINE(uint32_t) vbsf_handle_release(struct vbsf_handle *pHandle, struct vbsf_super_info *pSuperInfo, const char *pszCaller)
+{
+ uint32_t cRefs;
+
+ Assert((pHandle->fFlags & VBSF_HANDLE_F_MAGIC_MASK) == VBSF_HANDLE_F_MAGIC);
+ Assert(pHandle->pInodeInfo);
+ Assert(pHandle->pInodeInfo && pHandle->pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
+
+ cRefs = ASMAtomicDecU32(&pHandle->cRefs);
+ Assert(cRefs < _64M);
+ if (cRefs)
+ return cRefs;
+ return vbsf_handle_release_slow(pHandle, pSuperInfo, pszCaller);
+}
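A minimal usage sketch for the find/release pair above (hypothetical, not from the diff; it assumes pInodeInfo and pSuperInfo were obtained elsewhere and that vbsf_handle_find returns a referenced handle or NULL):

    /* Borrow an already-open, non-append, writable host handle if one exists. */
    struct vbsf_handle *pHandle = vbsf_handle_find(pInodeInfo,
                                                   VBSF_HANDLE_F_WRITE,    /* flags that must be set */
                                                   VBSF_HANDLE_F_APPEND);  /* flags that must be clear */
    if (pHandle) {
        /* ... perform host I/O against pHandle->hHost ... */
        vbsf_handle_release(pHandle, pSuperInfo, "vbsf_example_caller");
    }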
+
+
+/**
+ * VBox specific information for a regular file.
+ */
+struct vbsf_reg_info {
+ /** Handle tracking structure.
+ * @note Must be first! */
+ struct vbsf_handle Handle;
+};
+
+uint32_t vbsf_linux_oflags_to_vbox(unsigned fLnxOpen, uint32_t *pfHandle, const char *pszCaller);
+
+
+/**
+ * VBox specific information for an open directory.
+ */
+struct vbsf_dir_info {
+ /** Handle tracking structure.
+ * @note Must be first! */
+ struct vbsf_handle Handle;
+ /** Semaphore protecting everything below. */
+ struct semaphore Lock;
+ /** A magic number (VBSF_DIR_INFO_MAGIC). */
+ uint32_t u32Magic;
+ /** Size of the buffer for directory entries. */
+ uint32_t cbBuf;
+ /** Buffer for directory entries on the physical heap. */
+ PSHFLDIRINFO pBuf;
+ /** Number of valid bytes in the buffer. */
+ uint32_t cbValid;
+ /** Number of entries left in the buffer. */
+ uint32_t cEntriesLeft;
+ /** The position of the next entry. Incremented by one for each entry. */
+ loff_t offPos;
+ /** The next entry. */
+ PSHFLDIRINFO pEntry;
+ /** Set if there are no more files. */
+ bool fNoMoreFiles;
+};
+
+/** Magic number for vbsf_dir_info::u32Magic (Robert Anson Heinlein). */
+#define VBSF_DIR_INFO_MAGIC UINT32_C(0x19070707)
+/** Value of vbsf_dir_info::u32Magic when freed. */
+#define VBSF_DIR_INFO_MAGIC_DEAD UINT32_C(0x19880508)
+
+
+/**
+ * Sets the update-jiffies value for a dentry.
+ *
+ * This is used together with vbsf_super_info::cJiffiesDirCacheTTL to reduce
+ * re-validation of dentry structures while walking.
+ *
+ * This used to live in d_time, but since 4.9.0 that seems to have become
+ * unfashionable and d_fsdata is now used for this purpose. We do this all
+ * the way back, since d_time seems only to have been used by file system
+ * specific code (at least going back to 2.4.0).
+ */
+DECLINLINE(void) vbsf_dentry_set_update_jiffies(struct dentry *pDirEntry, unsigned long uToSet)
+{
+ /*SFLOG3(("vbsf_dentry_set_update_jiffies: %p: %lx -> %#lx\n", pDirEntry, (unsigned long)pDirEntry->d_fsdata, uToSet));*/
+ pDirEntry->d_fsdata = (void *)uToSet;
+}
+
+/**
+ * Get the update-jiffies value for a dentry.
+ */
+DECLINLINE(unsigned long) vbsf_dentry_get_update_jiffies(struct dentry *pDirEntry)
+{
+ return (unsigned long)pDirEntry->d_fsdata;
+}
+
+/**
+ * Invalidates the update TTL for the given directory entry so that it is
+ * revalidate the next time it is used.
+ * @param pDirEntry The directory entry cache entry to invalidate.
+ */
+DECLINLINE(void) vbsf_dentry_invalidate_ttl(struct dentry *pDirEntry)
+{
+ vbsf_dentry_set_update_jiffies(pDirEntry, jiffies - INT32_MAX / 2);
+}
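For illustration, the stored jiffies value would be consumed during revalidation roughly as follows (a sketch, assuming the vbsf_super_info::cJiffiesDirCacheTTL field mentioned above; the helper name is hypothetical):

    /* Is the dentry still within its dir-cache TTL? */
    static bool vbsf_example_dentry_is_fresh(struct dentry *pDirEntry,
                                             struct vbsf_super_info *pSuperInfo)
    {
        unsigned long const uJiffiesSet = vbsf_dentry_get_update_jiffies(pDirEntry);
        /* Unsigned subtraction keeps the comparison correct across jiffies wrap-around. */
        return jiffies - uJiffiesSet < pSuperInfo->cJiffiesDirCacheTTL;
    }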
+
+/**
+ * Increase the time-to-live of @a pDirEntry and all ancestors.
+ * @param pDirEntry The directory entry cache entry which ancestors
+ * we should increase the TTL for.
+ */
+DECLINLINE(void) vbsf_dentry_chain_increase_ttl(struct dentry *pDirEntry)
+{
+#ifdef VBOX_STRICT
+ struct super_block * const pSuper = pDirEntry->d_sb;
+#endif
+ unsigned long const uToSet = jiffies;
+ do {
+ Assert(pDirEntry->d_sb == pSuper);
+ vbsf_dentry_set_update_jiffies(pDirEntry, uToSet);
+ pDirEntry = pDirEntry->d_parent;
+ } while (!IS_ROOT(pDirEntry));
+}
+
+/**
+ * Increase the time-to-live of all ancestors.
+ * @param pDirEntry The directory entry cache entry which ancestors
+ * we should increase the TTL for.
+ */
+DECLINLINE(void) vbsf_dentry_chain_increase_parent_ttl(struct dentry *pDirEntry)
+{
+ Assert(!pDirEntry->d_parent || pDirEntry->d_parent->d_sb == pDirEntry->d_sb);
+ pDirEntry = pDirEntry->d_parent;
+ if (pDirEntry)
+ vbsf_dentry_chain_increase_ttl(pDirEntry);
+}
+
+/** Macro for getting the dentry for a struct file. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+# define VBSF_GET_F_DENTRY(f) file_dentry(f)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+# define VBSF_GET_F_DENTRY(f) (f->f_path.dentry)
#else
-# define GET_F_DENTRY(f) (f->f_dentry)
+# define VBSF_GET_F_DENTRY(f) (f->f_dentry)
#endif
+extern int vbsf_stat(const char *caller, struct vbsf_super_info *pSuperInfo, SHFLSTRING * path, PSHFLFSOBJINFO result,
+ int ok_to_fail);
+extern int vbsf_path_from_dentry(struct vbsf_super_info *pSuperInfo, struct vbsf_inode_info *sf_i, struct dentry *dentry,
+ SHFLSTRING ** result, const char *caller);
+extern int vbsf_nlscpy(struct vbsf_super_info *pSuperInfo, char *name, size_t name_bound_len,
+ const unsigned char *utf8_name, size_t utf8_len);
+extern int vbsf_nls_to_shflstring(struct vbsf_super_info *pSuperInfo, const char *pszNls, PSHFLSTRING *ppString);
+
+
+/**
+ * Converts Linux access permissions to VBox ones (mode & 0777).
+ *
+ * @note Currently identical.
+ * @sa sf_access_permissions_to_linux
+ */
+DECLINLINE(uint32_t) sf_access_permissions_to_vbox(int fAttr)
+{
+ /* Access bits should be the same: */
+ AssertCompile(RTFS_UNIX_IRUSR == S_IRUSR);
+ AssertCompile(RTFS_UNIX_IWUSR == S_IWUSR);
+ AssertCompile(RTFS_UNIX_IXUSR == S_IXUSR);
+ AssertCompile(RTFS_UNIX_IRGRP == S_IRGRP);
+ AssertCompile(RTFS_UNIX_IWGRP == S_IWGRP);
+ AssertCompile(RTFS_UNIX_IXGRP == S_IXGRP);
+ AssertCompile(RTFS_UNIX_IROTH == S_IROTH);
+ AssertCompile(RTFS_UNIX_IWOTH == S_IWOTH);
+ AssertCompile(RTFS_UNIX_IXOTH == S_IXOTH);
+
+ return fAttr & RTFS_UNIX_ALL_ACCESS_PERMS;
+}
+
#endif /* !GA_INCLUDED_SRC_linux_sharedfolders_vfsmod_h */
diff --git a/ubuntu/vbox/vboxvideo/Makefile b/ubuntu/vbox/vboxvideo/Makefile
index f87976a8c7e9..1be76d9e15d6 100644
--- a/ubuntu/vbox/vboxvideo/Makefile
+++ b/ubuntu/vbox/vboxvideo/Makefile
@@ -20,33 +20,45 @@ KBUILD_EXTMOD=${srctree}/ubuntu/vbox
# Linux kbuild sets this to our source directory if we are called from there
obj ?= $(CURDIR)
-include $(obj)/Makefile.include.header
-
-BUILD =
+include $(obj)/Makefile-header.gmk
+VBOXDRM_DIR = $(VBOX_MODULE_SRC_DIR)
# We want to build on Linux 3.11 and later and on all EL 7 kernels.
-ifneq ($(filter-out 1.% 2.% 3.0.% 3.1.% 3.2.% 3.3.% 3.4.% 3.5.% 3.6.% 3.7.% \
- 3.8.% 3.9.% 3.10.%,$(KERN_VER)),)
- BUILD = 1
+VBOX_BUILD =
+ifneq ($(filter-out 1.% 2.% 3.0.% 3.1.% 3.2.% 3.3.% 3.4.% 3.5.% 3.6.% 3.7.% 3.8.% 3.9.% 3.10.%,$(KERN_VER)),)
+ VBOX_BUILD = 1
endif
ifeq ($(filter-out %.el7.x86_64,$(KERN_VER)),)
- BUILD = 1
+ VBOX_BUILD = 1
endif
-ifneq ($(BUILD),)
+ifneq ($(VBOX_BUILD),)
-MOD_NAME = vboxvideo
-MOD_OBJS = hgsmi_base.o \
- modesetting.o vbox_drv.o vbox_fb.o vbox_irq.o vbox_main.o \
- vbox_mode.o vbox_ttm.o vbva_base.o vbox_prime.o vbox_hgsmi.o
-MOD_INCL = -I$(KBUILD_EXTMOD) -Iinclude/drm
+VBOXMOD_NAME = vboxvideo
+VBOXMOD_OBJS = \
+ hgsmi_base.o \
+ modesetting.o \
+ vbox_drv.o \
+ vbox_fb.o \
+ vbox_irq.o \
+ vbox_main.o \
+ vbox_mode.o \
+ vbox_ttm.o \
+ vbva_base.o \
+ vbox_prime.o \
+ vbox_hgsmi.o
+VBOXMOD_INCL = \
+ $(VBOXDRM_DIR) \
+ include/drm
+## @todo r=bird: -Iinclude/drm is ambiguous.
-include $(obj)/Makefile.include.footer
+include $(obj)/Makefile-footer.gmk
-else # ! wildcard $(KERN_INCL)/drm/drm_rect.h
+else # !VBOX_BUILD
all:
install:
clean:
-endif # ! wildcard $(KERN_INCL)/drm/drm_rect.h
+endif # !VBOX_BUILD
+
diff --git a/ubuntu/vbox/vboxvideo/Makefile-footer.gmk b/ubuntu/vbox/vboxvideo/Makefile-footer.gmk
new file mode 100644
index 000000000000..adc2c2ebaaa1
--- /dev/null
+++ b/ubuntu/vbox/vboxvideo/Makefile-footer.gmk
@@ -0,0 +1,128 @@
+# $Id: Makefile-footer.gmk $
+## @file
+# VirtualBox Guest Additions kernel module Makefile, common parts.
+#
+# See Makefile-header.gmk for details of how to use this.
+#
+
+#
+# Copyright (C) 2006-2019 Oracle Corporation
+#
+# This file is part of VirtualBox Open Source Edition (OSE), as
+# available from http://www.virtualbox.org. This file is free software;
+# you can redistribute it and/or modify it under the terms of the GNU
+# General Public License (GPL) as published by the Free Software
+# Foundation, in version 2 as it comes in the "COPYING" file of the
+# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+#
+
+VBOXMOD_0_TARGET = $(VBOXMOD_NAME)
+
+KBUILD_VERBOSE ?= 1 # Variable belongs to our kBuild, not the linux one.
+VBOX_LNX_VERBOSE = $(if $(KBUILD_VERBOSE),1,)
+
+#
+# Compiler options
+#
+VBOXMOD_0_KFLAGS := -D__KERNEL__ -DMODULE -DRT_WITHOUT_PRAGMA_ONCE $(addprefix -D,$(VBOXMOD_DEFS))
+ifeq ($(BUILD_TARGET_ARCH),amd64)
+VBOXMOD_0_KFLAGS += -DRT_ARCH_AMD64
+else
+VBOXMOD_0_KFLAGS += -DRT_ARCH_X86
+endif
+
+ifeq ($(BUILD_TYPE),debug)
+# The -Wno-array-bounds is because of a bug in gcc 4.something, see
+# https://sourceware.org/bugzilla/show_bug.cgi?id=10001
+ VBOXMOD_0_KFLAGS += -DDEBUG -DDEBUG_$(subst $(subst _, ,_),_,$(USERNAME)) -DDEBUG_USERNAME=$(subst $(subst _, ,_),_,$(USERNAME))
+ ifeq ($(shell expr $(KERN_VER) : '[23]\.'),0)
+ VBOXMOD_0_KFLAGS += -Werror -Wall -Wno-array-bounds
+ endif
+endif
+
+ifeq ($(VBOX_KERN_GROKS_EXTMOD),)
+#
+# Pre 2.6.6
+#
+# Note: While pre 2.6.6 kernels could also do "proper" builds from kbuild, the
+# make script needed to support it was somewhat different from 2.6. Since this
+# script works and pre-2.6.6 is not a moving target we will not try do do things
+# the "proper" way.
+#
+VBOXMOD_EXT := o
+
+ ifeq ($(BUILD_TARGET_ARCH),amd64)
+VBOXMOD_0_KFLAGS += -mcmodel=kernel
+ endif
+ ifeq ($(KERN_VERSION),24)
+VBOXMOD_0_KFLAGS += -DVBOX_LINUX_2_4
+ endif
+
+CFLAGS := -O2 $(VBOXMOD_CFLAGS) $(addprefix -I,$(KERN_INCL) $(VBOXMOD_INCL)) $(VBOXMOD_0_KFLAGS) $(KDEBUG)
+
+# 2.4 Module linking
+$(VBOXMOD_0_TARGET).$(VBOXMOD_EXT): $(VBOXMOD_OBJS)
+ $(LD) -o $@ -r $(VBOXMOD_OBJS)
+
+all: $(VBOXMOD_0_TARGET)
+$(VBOXMOD_0_TARGET): $(VBOXMOD_0_TARGET).$(VBOXMOD_EXT)
+
+install: $(VBOXMOD_0_TARGET)
+ @mkdir -p $(MODULE_DIR); \
+ install -m 0644 -o root -g root $(VBOXMOD_0_TARGET).$(VBOXMOD_EXT) $(MODULE_DIR); \
+ PATH="$(PATH):/bin:/sbin" depmod -a; sync
+
+clean:
+ for f in $(sort $(dir $(VBOXMOD_OBJS))); do rm -f $$f/*.o $$f/.*.cmd $$f/.*.flags; done
+ rm -rf .$(VBOXMOD_NAME)* .tmp_ver* $(VBOXMOD_NAME).* Modules.symvers modules.order
+
+.PHONY: all $(VBOXMOD_0_TARGET) install clean
+
+else # VBOX_KERN_GROKS_EXTMOD
+#
+# 2.6.6 and later
+#
+VBOXMOD_EXT := ko
+
+# build defs
+EXTRA_CFLAGS += $(VBOXMOD_CFLAGS) $(addprefix -I,$(KERN_INCL) $(VBOXMOD_INCL)) $(VBOXMOD_0_KFLAGS) $(KDEBUG)
+$(VBOXMOD_0_TARGET)-y := $(VBOXMOD_OBJS)
+obj-m += $(VBOXMOD_0_TARGET).o
+
+# Trigger parallel make jobs.
+JOBS := $(shell (getconf _NPROCESSORS_ONLN || grep -Ec '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
+ ifeq ($(JOBS),0)
+ override JOBS := 1
+ endif
+
+# rules:
+all: $(VBOXMOD_0_TARGET)
+
+# OL/UEK: disable module signing for external modules -- we don't have any private key
+$(VBOXMOD_0_TARGET):
+ifneq ($(VBOX_KERN_GROKS_SUBDIRS),)
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) $(if $(JOBS),-j$(JOBS),) modules
+else
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) M=$(CURDIR) SRCROOT=$(CURDIR) $(if $(JOBS),-j$(JOBS),) modules
+endif
+
+install: $(VBOXMOD_0_TARGET)
+ifneq ($(VBOX_KERN_GROKS_SUBDIRS),)
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) modules_install
+else
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) M=$(CURDIR) SRCROOT=$(CURDIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) modules_install
+endif
+
+modules_install: install
+
+clean:
+ifneq ($(VBOX_KERN_GROKS_SUBDIRS),)
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) clean
+else
+ $(MAKE) V=$(VBOX_LNX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) M=$(CURDIR) SRCROOT=$(CURDIR) clean
+endif
+
+.PHONY: all $(VBOXMOD_0_TARGET) install modules_install clean
+endif # VBOX_KERN_GROKS_EXTMOD
+
diff --git a/ubuntu/vbox/vboxsf/Makefile.include.header b/ubuntu/vbox/vboxvideo/Makefile-header.gmk
similarity index 51%
rename from ubuntu/vbox/vboxsf/Makefile.include.header
rename to ubuntu/vbox/vboxvideo/Makefile-header.gmk
index 8b0434bd508e..456d2789ec30 100644
--- a/ubuntu/vbox/vboxsf/Makefile.include.header
+++ b/ubuntu/vbox/vboxvideo/Makefile-header.gmk
@@ -1,4 +1,4 @@
-# $Id: Makefile.include.header $
+# $Id: Makefile-header.gmk $
## @file
# VirtualBox Guest Additions kernel module Makefile, common parts.
#
@@ -26,16 +26,15 @@
# build as part of the Guest Additions. The intended way of doing this is as
# follows:
#
-# # Linux kbuild sets this to our source directory if we are called from
-# # there
+# # Linux kbuild sets this to our source directory if we are called from there
# obj ?= $(CURDIR)
-# include $(obj)/Makefile.include.header
-# MOD_NAME = <name of the module to be built, without extension>
-# MOD_OBJS = <list of object files which should be included>
-# MOD_DEFS = <any additional defines which this module needs>
-# MOD_INCL = <any additional include paths which this module needs>
-# MOD_CFLAGS = <any additional CFLAGS which this module needs>
-# include $(obj)/Makefile.include.footer
+# include $(obj)/Makefile-header.gmk
+# VBOXMOD_NAME = <name of the module to be built, without extension>
+# VBOXMOD_OBJS = <list of object files which should be included>
+# VBOXMOD_DEFS = <any additional defines which this module needs>
+# VBOXMOD_INCL = <any additional include paths which this module needs>
+# VBOXMOD_CFLAGS = <any additional CFLAGS which this module needs>
+# include $(obj)/Makefile-footer.gmk
#
# The kmk kBuild define KBUILD_TARGET_ARCH is available.
#
@@ -79,7 +78,9 @@ ifeq ($(BUILD_TYPE),)
BUILD_TYPE := release
else
ifneq ($(BUILD_TYPE),release)
- $(warning Using BUILD_TYPE='$(BUILD_TYPE)' from the $(origin BUILD_TYPE).)
+ ifndef VBOX_KERN_QUIET
+ $(warning Using BUILD_TYPE='$(BUILD_TYPE)' from the $(origin BUILD_TYPE).)
+ endif
endif
endif
ifeq ($(USERNAME),)
@@ -108,14 +109,35 @@ ifeq ($(KERNELRELEASE),)
$(error The kernel build folder path must end in <version>/build, or the variable KERN_VER must be set)
endif
endif
- KERN_VER ?= $(shell uname -r)
+ KERN_VER ?= $(shell uname -r)
endif
- # guess kernel major version (24 or later)
- ifeq ($(shell if grep '"2\.4\.' /lib/modules/$(KERN_VER)/build/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ ifeq ($(KERN_DIR),)
+KERN_DIR := $(srctree)
+ endif
+
+ # Is this 2.4 or < 2.6.6? The UTS_RELEASE "2.x.y.z" define is present in the header until 2.6.1x something.
+ ifeq ($(shell if grep '"2\.4\.' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
KERN_VERSION := 24
+ VBOX_KERN_GROKS_EXTMOD :=
else
KERN_VERSION := 26
+ VBOX_KERN_GROKS_EXTMOD := yes
+ ifeq ($(shell if grep '"2\.6\.[012345][."]' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ VBOX_KERN_GROKS_EXTMOD :=
+ endif
+ VBOX_KERN_GROKS_SUBDIRS :=
+ ifeq ($(shell if grep '"[432]\.' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
+ endif
+
+ #
+ # Hack for Ubuntu 4.10 where we determine 2.6.8.1-3-generic-amd64 here, but
+ # the next invocation (M/SUBDIR) ends up with KERNELRELEASE=2.6.8.1-3.
+ #
+ ifeq ($(shell if grep '"[2]\.' $(KERN_DIR)/include/linux/version.h > /dev/null 2>&1; then echo yes; fi),yes)
+ export KERN_VER KERN_DIR
endif
else # neq($(KERNELRELEASE),)
@@ -125,22 +147,39 @@ else # neq($(KERNELRELEASE),)
#
# guess kernel version (24 or 26)
- ifeq ($(shell if echo "$(VERSION).$(PATCHLEVEL)." | grep '2\.4\.' > /dev/null; then echo yes; fi),yes)
+ ifeq ($(VERSION).$(PATCHLEVEL),2.4)
KERN_VERSION := 24
+ VBOX_KERN_GROKS_EXTMOD :=
else
KERN_VERSION := 26
+ VBOX_KERN_GROKS_EXTMOD := yes
+ ifeq ($(VERSION).$(PATCHLEVEL),2.6)
+ ifeq ($(findstring @$(SUBLEVEL)@,@0@1@2@3@4@5@),@$(SUBLEVEL)@)
+ VBOX_KERN_GROKS_EXTMOD :=
+ endif
+ endif
+ VBOX_KERN_GROKS_SUBDIRS :=
+ ifeq ($(VERSION),2)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
+ ifeq ($(VERSION),3)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
+ ifeq ($(VERSION),4)
+ VBOX_KERN_GROKS_SUBDIRS := yes
+ endif
endif
KERN_VER := $(KERNELRELEASE)
+ ifeq ($(KERN_DIR),)
+KERN_DIR := $(srctree)
+ endif
endif # neq($(KERNELRELEASE),)
# Kernel build folder
-ifeq ($(KERN_DIR),)
-KERN_DIR := $(srctree)
-endif
ifneq ($(shell if test -d $(KERN_DIR); then echo yes; fi),yes)
- $(error Error: unable to find the headers of the Linux kernel to build against. \
+ $(error Error: unable to find the headers of the Linux kernel to build against (KERN_DIR=$(KERN_DIR)). \
Specify KERN_VER=<version> (currently $(KERN_VER)) and run Make again)
endif
# Kernel include folder
@@ -149,12 +188,59 @@ KERN_INCL := $(KERN_DIR)/include
INSTALL_MOD_DIR ?= misc
MODULE_DIR := $(INSTALL_MOD_PATH)/lib/modules/$(KERN_VER)/$(INSTALL_MOD_DIR)
+#
+# The KBUILD_EXTMOD variable is used by 2.6.6 and later when building external
+# modules (see https://lwn.net/Articles/79984/). It will be set to SUBDIRS
+# or M by the linux kernel makefile. We fake it here for older kernels.
+#
+## @todo Drop this KBUILD_EXTMOD glue once it has been removed from all our makefiles (see sharedfolders).
+ifndef CURDIR # for make < v3.79
+ CURDIR := $(shell pwd)
+endif
+ifndef KBUILD_EXTMOD
+ KBUILD_EXTMOD := $(CURDIR)
+endif
+
+
+# For VBOX_GCC_CHECK_CC
+VBOX_CLOSEPAR := )
+VBOX_DOLLAR := $$
+## Modified VBOX_GCC_CHECK_EX_CC_CXX macro from /Config.kmk.
+# @param 1 The option to test for.
+# @param 2 The return value when supported.
+# @param 3 The return value when NOT supported.
+VBOX_GCC_CHECK_CC = $(shell \
+ > /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c; \
+ if $(CC) $(subst -Wno-,-W,$(1)) -Werror -c -o /dev/null /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c > /dev/null 2>&1; then \
+ case "`LC_ALL=C $(CC) $(subst -Wno-,-W,$(1)) -Werror -c -o /dev/null /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c 2>&1`" in \
+ "error: unknown warning option"*$(VBOX_CLOSEPAR) echo "$(3)";; \
+ *$(VBOX_CLOSEPAR) echo "$(2)";; \
+ esac; \
+ else echo "$(3)"; fi; \
+ rm -f /tmp/$(VBOX_DOLLAR)$(VBOX_DOLLAR).check.c; )
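The macro is meant to be expanded with $(call ...); a hypothetical usage (the warning flag shown is only an example, not taken from the diff):

    # Add the flag only if this compiler accepts it, otherwise add nothing.
    VBOXMOD_0_KFLAGS += $(call VBOX_GCC_CHECK_CC,-Wno-address-of-packed-member,-Wno-address-of-packed-member,)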
+
+#
+# Guess the module directory ASSUMING that this file is located in that directory.
+# Note! The special MAKEFILE_LIST variable was introduced in GNU make 3.80.
+#
+ifdef MAKEFILE_LIST
+ VBOX_MODULE_SRC_DIR := $(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+else
+ VBOX_MODULE_SRC_DIR := $(CURDIR)/
+endif
+
+
# debug - show guesses.
ifdef DEBUG
+ ifndef VBOX_KERN_QUIET
$(warning dbg: INSTALL_MOD_PATH = $(INSTALL_MOD_PATH))
$(warning dbg: INSTALL_MOD_DIR = $(INSTALL_MOD_DIR))
$(warning dbg: KERN_DIR = $(KERN_DIR))
$(warning dbg: KERN_INCL = $(KERN_INCL))
$(warning dbg: KERN_VERSION = $(KERN_VERSION))
$(warning dbg: MODULE_DIR = $(MODULE_DIR))
+$(warning dbg: KBUILD_EXTMOD = $(KBUILD_EXTMOD))
+$(warning dbg: VBOX_MODULE_SRC_DIR = $(VBOX_MODULE_SRC_DIR))
+ endif
endif
+
diff --git a/ubuntu/vbox/vboxvideo/Makefile.include.footer b/ubuntu/vbox/vboxvideo/Makefile.include.footer
deleted file mode 100644
index 7e04c3153eaa..000000000000
--- a/ubuntu/vbox/vboxvideo/Makefile.include.footer
+++ /dev/null
@@ -1,117 +0,0 @@
-# $Id: Makefile.include.footer $
-## @file
-# VirtualBox Guest Additions kernel module Makefile, common parts.
-#
-# See Makefile.include.header for details of how to use this.
-#
-
-#
-# Copyright (C) 2006-2019 Oracle Corporation
-#
-# This file is part of VirtualBox Open Source Edition (OSE), as
-# available from http://www.virtualbox.org. This file is free software;
-# you can redistribute it and/or modify it under the terms of the GNU
-# General Public License (GPL) as published by the Free Software
-# Foundation, in version 2 as it comes in the "COPYING" file of the
-# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
-# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
-#
-
-# override is required by the Debian guys
-override MODULE = $(MOD_NAME)
-OBJS = $(MOD_OBJS)
-
-KBUILD_VERBOSE ?= 1
-LINUX_VERBOSE = $(if $(KBUILD_VERBOSE),1,)
-
-#
-# Compiler options
-#
-ifndef INCL
- INCL := $(addprefix -I,$(KERN_INCL) $(EXTRA_INCL))
- ifndef KBUILD_EXTMOD
- KBUILD_EXTMOD := $(shell pwd)
- endif
- INCL += $(MOD_INCL)
- export INCL
-endif
-KFLAGS := -D__KERNEL__ -DMODULE -DRT_WITHOUT_PRAGMA_ONCE $(MOD_DEFS)
-ifeq ($(BUILD_TYPE),debug)
-# The -Wno-array-bounds is because of a bug in gcc 4.something, see
-# https://sourceware.org/bugzilla/show_bug.cgi?id=10001
- KFLAGS += -DDEBUG -DDEBUG_$(subst $(subst _, ,_),_,$(USERNAME)) -DDEBUG_USERNAME=$(subst $(subst _, ,_),_,$(USERNAME))
- ifeq ($(shell expr $(KERN_VER) : '[23]\.'),0)
- KFLAGS += -Werror -Wall -Wno-array-bounds
- endif
-endif
-
-ifeq ($(KERN_VERSION), 24)
-#
-# 2.4
-#
-
-# Note: while 2.4 kernels could also do "proper" builds from kbuild, the make
-# script needed to support it was somewhat different from 2.6. Since this
-# script works and 2.4 is not a moving target we will not try do do things the
-# "proper" way.
-
-ifeq ($(BUILD_TARGET_ARCH),amd64)
- KFLAGS += -mcmodel=kernel
-endif
-
-CFLAGS := -O2 -DVBOX_LINUX_2_4 $(MOD_CFLAGS) $(INCL) $(KFLAGS) $(MOD_EXTRA) $(KDEBUG)
-MODULE_EXT := o
-
-# 2.4 Module linking
-$(MODULE).o: $(OBJS)
- $(LD) -o $@ -r $(OBJS)
-
-.PHONY: $(MODULE)
-all: $(MODULE)
-$(MODULE): $(MODULE).o
-
-install: $(MODULE)
- @mkdir -p $(MODULE_DIR); \
- install -m 0644 -o root -g root $(MODULE).$(MODULE_EXT) $(MODULE_DIR); \
- PATH="$(PATH):/bin:/sbin" depmod -a; sync
-
-clean:
- for f in $(sort $(dir $(OBJS))); do rm -f $$f/*.o $$f/.*.cmd $$f/.*.flags; done
- rm -rf .$(MOD_NAME)* .tmp_ver* $(MOD_NAME).* Modules.symvers modules.order
-
-else # ! $(KERN_VERSION), 24
-#
-# 2.6 and later
-#
-
-MODULE_EXT := ko
-
-$(MODULE)-y := $(OBJS)
-
-# build defs
-EXTRA_CFLAGS += $(MOD_CFLAGS) $(INCL) $(KFLAGS) $(MOD_EXTRA) $(KDEBUG)
-
-.PHONY: $(MODULE)
-all: $(MODULE)
-
-obj-m += $(MODULE).o
-
-JOBS := $(shell (getconf _NPROCESSORS_ONLN || grep -Ec '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
-ifeq ($(JOBS),0)
- override JOBS := 1
-endif
-
-# OL/UEK: disable module signing for external modules -- we don't have any private key
-$(MODULE):
- $(MAKE) V=$(LINUX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) $(if $(JOBS),-j$(JOBS),) modules
-
-install: $(MODULE)
- $(MAKE) V=$(LINUX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) modules_install
-
-modules_install: install
-
-clean:
- $(MAKE) V=$(LINUX_VERBOSE) CONFIG_MODULE_SIG= -C $(KERN_DIR) SUBDIRS=$(CURDIR) SRCROOT=$(CURDIR) clean
-
-.PHONY: $(MODULE) install modules_install clean
-endif
diff --git a/ubuntu/vbox/vboxvideo/revision-generated.h b/ubuntu/vbox/vboxvideo/revision-generated.h
index a787df83ff85..fbe696a898a3 100644
--- a/ubuntu/vbox/vboxvideo/revision-generated.h
+++ b/ubuntu/vbox/vboxvideo/revision-generated.h
@@ -1 +1 @@
-#define VBOX_SVN_REV 128164
+#define VBOX_SVN_REV 129722
diff --git a/ubuntu/vbox/vboxvideo/vbox_drv.c b/ubuntu/vbox/vboxvideo/vbox_drv.c
index b759ee0aeb67..ea5a151b660e 100644
--- a/ubuntu/vbox/vboxvideo/vbox_drv.c
+++ b/ubuntu/vbox/vboxvideo/vbox_drv.c
@@ -37,6 +37,10 @@
#include "vbox_drv.h"
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)
+#include <drm/drm_probe_helper.h>
+#endif
+
#include "version-generated.h"
#include "revision-generated.h"
@@ -262,7 +266,10 @@ static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
static struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ |
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
+ DRIVER_IRQ_SHARED |
+#endif
DRIVER_PRIME,
.dev_priv_size = 0,
diff --git a/ubuntu/vbox/vboxvideo/vbox_drv.h b/ubuntu/vbox/vboxvideo/vbox_drv.h
index dc282bec3b59..3dc1c140af45 100644
--- a/ubuntu/vbox/vboxvideo/vbox_drv.h
+++ b/ubuntu/vbox/vboxvideo/vbox_drv.h
@@ -109,7 +109,9 @@ static inline void drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
drm_gem_object_unreference_unlocked(obj);
}
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) && !defined(RHEL_75)
static inline void drm_gem_object_put(struct drm_gem_object *obj)
{
drm_gem_object_unreference(obj);
diff --git a/ubuntu/vbox/vboxvideo/vbox_irq.c b/ubuntu/vbox/vboxvideo/vbox_irq.c
index f80f90488799..a252d118d5fd 100644
--- a/ubuntu/vbox/vboxvideo/vbox_irq.c
+++ b/ubuntu/vbox/vboxvideo/vbox_irq.c
@@ -28,7 +28,11 @@
*/
#include "vbox_drv.h"
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
#include <drm/drm_crtc_helper.h>
+#else
+#include <drm/drm_probe_helper.h>
+#endif
#include "vboxvideo.h"
static void vbox_clear_irq(void)
diff --git a/ubuntu/vbox/vboxvideo/vbox_main.c b/ubuntu/vbox/vboxvideo/vbox_main.c
index 6155f0ee87a1..e48204d057b1 100644
--- a/ubuntu/vbox/vboxvideo/vbox_main.c
+++ b/ubuntu/vbox/vboxvideo/vbox_main.c
@@ -595,24 +595,18 @@ int vbox_dumb_destroy(struct drm_file *file,
}
#endif
-static void vbox_bo_unref(struct vbox_bo **bo)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
+static void ttm_bo_put(struct ttm_buffer_object *bo)
{
- struct ttm_buffer_object *tbo;
-
- if ((*bo) == NULL)
- return;
-
- tbo = &((*bo)->bo);
- ttm_bo_unref(&tbo);
- if (!tbo)
- *bo = NULL;
+ ttm_bo_unref(&bo);
}
+#endif
void vbox_gem_free_object(struct drm_gem_object *obj)
{
struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
- vbox_bo_unref(&vbox_bo);
+ ttm_bo_put(&vbox_bo->bo);
}
static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
diff --git a/ubuntu/vbox/vboxvideo/vbox_mode.c b/ubuntu/vbox/vboxvideo/vbox_mode.c
index 422e20c8a4f8..e32400c37288 100644
--- a/ubuntu/vbox/vboxvideo/vbox_mode.c
+++ b/ubuntu/vbox/vboxvideo/vbox_mode.c
@@ -37,6 +37,9 @@
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || defined(RHEL_72)
#include <drm/drm_plane_helper.h>
#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)
+#include <drm/drm_probe_helper.h>
+#endif
#include "vboxvideo.h"
@@ -169,6 +172,8 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
if (to_vbox_crtc(crtci)->crtc_id != 0)
continue;
+ if (!CRTC_FB(crtci))
+ break;
vbox->single_framebuffer = true;
vbox->input_mapping_width = CRTC_FB(crtci)->width;
vbox->input_mapping_height = CRTC_FB(crtci)->height;
diff --git a/ubuntu/vbox/vboxvideo/vbox_ttm.c b/ubuntu/vbox/vboxvideo/vbox_ttm.c
index 4b9038700b19..60728da58092 100644
--- a/ubuntu/vbox/vboxvideo/vbox_ttm.c
+++ b/ubuntu/vbox/vboxvideo/vbox_ttm.c
@@ -60,6 +60,7 @@ static int vbox_ttm_global_init(struct vbox_private *vbox)
struct drm_global_reference *global_ref;
int ret;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
global_ref = &vbox->ttm.mem_global_ref;
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
@@ -72,6 +73,7 @@ static int vbox_ttm_global_init(struct vbox_private *vbox)
}
vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
+#endif
global_ref = &vbox->ttm.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
@@ -81,7 +83,9 @@ static int vbox_ttm_global_init(struct vbox_private *vbox)
ret = drm_global_item_ref(global_ref);
if (ret) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
drm_global_item_unref(&vbox->ttm.mem_global_ref);
+#endif
return ret;
}
@@ -96,15 +100,6 @@ static void vbox_ttm_global_release(struct vbox_private *vbox)
drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
drm_global_item_unref(&vbox->ttm.mem_global_ref);
}
-#else
-static inline int vbox_ttm_global_init(struct vbox_private *vbox)
-{
- return 0;
-}
-
-static inline void vbox_ttm_global_release(struct vbox_private *vbox)
-{
-}
#endif
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
@@ -293,9 +288,11 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_device *dev = vbox->dev;
struct ttm_bo_device *bdev = &vbox->ttm.bdev;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
ret = vbox_ttm_global_init(vbox);
if (ret)
return ret;
+#endif
ret = ttm_bo_device_init(&vbox->ttm.bdev,
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
@@ -308,7 +305,11 @@ int vbox_mm_init(struct vbox_private *vbox)
DRM_FILE_PAGE_OFFSET, true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
goto err_ttm_global_release;
+#else
+ return ret;
+#endif
}
ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
@@ -330,8 +331,10 @@ int vbox_mm_init(struct vbox_private *vbox)
err_device_release:
ttm_bo_device_release(&vbox->ttm.bdev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
err_ttm_global_release:
vbox_ttm_global_release(vbox);
+#endif
return ret;
}
@@ -345,7 +348,9 @@ void vbox_mm_fini(struct vbox_private *vbox)
arch_phys_wc_del(vbox->fb_mtrr);
#endif
ttm_bo_device_release(&vbox->ttm.bdev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
vbox_ttm_global_release(vbox);
+#endif
}
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
diff --git a/ubuntu/vbox/vboxvideo/version-generated.h b/ubuntu/vbox/vboxvideo/version-generated.h
index ff669b300eaa..85722b642c49 100644
--- a/ubuntu/vbox/vboxvideo/version-generated.h
+++ b/ubuntu/vbox/vboxvideo/version-generated.h
@@ -3,9 +3,9 @@
#define VBOX_VERSION_MAJOR 6
#define VBOX_VERSION_MINOR 0
-#define VBOX_VERSION_BUILD 4
-#define VBOX_VERSION_STRING_RAW "6.0.4"
-#define VBOX_VERSION_STRING "6.0.4_KernelUbuntu"
+#define VBOX_VERSION_BUILD 6
+#define VBOX_VERSION_STRING_RAW "6.0.6"
+#define VBOX_VERSION_STRING "6.0.6_KernelUbuntu"
#define VBOX_API_VERSION_STRING "6_0"
#define VBOX_PRIVATE_BUILD_DESC "Private build by buildd"
--
2.20.1