VirtualBox on sc64's Kernel 5.10.x
by rahrah from LinuxQuestions.org (#5CDDB)
Hi,
The VirtualBox 6.1.16 kernel modules did not compile against the latest 5.10.x kernel in slackware64-current. The attached patch worked for me.
More details can be found upstream, here:
https://www.virtualbox.org/ticket/20055
with the patch here:
https://www.virtualbox.org/raw-attac...j-fix-r0.patch
sha1: ee313af087310b4d2f7126fb74710360c9f1fddf
The 6.1.17 testbuild (as others have pointed out) is available here:
https://www.virtualbox.org/wiki/Testbuilds
This patch is for 5.10.x as a host.
Guests running 5.10.x kernels with the VB Guest Additions may need further tweaking.
Look at Sergio Basto's comment #12 (and following) at the link above.
Sergio (sergiomb2 on GitHub) has a nice git clone of the VB repo, which seems to be up to date.
The patch at:
https://github.com/sergiomb2/Virtual...5fb495e2d04e97
(the patch now in the VB SVN trunk) is substantially similar to the one attached here, apart from this comment:
Code:
/** @def IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
 * alloc_vm_area was removed with 5.10 so we have to resort to a different way
 * to allocate executable memory.
 * It would be possible to remove IPRT_USE_ALLOC_VM_AREA_FOR_EXEC and use
 * this path execlusively for 3.2+ but no time to test it really works on every
 * supported kernel, so better play safe for now.
 */
and this:
Code:
- uint32_t idxPg = (uAddr - (unsigned long)pMemLnx->Core.pv) >> PAGE_SHIFT;
+ size_t idxPg = (uAddr - (unsigned long)pMemLnx->Core.pv) >> PAGE_SHIFT;
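As an aside, if the kernel API that the new code path relies on is unfamiliar: the idea is to vmap() the pages without execute permission and only flip the PTEs to an executable protection once the code has been written into them, W^X style. Below is a minimal, self-contained sketch of that generic pattern as a toy module. It is not VirtualBox code; the demo_* names are invented for illustration, and it is simplified (the real patch sets _PAGE_NX on the vmap protection and clears it later, as in the final hunk of the patch below, where rtR0MemObjLinuxApplyPageRange() is handed to apply_to_page_range()).
Code:
/* Toy example of the apply_to_page_range() pattern; all demo_* names invented. */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/sched.h>

#define DEMO_NPAGES 2

struct demo_apply_ctx {
    struct page **pages;  /* backing pages of the mapping    */
    void         *base;   /* start of the vmap()ed mapping   */
    pgprot_t      prot;   /* protection to apply to each PTE */
};

/* Called once per PTE by apply_to_page_range(). */
static int demo_apply_pte(pte_t *pte, unsigned long addr, void *data)
{
    struct demo_apply_ctx *ctx = data;
    size_t idx = (addr - (unsigned long)ctx->base) >> PAGE_SHIFT;

    set_pte(pte, mk_pte(ctx->pages[idx], ctx->prot));
    return 0;
}

static struct page *demo_pages[DEMO_NPAGES];
static void *demo_base;

static int __init demo_init(void)
{
    unsigned int i;

    for (i = 0; i < DEMO_NPAGES; i++) {
        demo_pages[i] = alloc_page(GFP_KERNEL);
        if (!demo_pages[i])
            goto err;
    }

    /* Step 1: map read/write but non-executable. */
    demo_base = vmap(demo_pages, DEMO_NPAGES, VM_MAP, PAGE_KERNEL);
    if (!demo_base)
        goto err;

    /* ... executable code would be written into demo_base here ... */

    /* Step 2: flip the PTEs of the mapping to an executable protection. */
    {
        struct demo_apply_ctx ctx = {
            .pages = demo_pages,
            .base  = demo_base,
            .prot  = PAGE_KERNEL_EXEC,
        };
        if (apply_to_page_range(current->active_mm,
                                (unsigned long)demo_base,
                                DEMO_NPAGES << PAGE_SHIFT,
                                demo_apply_pte, &ctx))
            pr_warn("demo: apply_to_page_range failed\n");
    }
    return 0;

err:
    while (i--)
        __free_page(demo_pages[i]);
    return -ENOMEM;
}

static void __exit demo_exit(void)
{
    unsigned int i;

    vunmap(demo_base);
    for (i = 0; i < DEMO_NPAGES; i++)
        __free_page(demo_pages[i]);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");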
Guest Additions
I haven't tested any VB guest running 5.10.x with the VB Guest Additions.
I have tested a Slackware-current guest with the standard in-tree kernel modules (vboxsf, vboxguest, vboxvideo): most things substantially work.
Just load the modules (you may need to kick them into action with some xrandr -s trickery).
For full functionality, you might have to tweak the Guest Additions scripts a little so they don't attempt to build the in-tree modules that are already there.
Cheers,
===Rich
File: linux-5.10-r0drv-memobj-fix-r0.patch
Code:
Index: src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
===================================================================
--- src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c (Revision 141658)
+++ src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c (Arbeitskopie)
@@ -56,9 +56,12 @@
* Whether we use alloc_vm_area (3.2+) for executable memory.
* This is a must for 5.8+, but we enable it all the way back to 3.2.x for
* better W^R compliance (fExecutable flag). */
-#if RTLNX_VER_MIN(3,2,0) || defined(DOXYGEN_RUNNING)
+#if RTLNX_VER_RANGE(3,2,0, 5,10,0) || defined(DOXYGEN_RUNNING)
# define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
#endif
+#if RTLNX_VER_MIN(5,10,0) || defined(DOXYGEN_RUNNING)
+# define IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
+#endif
/*
* 2.6.29+ kernels don't work with remap_pfn_range() anymore because
@@ -502,7 +505,43 @@
}
+#ifdef IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
/**
+ * User data passed to the apply_to_page_range() callback.
+ */
+typedef struct LNXAPPLYPGRANGE
+{
+ /** Pointer to the memory object. */
+ PRTR0MEMOBJLNX pMemLnx;
+ /** The page protection flags to apply. */
+ pgprot_t fPg;
+} LNXAPPLYPGRANGE;
+/** Pointer to the user data. */
+typedef LNXAPPLYPGRANGE *PLNXAPPLYPGRANGE;
+/** Pointer to the const user data. */
+typedef const LNXAPPLYPGRANGE *PCLNXAPPLYPGRANGE;
+
+/**
+ * Callback called in apply_to_page_range().
+ *
+ * @returns Linux status code.
+ * @param pPte Pointer to the page table entry for the given address.
+ * @param uAddr The address to apply the new protection to.
+ * @param pvUser The opaque user data.
+ */
+static DECLCALLBACK(int) rtR0MemObjLinuxApplyPageRange(pte_t *pPte, unsigned long uAddr, void *pvUser)
+{
+ PCLNXAPPLYPGRANGE pArgs = (PCLNXAPPLYPGRANGE)pvUser;
+ PRTR0MEMOBJLNX pMemLnx = pArgs->pMemLnx;
+ uint32_t idxPg = (uAddr - (unsigned long)pMemLnx->Core.pv) >> PAGE_SHIFT;
+
+ set_pte(pPte, mk_pte(pMemLnx->apPages[idxPg], pArgs->fPg));
+ return 0;
+}
+#endif
+
+
+/**
* Maps the allocation into ring-0.
*
* This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJ::fMappedToRing0 members.
@@ -584,6 +623,11 @@
else
# endif
{
+# if defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
+ if (fExecutable)
+ pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX when memory ready, W^X fashion. */
+# endif
+
# ifdef VM_MAP
pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
# else
@@ -1851,6 +1895,21 @@
preempt_enable();
return VINF_SUCCESS;
}
+# elif defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
+ PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
+ if ( pMemLnx->fExecutable
+ && pMemLnx->fMappedToRing0)
+ {
+ LNXAPPLYPGRANGE Args;
+ Args.pMemLnx = pMemLnx;
+ Args.fPg = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
+ int rcLnx = apply_to_page_range(current->active_mm, (unsigned long)pMemLnx->Core.pv + offSub, cbSub,
+ rtR0MemObjLinuxApplyPageRange, (void *)&Args);
+ if (rcLnx)
+ return VERR_NOT_SUPPORTED;
+
+ return VINF_SUCCESS;
+ }
# endif
NOREF(pMem);

