Index: VirtualBox-5.2.8/src/VBox/Runtime/r0drv/linux/waitqueue-r0drv-linux.h
===================================================================
--- VirtualBox-5.2.8.orig/src/VBox/Runtime/r0drv/linux/waitqueue-r0drv-linux.h
+++ VirtualBox-5.2.8/src/VBox/Runtime/r0drv/linux/waitqueue-r0drv-linux.h
@@ -46,7 +46,7 @@
 typedef struct RTR0SEMLNXWAIT
 {
     /** The wait queue entry. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
     wait_queue_entry_t WaitQE;
 #else
     wait_queue_t WaitQE;
Index: VirtualBox-5.2.8/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c
===================================================================
--- VirtualBox-5.2.8.orig/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c
+++ VirtualBox-5.2.8/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c
@@ -150,6 +150,10 @@ typedef struct VBOXNETFLTNOTIFIER *PVBOX
 # endif
 #endif
 
+# if 1
+#define SKB_GSO_UDP 0
+#endif
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 20, 0)
 # define VBOX_HAVE_SKB_VLAN
 #else
Index: VirtualBox-5.2.8/src/VBox/Additions/linux/drm/vbox_ttm.c
===================================================================
--- VirtualBox-5.2.8.orig/src/VBox/Additions/linux/drm/vbox_ttm.c
+++ VirtualBox-5.2.8/src/VBox/Additions/linux/drm/vbox_ttm.c
@@ -198,19 +198,13 @@ static void vbox_ttm_io_mem_free(struct
 {
 }
 
-static int vbox_bo_move(struct ttm_buffer_object *bo,
-			bool evict, bool interruptible,
-			bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+static int vbox_bo_move(struct ttm_buffer_object *bo, bool evict,
+			struct ttm_operation_ctx *ctx,
+			struct ttm_mem_reg *new_mem)
 {
 	int r;
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) && !defined(RHEL_74)
-	r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) && !defined(RHEL_74)
-	r = ttm_bo_move_memcpy(bo, evict, interruptible, no_wait_gpu, new_mem);
-#else
-	r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
-#endif
+	r = ttm_bo_move_memcpy(bo, ctx, new_mem);
 	return r;
 }
 
@@ -244,9 +238,9 @@ static struct ttm_tt *vbox_ttm_tt_create
 	return tt;
 }
 
-static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
+static int vbox_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -254,6 +248,29 @@ static void vbox_ttm_tt_unpopulate(struc
 	ttm_pool_unpopulate(ttm);
 }
 
+static struct drm_mm_node *vbox_find_mm_node(struct ttm_mem_reg *mem,
+					     unsigned long *offset)
+{
+	struct drm_mm_node *mm_node = mem->mm_node;
+
+	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
+		*offset -= (mm_node->size << PAGE_SHIFT);
+		++mm_node;
+	}
+	return mm_node;
+}
+
+static unsigned long vbox_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
+					 unsigned long page_offset)
+{
+	struct drm_mm_node *mm;
+	unsigned long offset = (page_offset << PAGE_SHIFT);
+
+	mm = vbox_find_mm_node(&bo->mem, &offset);
+	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
+	       (offset >> PAGE_SHIFT);
+}
+
 struct ttm_bo_driver vbox_bo_driver = {
 	.ttm_tt_create = vbox_ttm_tt_create,
 	.ttm_tt_populate = vbox_ttm_tt_populate,
@@ -268,7 +285,7 @@ struct ttm_bo_driver vbox_bo_driver = {
 	.io_mem_reserve = &vbox_ttm_io_mem_reserve,
 	.io_mem_free = &vbox_ttm_io_mem_free,
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
-	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
+	.io_mem_pfn = vbox_ttm_io_mem_pfn,
 #endif
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) \
     || defined(RHEL_74)
@@ -422,6 +439,7 @@ static inline u64 vbox_bo_gpu_offset(str
 
 int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = {false, false};
 	int i, ret;
 
 	if (bo->pin_count) {
@@ -437,7 +455,7 @@ int vbox_bo_pin(struct vbox_bo *bo, u32
 	for (i = 0; i < bo->placement.num_placement; i++)
 		PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
 
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
 
@@ -451,6 +469,7 @@ int vbox_bo_pin(struct vbox_bo *bo, u32
 
 int vbox_bo_unpin(struct vbox_bo *bo)
 {
+	struct ttm_operation_ctx ctx = {false, false};
 	int i, ret;
 
 	if (!bo->pin_count) {
@@ -464,7 +483,7 @@ int vbox_bo_unpin(struct vbox_bo *bo)
 	for (i = 0; i < bo->placement.num_placement; i++)
 		PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;
 
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
 
@@ -478,6 +497,7 @@ int vbox_bo_unpin(struct vbox_bo *bo)
  */
 int vbox_bo_push_sysram(struct vbox_bo *bo)
 {
+	struct ttm_operation_ctx ctx = {false, false};
 	int i, ret;
 
 	if (!bo->pin_count) {
@@ -496,7 +516,7 @@ int vbox_bo_push_sysram(struct vbox_bo *
 	for (i = 0; i < bo->placement.num_placement; i++)
 		PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
 
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret) {
 		DRM_ERROR("pushing to VRAM failed\n");
 		return ret;