Cycles: Refactor Device selection to allow individual GPU compute device selection
author	Lukas Stockner <lukas.stockner@freenet.de>
Mon, 7 Nov 2016 01:33:53 +0000 (02:33 +0100)
committer	Lukas Stockner <lukas.stockner@freenet.de>
Mon, 7 Nov 2016 02:19:29 +0000 (03:19 +0100)
Previously, it was only possible to choose a single GPU or all GPUs of a given type (CUDA or OpenCL).
Now, a toggle button is displayed for every device.
These settings are tied to the PCI Bus ID of each device, so they stay consistent when hardware is added or removed (but not when cards are swapped or moved between slots).
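
For illustration only (the exact strings are generated in device_cuda.cpp and opencl_util.cpp below and depend on the hardware), the new IDs encode the PCI location; a hypothetical GeForce GTX 1080 in domain 0000, bus 01, device 00 would get IDs roughly like:

    CUDA_GeForce GTX 1080_0000:01:00
    OPENCL_NVIDIA CUDA_GeForce GTX 1080_01:00.0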

From the code perspective, the more important change is that the compute device properties are now stored in the add-on preferences of the Cycles add-on instead of directly in the User Preferences.
This allows for a cleaner implementation, removing the Cycles C API functions that were called by the RNA code to specify the enum items.

Note that this change is neither backwards- nor forwards-compatible, but since it only affects a User Preference, no existing files are broken.
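
For script and add-on authors, a minimal sketch of reading the new settings from Python (the 'cycles' module key and the property names are taken from properties.py and ui.py below; this is illustrative, not part of the patch):

    import bpy

    prefs = bpy.context.user_preferences.addons['cycles'].preferences
    prefs.get_devices()  # fills prefs.devices from _cycles.available_devices()
    print(prefs.compute_device_type)  # 'NONE', 'CUDA' or 'OPENCL'
    for dev in prefs.devices:
        print(dev.type, dev.name, dev.id, dev.use)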

Reviewers: #cycles, brecht

Reviewed By: #cycles, brecht

Subscribers: brecht, juicyfruit, mib2berlin, Blendify

Differential Revision: https://developer.blender.org/D2338

18 files changed:
intern/cycles/blender/CCL_api.h
intern/cycles/blender/addon/properties.py
intern/cycles/blender/addon/ui.py
intern/cycles/blender/blender_python.cpp
intern/cycles/blender/blender_sync.cpp
intern/cycles/device/device.cpp
intern/cycles/device/device.h
intern/cycles/device/device_cuda.cpp
intern/cycles/device/device_intern.h
intern/cycles/device/device_multi.cpp
intern/cycles/device/device_opencl.cpp
intern/cycles/device/opencl/opencl.h
intern/cycles/device/opencl/opencl_util.cpp
intern/cycles/render/session.h
release/scripts/startup/bl_operators/wm.py
release/scripts/startup/bl_ui/space_userpref.py
source/blender/makesrna/intern/rna_userdef.c
source/blenderplayer/bad_level_call_stubs/stubs.c

intern/cycles/blender/CCL_api.h
index d3a68c4db4f798745488b3ec8721244f9e45d6dd..233ffc8802ce37119dbac8ea432975002eef3586 100644 (file)
 extern "C" {
 #endif
 
-/* returns a list of devices for selection, array is empty identifier
- * terminated and must not be freed */
-
-typedef struct CCLDeviceInfo {
-       char identifier[128];
-       char name[512];
-       int value;
-} CCLDeviceInfo;
-
-CCLDeviceInfo *CCL_compute_device_list(int device_type);
-
 /* create python module _cycles used by addon */
 
 void *CCL_python_module_init(void);
intern/cycles/blender/addon/properties.py
index a8ab9100deda987cea88dadc6325b2161afa1e0b..27c9b9220420335c4a197dd54e50c420790668e3 100644 (file)
@@ -21,7 +21,8 @@ from bpy.props import (BoolProperty,
                        EnumProperty,
                        FloatProperty,
                        IntProperty,
-                       PointerProperty)
+                       PointerProperty,
+                       StringProperty)
 
 # enums
 
@@ -122,6 +123,12 @@ enum_volume_interpolation = (
     ('CUBIC', "Cubic", "Smoothed high quality interpolation, but slower")
     )
 
+enum_device_type = (
+    ('CPU', "CPU", "CPU", 0),
+    ('CUDA', "CUDA", "CUDA", 1),
+    ('OPENCL', "OpenCL", "OpenCL", 2)
+    )
+
 
 class CyclesRenderSettings(bpy.types.PropertyGroup):
     @classmethod
@@ -1130,6 +1137,103 @@ class CyclesCurveSettings(bpy.types.PropertyGroup):
         del bpy.types.ParticleSettings.cycles
 
 
+class CyclesDeviceSettings(bpy.types.PropertyGroup):
+    @classmethod
+    def register(cls):
+        cls.id = StringProperty(name="ID")
+        cls.name = StringProperty(name="Name")
+        cls.use = BoolProperty(name="Use", default=True)
+        cls.type = EnumProperty(name="Type", items=enum_device_type, default='CUDA')
+
+
+class CyclesPreferences(bpy.types.AddonPreferences):
+    bl_idname = __package__
+
+    def get_device_types(self, context):
+        import _cycles
+        has_cuda, has_opencl = _cycles.get_device_types()
+        list = [('NONE', "None", "Don't use compute device", 0)]
+        if has_cuda:
+            list.append(('CUDA', "CUDA", "Use CUDA for GPU acceleration", 1))
+        if has_opencl:
+            list.append(('OPENCL', "OpenCL", "Use OpenCL for GPU acceleration", 2))
+        return list
+
+    compute_device_type = EnumProperty(
+            name="Compute Device Type",
+            description="Device to use for computation (rendering with Cycles)",
+            items=get_device_types,
+            )
+
+    devices = bpy.props.CollectionProperty(type=CyclesDeviceSettings)
+
+    def get_devices(self):
+        import _cycles
+        # Layout of the device tuples: (Name, Type, Internal ID, Persistent ID)
+        device_list = _cycles.available_devices()
+
+        cuda_devices = []
+        opencl_devices = []
+        for device in device_list:
+            if not device[1] in {'CUDA', 'OPENCL'}:
+                continue
+
+            entry = None
+            # Try to find existing Device entry
+            for dev in self.devices:
+                if dev.id == device[2] and dev.type == device[1]:
+                    entry = dev
+                    break
+            # Create new entry if no existing one was found
+            if not entry:
+                entry = self.devices.add()
+                entry.id   = device[2]
+                entry.name = device[0]
+                entry.type = device[1]
+
+            # Sort entries into lists
+            if entry.type == 'CUDA':
+                cuda_devices.append(entry)
+            elif entry.type == 'OPENCL':
+                opencl_devices.append(entry)
+        return cuda_devices, opencl_devices
+
+
+    def has_active_device(self):
+        import _cycles
+        device_list = _cycles.available_devices()
+        for device in device_list:
+            if device[1] != self.compute_device_type:
+                continue
+            if any(dev.use and dev.id == device[2] for dev in self.devices):
+                return True
+        return False
+
+
+    def draw_impl(self, layout, context):
+        layout.label(text="Compute Device:")
+        layout.row().prop(self, "compute_device_type", expand=True)
+
+        cuda_devices, opencl_devices = self.get_devices()
+        row = layout.row()
+
+        if cuda_devices:
+            col = row.column(align=True)
+            col.label(text="CUDA devices:")
+            for device in cuda_devices:
+                col.prop(device, "use", text=device.name, toggle=True)
+
+        if opencl_devices:
+            col = row.column(align=True)
+            col.label(text="OpenCL devices:")
+            for device in opencl_devices:
+                col.prop(device, "use", text=device.name, toggle=True)
+
+
+    def draw(self, context):
+        self.draw_impl(self.layout, context)
+
+
 def register():
     bpy.utils.register_class(CyclesRenderSettings)
     bpy.utils.register_class(CyclesCameraSettings)
@@ -1141,6 +1245,8 @@ def register():
     bpy.utils.register_class(CyclesObjectSettings)
     bpy.utils.register_class(CyclesCurveRenderSettings)
     bpy.utils.register_class(CyclesCurveSettings)
+    bpy.utils.register_class(CyclesDeviceSettings)
+    bpy.utils.register_class(CyclesPreferences)
 
 
 def unregister():
@@ -1154,3 +1260,5 @@ def unregister():
     bpy.utils.unregister_class(CyclesVisibilitySettings)
     bpy.utils.unregister_class(CyclesCurveRenderSettings)
     bpy.utils.unregister_class(CyclesCurveSettings)
+    bpy.utils.unregister_class(CyclesDeviceSettings)
+    bpy.utils.unregister_class(CyclesPreferences)
intern/cycles/blender/addon/ui.py
index 4942a71ce4d4b864b5be2bf11ee30a92bd1e86e0..d9ad7d967a6019b387b76ff4fe46ddbc2527b682 100644 (file)
@@ -53,25 +53,26 @@ class CyclesButtonsPanel:
         return rd.engine in cls.COMPAT_ENGINES
 
 
+def get_device_type(context):
+    return context.user_preferences.addons[__package__].preferences.compute_device_type
+
+
 def use_cpu(context):
     cscene = context.scene.cycles
-    device_type = context.user_preferences.system.compute_device_type
 
-    return (device_type == 'NONE' or cscene.device == 'CPU')
+    return (get_device_type(context) == 'NONE' or cscene.device == 'CPU')
 
 
 def use_opencl(context):
     cscene = context.scene.cycles
-    device_type = context.user_preferences.system.compute_device_type
 
-    return (device_type == 'OPENCL' and cscene.device == 'GPU')
+    return (get_device_type(context) == 'OPENCL' and cscene.device == 'GPU')
 
 
 def use_cuda(context):
     cscene = context.scene.cycles
-    device_type = context.user_preferences.system.compute_device_type
 
-    return (device_type == 'CUDA' and cscene.device == 'GPU')
+    return (get_device_type(context) == 'CUDA' and cscene.device == 'GPU')
 
 
 def use_branched_path(context):
@@ -85,6 +86,14 @@ def use_sample_all_lights(context):
 
     return cscene.sample_all_lights_direct or cscene.sample_all_lights_indirect
 
+def show_device_selection(context):
+    type = get_device_type(context)
+    if type == 'NETWORK':
+        return True
+    if not type in {'CUDA', 'OPENCL'}:
+        return False
+    return context.user_preferences.addons[__package__].preferences.has_active_device()
+
 
 def draw_samples_info(layout, context):
     cscene = context.scene.cycles
@@ -141,7 +150,6 @@ class CyclesRender_PT_sampling(CyclesButtonsPanel, Panel):
 
         scene = context.scene
         cscene = scene.cycles
-        device_type = context.user_preferences.system.compute_device_type
 
         row = layout.row(align=True)
         row.menu("CYCLES_MT_sampling_presets", text=bpy.types.CYCLES_MT_sampling_presets.bl_label)
@@ -150,7 +158,7 @@ class CyclesRender_PT_sampling(CyclesButtonsPanel, Panel):
 
         row = layout.row()
         sub = row.row()
-        sub.active = device_type != 'OPENCL' or use_cpu(context)
+        sub.active = get_device_type(context) != 'OPENCL' or use_cpu(context)
         sub.prop(cscene, "progressive", text="")
         row.prop(cscene, "use_square_samples")
 
@@ -364,6 +372,8 @@ class CyclesRender_PT_performance(CyclesButtonsPanel, Panel):
         rd = scene.render
         cscene = scene.cycles
 
+        context.user_preferences.addons['cycles'].preferences.draw_impl(layout, context)
+
         split = layout.split()
 
         col = split.column(align=True)
@@ -1606,9 +1616,13 @@ def draw_device(self, context):
 
         layout.prop(cscene, "feature_set")
 
-        device_type = context.user_preferences.system.compute_device_type
-        if device_type in {'CUDA', 'OPENCL', 'NETWORK'}:
-            layout.prop(cscene, "device")
+        split = layout.split(percentage=1/3)
+        split.label("Device:")
+        row = split.row(align=True)
+        sub = row.split(align=True)
+        sub.active = show_device_selection(context)
+        sub.prop(cscene, "device", text="")
+        row.operator("wm.addon_userpref_show", text="Preferences", icon='PREFERENCES').module = __package__
 
         if engine.with_osl() and use_cpu(context):
             layout.prop(cscene, "shading_system")
intern/cycles/blender/blender_python.cpp
index a50f5edb1dfd46a0fe43be5b0592a8a53eeaa96b..438abc49f88be4ac4e928bc0c939ac829d98cd44 100644 (file)
@@ -40,10 +40,6 @@ CCL_NAMESPACE_BEGIN
 
 namespace {
 
-/* Device list stored static (used by compute_device_list()). */
-static ccl::vector<CCLDeviceInfo> device_list;
-static ccl::DeviceType device_type = DEVICE_NONE;
-
 /* Flag describing whether debug flags were synchronized from scene. */
 bool debug_flags_set = false;
 
@@ -195,7 +191,6 @@ static PyObject *exit_func(PyObject * /*self*/, PyObject * /*args*/)
        ShaderManager::free_memory();
        TaskScheduler::free_memory();
        Device::free_memory();
-       device_list.free_memory();
        Py_RETURN_NONE;
 }
 
@@ -389,7 +384,12 @@ static PyObject *available_devices_func(PyObject * /*self*/, PyObject * /*args*/
 
        for(size_t i = 0; i < devices.size(); i++) {
                DeviceInfo& device = devices[i];
-               PyTuple_SET_ITEM(ret, i, PyUnicode_FromString(device.description.c_str()));
+               string type_name = Device::string_from_type(device.type);
+               PyObject *device_tuple = PyTuple_New(3);
+               PyTuple_SET_ITEM(device_tuple, 0, PyUnicode_FromString(device.description.c_str()));
+               PyTuple_SET_ITEM(device_tuple, 1, PyUnicode_FromString(type_name.c_str()));
+               PyTuple_SET_ITEM(device_tuple, 2, PyUnicode_FromString(device.id.c_str()));
+               PyTuple_SET_ITEM(ret, i, device_tuple);
        }
 
        return ret;
@@ -676,6 +676,20 @@ static PyObject *set_resumable_chunks_func(PyObject * /*self*/, PyObject *args)
        Py_RETURN_NONE;
 }
 
+static PyObject *get_device_types_func(PyObject * /*self*/, PyObject * /*args*/)
+{
+       vector<DeviceInfo>& devices = Device::available_devices();
+       bool has_cuda = false, has_opencl = false;
+       for(int i = 0; i < devices.size(); i++) {
+               has_cuda   |= (devices[i].type == DEVICE_CUDA);
+               has_opencl |= (devices[i].type == DEVICE_OPENCL);
+       }
+       PyObject *list = PyTuple_New(2);
+       PyTuple_SET_ITEM(list, 0, PyBool_FromLong(has_cuda));
+       PyTuple_SET_ITEM(list, 1, PyBool_FromLong(has_opencl));
+       return list;
+}
+
 static PyMethodDef methods[] = {
        {"init", init_func, METH_VARARGS, ""},
        {"exit", exit_func, METH_VARARGS, ""},
@@ -703,6 +717,9 @@ static PyMethodDef methods[] = {
        /* Resumable render */
        {"set_resumable_chunks", set_resumable_chunks_func, METH_VARARGS, ""},
 
+       /* Compute Device selection */
+       {"get_device_types", get_device_types_func, METH_VARARGS, ""},
+
        {NULL, NULL, 0, NULL},
 };
 
@@ -715,47 +732,6 @@ static struct PyModuleDef module = {
        NULL, NULL, NULL, NULL
 };
 
-static CCLDeviceInfo *compute_device_list(DeviceType type)
-{
-       /* create device list if it's not already done */
-       if(type != device_type) {
-               ccl::vector<DeviceInfo>& devices = ccl::Device::available_devices();
-
-               device_type = type;
-               device_list.clear();
-
-               /* add devices */
-               int i = 0;
-
-               foreach(DeviceInfo& info, devices) {
-                       if(info.type == type ||
-                          (info.type == DEVICE_MULTI && info.multi_devices[0].type == type))
-                       {
-                               CCLDeviceInfo cinfo;
-
-                               strncpy(cinfo.identifier, info.id.c_str(), sizeof(cinfo.identifier));
-                               cinfo.identifier[info.id.length()] = '\0';
-
-                               strncpy(cinfo.name, info.description.c_str(), sizeof(cinfo.name));
-                               cinfo.name[info.description.length()] = '\0';
-
-                               cinfo.value = i++;
-
-                               device_list.push_back(cinfo);
-                       }
-               }
-
-               /* null terminate */
-               if(!device_list.empty()) {
-                       CCLDeviceInfo cinfo = {"", "", 0};
-                       device_list.push_back(cinfo);
-               }
-       }
-
-       return (device_list.empty())? NULL: &device_list[0];
-}
-
-
 CCL_NAMESPACE_END
 
 void *CCL_python_module_init()
@@ -794,24 +770,3 @@ void *CCL_python_module_init()
 
        return (void*)mod;
 }
-
-CCLDeviceInfo *CCL_compute_device_list(int device_type)
-{
-       ccl::DeviceType type;
-       switch(device_type) {
-               case 0:
-                       type = ccl::DEVICE_CUDA;
-                       break;
-               case 1:
-                       type = ccl::DEVICE_OPENCL;
-                       break;
-               case 2:
-                       type = ccl::DEVICE_NETWORK;
-                       break;
-               default:
-                       type = ccl::DEVICE_NONE;
-                       break;
-       }
-       return ccl::compute_device_list(type);
-}
-
intern/cycles/blender/blender_sync.cpp
index bc5c3bb80967425aea3d700b43eb37d7cdb853fe..6e466826c35ae55a781a72ac03c1a72e723f79df 100644 (file)
@@ -531,7 +531,12 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine,
        vector<DeviceInfo>& devices = Device::available_devices();
        
        /* device default CPU */
-       params.device = devices[0];
+       foreach(DeviceInfo& device, devices) {
+               if(device.type == DEVICE_CPU) {
+                       params.device = device;
+                       break;
+               }
+       }
 
        if(get_enum(cscene, "device") == 2) {
                /* find network device */
@@ -540,17 +545,39 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine,
                                params.device = info;
        }
        else if(get_enum(cscene, "device") == 1) {
-               /* find GPU device with given id */
-               PointerRNA systemptr = b_userpref.system().ptr;
-               PropertyRNA *deviceprop = RNA_struct_find_property(&systemptr, "compute_device");
-               int device_id = b_userpref.system().compute_device();
+               PointerRNA b_preferences;
 
-               const char *id;
+               BL::UserPreferences::addons_iterator b_addon_iter;
+               for(b_userpref.addons.begin(b_addon_iter); b_addon_iter != b_userpref.addons.end(); ++b_addon_iter) {
+                       if(b_addon_iter->module() == "cycles") {
+                               b_preferences = b_addon_iter->preferences().ptr;
+                               break;
+                       }
+               }
 
-               if(RNA_property_enum_identifier(NULL, &systemptr, deviceprop, device_id, &id)) {
-                       foreach(DeviceInfo& info, devices)
-                               if(info.id == id)
-                                       params.device = info;
+               int compute_device = get_enum(b_preferences, "compute_device_type");
+
+               if(compute_device != 0) {
+                       vector<DeviceInfo> used_devices;
+                       RNA_BEGIN(&b_preferences, device, "devices") {
+                               if(get_enum(device, "type") == compute_device && get_boolean(device, "use")) {
+                                       string id = get_string(device, "id");
+                                       foreach(DeviceInfo& info, devices) {
+                                               if(info.id == id) {
+                                                       used_devices.push_back(info);
+                                                       break;
+                                               }
+                                       }
+                               }
+                       } RNA_END
+
+                       if(used_devices.size() == 1) {
+                               params.device = used_devices[0];
+                       }
+                       else if(used_devices.size() > 1) {
+                               params.device = Device::get_multi_device(used_devices);
+                       }
+                       /* Else keep using the CPU device that was set before. */
                }
        }
 
intern/cycles/device/device.cpp
index 909ec7a6d60c83f64ae3f899c60ca3cf629432fe..ff9387b0a8ab6fdac7cfdd16f29691754a3ed094 100644 (file)
@@ -258,33 +258,33 @@ Device *Device::create(DeviceInfo& info, Stats &stats, bool background)
 
 DeviceType Device::type_from_string(const char *name)
 {
-       if(strcmp(name, "cpu") == 0)
+       if(strcmp(name, "CPU") == 0)
                return DEVICE_CPU;
-       else if(strcmp(name, "cuda") == 0)
+       else if(strcmp(name, "CUDA") == 0)
                return DEVICE_CUDA;
-       else if(strcmp(name, "opencl") == 0)
+       else if(strcmp(name, "OPENCL") == 0)
                return DEVICE_OPENCL;
-       else if(strcmp(name, "network") == 0)
+       else if(strcmp(name, "NETWORK") == 0)
                return DEVICE_NETWORK;
-       else if(strcmp(name, "multi") == 0)
+       else if(strcmp(name, "MULTI") == 0)
                return DEVICE_MULTI;
-       
+
        return DEVICE_NONE;
 }
 
 string Device::string_from_type(DeviceType type)
 {
        if(type == DEVICE_CPU)
-               return "cpu";
+               return "CPU";
        else if(type == DEVICE_CUDA)
-               return "cuda";
+               return "CUDA";
        else if(type == DEVICE_OPENCL)
-               return "opencl";
+               return "OPENCL";
        else if(type == DEVICE_NETWORK)
-               return "network";
+               return "NETWORK";
        else if(type == DEVICE_MULTI)
-               return "multi";
-       
+               return "MULTI";
+
        return "";
 }
 
@@ -307,9 +307,6 @@ vector<DeviceType>& Device::available_types()
 #ifdef WITH_NETWORK
                types.push_back(DEVICE_NETWORK);
 #endif
-#ifdef WITH_MULTI
-               types.push_back(DEVICE_MULTI);
-#endif
 
                need_types_update = false;
        }
@@ -331,10 +328,6 @@ vector<DeviceInfo>& Device::available_devices()
                        device_opencl_info(devices);
 #endif
 
-#ifdef WITH_MULTI
-               device_multi_info(devices);
-#endif
-
                device_cpu_info(devices);
 
 #ifdef WITH_NETWORK
@@ -368,6 +361,29 @@ string Device::device_capabilities()
        return capabilities;
 }
 
+DeviceInfo Device::get_multi_device(vector<DeviceInfo> subdevices)
+{
+       assert(subdevices.size() > 1);
+
+       DeviceInfo info;
+       info.type = DEVICE_MULTI;
+       info.id = "MULTI";
+       info.description = "Multi Device";
+       info.multi_devices = subdevices;
+       info.num = 0;
+
+       info.has_bindless_textures = true;
+       info.pack_images = false;
+       foreach(DeviceInfo &device, subdevices) {
+               assert(device.type == info.multi_devices[0].type);
+
+               info.pack_images |= device.pack_images;
+               info.has_bindless_textures &= device.has_bindless_textures;
+       }
+
+       return info;
+}
+
 void Device::tag_update()
 {
        need_types_update = true;
intern/cycles/device/device.h
index 77dc1fa9713390fe9daa0b30523db4f3a65312b7..b9bdffa2618830f5b129217b1543fbce0eb3550f 100644 (file)
@@ -49,7 +49,7 @@ class DeviceInfo {
 public:
        DeviceType type;
        string description;
-       string id;
+       string id; /* used for user preferences, should stay fixed with changing hardware config */
        int num;
        bool display_device;
        bool advanced_shading;
@@ -69,6 +69,12 @@ public:
                has_bindless_textures = false;
                use_split_kernel = false;
        }
+
+       bool operator==(const DeviceInfo &info) {
+               /* Multiple Devices with the same ID would be very bad. */
+               assert(id != info.id || (type == info.type && num == info.num && description == info.description));
+               return id == info.id;
+       }
 };
 
 class DeviceRequestedFeatures {
@@ -282,6 +288,7 @@ public:
        static vector<DeviceType>& available_types();
        static vector<DeviceInfo>& available_devices();
        static string device_capabilities();
+       static DeviceInfo get_multi_device(vector<DeviceInfo> subdevices);
 
        /* Tag devices lists for update. */
        static void tag_update();
intern/cycles/device/device_cuda.cpp
index 73c9221e6a213dd6957b4fb2bff7cb4f84b98023..a4818aa3b8de6b686d7409171cc1c1e9c232d5a9 100644 (file)
@@ -1408,13 +1408,18 @@ void device_cuda_info(vector<DeviceInfo>& devices)
 
                info.type = DEVICE_CUDA;
                info.description = string(name);
-               info.id = string_printf("CUDA_%d", num);
                info.num = num;
 
                info.advanced_shading = (major >= 2);
                info.has_bindless_textures = (major >= 3);
                info.pack_images = false;
 
+               int pci_location[3] = {0, 0, 0};
+               cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
+               cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
+               cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
+               info.id = string_printf("CUDA_%s_%04x:%02x:%02x", name, pci_location[0], pci_location[1], pci_location[2]);
+
                /* if device has a kernel timeout, assume it is used for display */
                if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
                        info.description += " (Display)";
intern/cycles/device/device_intern.h
index 47584ae6d226714f1d24610d2790ffddfb5b772e..de487649045aaa283c96c4eccc843785810433ae 100644 (file)
@@ -33,7 +33,6 @@ void device_cpu_info(vector<DeviceInfo>& devices);
 void device_opencl_info(vector<DeviceInfo>& devices);
 void device_cuda_info(vector<DeviceInfo>& devices);
 void device_network_info(vector<DeviceInfo>& devices);
-void device_multi_info(vector<DeviceInfo>& devices);
 
 string device_cpu_capabilities(void);
 string device_opencl_capabilities(void);
intern/cycles/device/device_multi.cpp
index ef257358b224755774813c929d6067162cd2c540..48fd159d50888f0149643ff507db82533d869fbd 100644 (file)
@@ -350,120 +350,5 @@ Device *device_multi_create(DeviceInfo& info, Stats &stats, bool background)
        return new MultiDevice(info, stats, background);
 }
 
-static bool device_multi_add(vector<DeviceInfo>& devices, DeviceType type, bool with_display, bool with_advanced_shading, const char *id_fmt, int num)
-{
-       DeviceInfo info;
-
-       /* create map to find duplicate descriptions */
-       map<string, int> dupli_map;
-       map<string, int>::iterator dt;
-       int num_added = 0, num_display = 0;
-
-       info.advanced_shading = with_advanced_shading;
-       info.pack_images = false;
-       info.has_bindless_textures = true;
-
-       foreach(DeviceInfo& subinfo, devices) {
-               if(subinfo.type == type) {
-                       if(subinfo.advanced_shading != info.advanced_shading)
-                               continue;
-                       if(subinfo.display_device) {
-                               if(with_display)
-                                       num_display++;
-                               else
-                                       continue;
-                       }
-
-                       string key = subinfo.description;
-
-                       if(dupli_map.find(key) == dupli_map.end())
-                               dupli_map[key] = 1;
-                       else
-                               dupli_map[key]++;
-
-                       info.multi_devices.push_back(subinfo);
-                       if(subinfo.display_device)
-                               info.display_device = true;
-                       info.pack_images = info.pack_images || subinfo.pack_images;
-                       info.has_bindless_textures = info.has_bindless_textures && subinfo.has_bindless_textures;
-                       num_added++;
-               }
-       }
-
-       if(num_added <= 1 || (with_display && num_display == 0))
-               return false;
-
-       /* generate string */
-       stringstream desc;
-       vector<string> last_tokens;
-       bool first = true;
-
-       for(dt = dupli_map.begin(); dt != dupli_map.end(); dt++) {
-               if(!first) desc << " + ";
-               first = false;
-
-               /* get name and count */
-               string name = dt->first;
-               int count = dt->second;
-
-               /* strip common prefixes */
-               vector<string> tokens;
-               string_split(tokens, dt->first);
-
-               if(tokens.size() > 1) {
-                       int i;
-
-                       for(i = 0; i < tokens.size() && i < last_tokens.size(); i++)
-                               if(tokens[i] != last_tokens[i])
-                                       break;
-
-                       name = "";
-                       for(; i < tokens.size(); i++) {
-                               name += tokens[i];
-                               if(i != tokens.size() - 1)
-                                       name += " ";
-                       }
-               }
-
-               last_tokens = tokens;
-
-               /* add */
-               if(count > 1)
-                       desc << name << " (" << count << "x)";
-               else
-                       desc << name;
-       }
-
-       /* add info */
-       info.type = DEVICE_MULTI;
-       info.description = desc.str();
-       info.id = string_printf(id_fmt, num);
-       info.display_device = with_display;
-       info.num = 0;
-
-       if(with_display)
-               devices.push_back(info);
-       else
-               devices.insert(devices.begin(), info);
-       
-       return true;
-}
-
-void device_multi_info(vector<DeviceInfo>& devices)
-{
-       int num = 0;
-
-       if(!device_multi_add(devices, DEVICE_CUDA, false, true, "CUDA_MULTI_%d", num++))
-               device_multi_add(devices, DEVICE_CUDA, false, false, "CUDA_MULTI_%d", num++);
-       if(!device_multi_add(devices, DEVICE_CUDA, true, true, "CUDA_MULTI_%d", num++))
-               device_multi_add(devices, DEVICE_CUDA, true, false, "CUDA_MULTI_%d", num++);
-
-       num = 0;
-       if(!device_multi_add(devices, DEVICE_OPENCL, false, true, "OPENCL_MULTI_%d", num++))
-               device_multi_add(devices, DEVICE_OPENCL, false, false, "OPENCL_MULTI_%d", num++);
-       if(!device_multi_add(devices, DEVICE_OPENCL, true, true, "OPENCL_MULTI_%d", num++))
-               device_multi_add(devices, DEVICE_OPENCL, true, false, "OPENCL_MULTI_%d", num++);
-}
-
 CCL_NAMESPACE_END
 
intern/cycles/device/device_opencl.cpp
index 45cf6b074e9db119640936f88f6ea35aaddaf4b5..ba94c592a5fa601899cd824ebf482fa421ff8a5c 100644 (file)
@@ -83,17 +83,22 @@ void device_opencl_info(vector<DeviceInfo>& devices)
                const string& platform_name = platform_device.platform_name;
                const cl_device_type device_type = platform_device.device_type;
                const string& device_name = platform_device.device_name;
+               string hardware_id = platform_device.hardware_id;
+               if(hardware_id == "") {
+                       hardware_id = string_printf("ID_%d", num_devices);
+               }
+
                DeviceInfo info;
                info.type = DEVICE_OPENCL;
                info.description = string_remove_trademark(string(device_name));
                info.num = num_devices;
-               info.id = string_printf("OPENCL_%d", info.num);
                /* We don't know if it's used for display, but assume it is. */
                info.display_device = true;
                info.advanced_shading = OpenCLInfo::kernel_use_advanced_shading(platform_name);
                info.pack_images = true;
                info.use_split_kernel = OpenCLInfo::kernel_use_split(platform_name,
                                                                     device_type);
+               info.id = string("OPENCL_") + platform_name + "_" + device_name + "_" + hardware_id;
                devices.push_back(info);
                num_devices++;
        }
intern/cycles/device/opencl/opencl.h
index 30a35acbb2ac566497b5a5b7c4801a7722a38e22..054ac9014f0aced6f9e1bddde52b63256ffadee0 100644 (file)
@@ -55,17 +55,20 @@ struct OpenCLPlatformDevice {
                             const string& platform_name,
                             cl_device_id device_id,
                             cl_device_type device_type,
-                            const string& device_name)
+                            const string& device_name,
+                            const string& hardware_id)
          : platform_id(platform_id),
            platform_name(platform_name),
            device_id(device_id),
            device_type(device_type),
-           device_name(device_name) {}
+           device_name(device_name),
+           hardware_id(hardware_id) {}
        cl_platform_id platform_id;
        string platform_name;
        cl_device_id device_id;
        cl_device_type device_type;
        string device_name;
+       string hardware_id;
 };
 
 /* Contains all static OpenCL helper functions. */
@@ -83,6 +86,8 @@ public:
                                           string *error = NULL);
        static bool device_version_check(cl_device_id device,
                                         string *error = NULL);
+       static string get_hardware_id(string platform_name,
+                                     cl_device_id device_id);
        static void get_usable_devices(vector<OpenCLPlatformDevice> *usable_devices,
                                       bool force_all = false);
 };
intern/cycles/device/opencl/opencl_util.cpp
index e425ae8e2e8de2dfa2f6d56a18ef47bb1aa5bc56..36eb70b8c856502fbfec8ee0fa571ade9040a5d1 100644 (file)
@@ -661,6 +661,27 @@ bool OpenCLInfo::device_version_check(cl_device_id device,
        return true;
 }
 
+string OpenCLInfo::get_hardware_id(string platform_name, cl_device_id device_id)
+{
+       if(platform_name == "AMD Accelerated Parallel Processing" || platform_name == "Apple") {
+               /* Use cl_amd_device_topology extension. */
+               cl_char topology[24];
+               if(clGetDeviceInfo(device_id, 0x4037, sizeof(topology), topology, NULL) == CL_SUCCESS && topology[0] == 1) {
+                       return string_printf("%02x:%02x.%01x", topology[21], topology[22], topology[23]);
+               }
+       }
+       else if(platform_name == "NVIDIA CUDA") {
+               /* Use two undocumented options of the cl_nv_device_attribute_query extension. */
+               cl_int bus_id, slot_id;
+               if(clGetDeviceInfo(device_id, 0x4008, sizeof(cl_int), &bus_id,  NULL) == CL_SUCCESS &&
+                  clGetDeviceInfo(device_id, 0x4009, sizeof(cl_int), &slot_id, NULL) == CL_SUCCESS) {
+                       return string_printf("%02x:%02x.%01x", bus_id, slot_id>>3, slot_id & 0x7);
+               }
+       }
+       /* No general way to get a hardware ID from OpenCL => give up. */
+       return "";
+}
+
 void OpenCLInfo::get_usable_devices(vector<OpenCLPlatformDevice> *usable_devices,
                                     bool force_all)
 {
@@ -773,11 +794,13 @@ void OpenCLInfo::get_usable_devices(vector<OpenCLPlatformDevice> *usable_devices
                                        continue;
                                }
                                FIRST_VLOG(2) << "Adding new device " << device_name << ".";
+                               string hardware_id = get_hardware_id(platform_name, device_id);
                                usable_devices->push_back(OpenCLPlatformDevice(platform_id,
                                                                               platform_name,
                                                                               device_id,
                                                                               device_type,
-                                                                              device_name));
+                                                                              device_name,
+                                                                              hardware_id));
                        }
                        else {
                                FIRST_VLOG(2) << "Ignoring device " << device_name
intern/cycles/render/session.h
index 8bff0f9ed15fcb51dd2bd83ad1cb5373fd487f78..1db4692e171a94436db26cb2b92631102a6233c8 100644 (file)
@@ -89,8 +89,7 @@ public:
        }
 
        bool modified(const SessionParams& params)
-       { return !(device.type == params.device.type
-               && device.id == params.device.id
+       { return !(device == params.device
                && background == params.background
                && progressive_refine == params.progressive_refine
                && output_path == params.output_path
release/scripts/startup/bl_operators/wm.py
index 1c97d213e0599d79f66a7ee094e1fd752e57f0ad..343fcdb0d222a45a28b591f9b700440dbdbe4640 100644 (file)
@@ -2163,3 +2163,32 @@ class WM_OT_addon_expand(Operator):
             info["show_expanded"] = not info["show_expanded"]
 
         return {'FINISHED'}
+
+class WM_OT_addon_userpref_show(Operator):
+    "Show add-on user preferences"
+    bl_idname = "wm.addon_userpref_show"
+    bl_label = ""
+    bl_options = {'INTERNAL'}
+
+    module = StringProperty(
+            name="Module",
+            description="Module name of the add-on to expand",
+            )
+
+    def execute(self, context):
+        import addon_utils
+
+        module_name = self.module
+
+        modules = addon_utils.modules(refresh=False)
+        mod = addon_utils.addons_fake_modules.get(module_name)
+        if mod is not None:
+            info = addon_utils.module_bl_info(mod)
+            info["show_expanded"] = True
+
+            bpy.context.user_preferences.active_section = 'ADDONS'
+            context.window_manager.addon_filter = 'All'
+            context.window_manager.addon_search = info["name"]
+            bpy.ops.screen.userpref_show('INVOKE_DEFAULT')
+
+        return {'FINISHED'}
release/scripts/startup/bl_ui/space_userpref.py
index dcafac66fcac44da5ac0e02517ad87b452808b8d..ab3ec3559e551ca08cefee879afcdea103ad6c0a 100644 (file)
@@ -429,13 +429,6 @@ class USERPREF_PT_system(Panel):
 
         col.separator()
 
-        if hasattr(system, "compute_device_type"):
-            col.label(text="Compute Device:")
-            col.row().prop(system, "compute_device_type", expand=True)
-            sub = col.row()
-            sub.active = system.compute_device_type != 'CPU'
-            sub.prop(system, "compute_device", text="")
-
         if hasattr(system, "opensubdiv_compute_type"):
             col.label(text="OpenSubdiv compute:")
             col.row().prop(system, "opensubdiv_compute_type", text="")
source/blender/makesrna/intern/rna_userdef.c
index 8ad016007f45a5b3d2532d91a19c2c5b912085f9..10807c32b910e959d3a7eb804bb9a05735d4ef3f 100644 (file)
 #include "BLT_lang.h"
 #include "GPU_buffers.h"
 
-#ifdef WITH_CYCLES
-static EnumPropertyItem compute_device_type_items[] = {
-       {USER_COMPUTE_DEVICE_NONE, "NONE", 0, "None", "Don't use compute device"},
-       {USER_COMPUTE_DEVICE_CUDA, "CUDA", 0, "CUDA", "Use CUDA for GPU acceleration"},
-       {USER_COMPUTE_DEVICE_OPENCL, "OPENCL", 0, "OpenCL", "Use OpenCL for GPU acceleration"},
-       { 0, NULL, 0, NULL, NULL}
-};
-#endif
-
 #ifdef WITH_OPENSUBDIV
 static EnumPropertyItem opensubdiv_compute_type_items[] = {
        {USER_OPENSUBDIV_COMPUTE_NONE, "NONE", 0, "None", ""},
@@ -124,8 +115,6 @@ static EnumPropertyItem rna_enum_language_default_items[] = {
 
 #include "UI_interface.h"
 
-#include "CCL_api.h"
-
 #ifdef WITH_OPENSUBDIV
 #  include "opensubdiv_capi.h"
 #endif
@@ -476,78 +465,6 @@ static PointerRNA rna_Theme_space_list_generic_get(PointerRNA *ptr)
 }
 
 
-#ifdef WITH_CYCLES
-static EnumPropertyItem *rna_userdef_compute_device_type_itemf(bContext *UNUSED(C), PointerRNA *UNUSED(ptr),
-                                                               PropertyRNA *UNUSED(prop), bool *r_free)
-{
-       EnumPropertyItem *item = NULL;
-       int totitem = 0;
-
-       /* add supported device types */
-       RNA_enum_items_add_value(&item, &totitem, compute_device_type_items, USER_COMPUTE_DEVICE_NONE);
-       if (CCL_compute_device_list(0))
-               RNA_enum_items_add_value(&item, &totitem, compute_device_type_items, USER_COMPUTE_DEVICE_CUDA);
-       if (CCL_compute_device_list(1))
-               RNA_enum_items_add_value(&item, &totitem, compute_device_type_items, USER_COMPUTE_DEVICE_OPENCL);
-
-       RNA_enum_item_end(&item, &totitem);
-       *r_free = true;
-
-       return item;
-}
-
-static int rna_userdef_compute_device_get(PointerRNA *UNUSED(ptr))
-{
-       if (U.compute_device_type == USER_COMPUTE_DEVICE_NONE)
-               return 0;
-
-       return U.compute_device_id;
-}
-
-static EnumPropertyItem *rna_userdef_compute_device_itemf(bContext *UNUSED(C), PointerRNA *UNUSED(ptr),
-                                                          PropertyRNA *UNUSED(prop), bool *r_free)
-{
-       EnumPropertyItem tmp = {0, "", 0, "", ""};
-       EnumPropertyItem *item = NULL;
-       int totitem = 0;
-       
-       if (U.compute_device_type == USER_COMPUTE_DEVICE_NONE) {
-               /* only add a single CPU device */
-               tmp.value = 0;
-               tmp.name = "CPU";
-               tmp.identifier = "CPU";
-               RNA_enum_item_add(&item, &totitem, &tmp);
-       }
-       else {
-               /* get device list from cycles. it would be good to make this generic
-                * once we have more subsystems using opencl, for now this is easiest */
-               int opencl = (U.compute_device_type == USER_COMPUTE_DEVICE_OPENCL);
-               CCLDeviceInfo *devices = CCL_compute_device_list(opencl);
-               int a;
-
-               if (devices) {
-                       for (a = 0; devices[a].identifier[0]; a++) {
-                               tmp.value = devices[a].value;
-                               tmp.identifier = devices[a].identifier;
-                               tmp.name = devices[a].name;
-                               RNA_enum_item_add(&item, &totitem, &tmp);
-                       }
-               }
-               else {
-                       tmp.value = 0;
-                       tmp.name = "CPU";
-                       tmp.identifier = "CPU";
-                       RNA_enum_item_add(&item, &totitem, &tmp);
-               }
-       }
-
-       RNA_enum_item_end(&item, &totitem);
-       *r_free = true;
-
-       return item;
-}
-#endif
-
 #ifdef WITH_OPENSUBDIV
 static EnumPropertyItem *rna_userdef_opensubdiv_compute_type_itemf(bContext *UNUSED(C), PointerRNA *UNUSED(ptr),
                                                                    PropertyRNA *UNUSED(prop), bool *r_free)
@@ -3977,13 +3894,6 @@ static void rna_def_userdef_system(BlenderRNA *brna)
                {0, NULL, 0, NULL, NULL}
        };
 
-#ifdef WITH_CYCLES
-       static EnumPropertyItem compute_device_items[] = {
-               {0, "CPU", 0, "CPU", ""},
-               { 0, NULL, 0, NULL, NULL}
-       };
-#endif
-
        static EnumPropertyItem image_draw_methods[] = {
                {IMAGE_DRAW_METHOD_2DTEXTURE, "2DTEXTURE", 0, "2D Texture", "Use CPU for display transform and draw image with 2D texture"},
                {IMAGE_DRAW_METHOD_GLSL, "GLSL", 0, "GLSL", "Use GLSL shaders for display transform and draw image with 2D texture"},
@@ -4275,23 +4185,6 @@ static void rna_def_userdef_system(BlenderRNA *brna)
                                 "Draw tool/property regions over the main region, when using Triple Buffer");
        RNA_def_property_update(prop, 0, "rna_userdef_dpi_update");     
 
-#ifdef WITH_CYCLES
-       prop = RNA_def_property(srna, "compute_device_type", PROP_ENUM, PROP_NONE);
-       RNA_def_property_flag(prop, PROP_ENUM_NO_CONTEXT);
-       RNA_def_property_enum_sdna(prop, NULL, "compute_device_type");
-       RNA_def_property_enum_items(prop, compute_device_type_items);
-       RNA_def_property_enum_funcs(prop, NULL, NULL, "rna_userdef_compute_device_type_itemf");
-       RNA_def_property_ui_text(prop, "Compute Device Type", "Device to use for computation (rendering with Cycles)");
-       RNA_def_property_update(prop, NC_SPACE | ND_SPACE_PROPERTIES, NULL);
-
-       prop = RNA_def_property(srna, "compute_device", PROP_ENUM, PROP_NONE);
-       RNA_def_property_flag(prop, PROP_ENUM_NO_CONTEXT);
-       RNA_def_property_enum_sdna(prop, NULL, "compute_device_id");
-       RNA_def_property_enum_items(prop, compute_device_items);
-       RNA_def_property_enum_funcs(prop, "rna_userdef_compute_device_get", NULL, "rna_userdef_compute_device_itemf");
-       RNA_def_property_ui_text(prop, "Compute Device", "Device to use for computation");
-#endif
-
 #ifdef WITH_OPENSUBDIV
        prop = RNA_def_property(srna, "opensubdiv_compute_type", PROP_ENUM, PROP_NONE);
        RNA_def_property_flag(prop, PROP_ENUM_NO_CONTEXT);
source/blenderplayer/bad_level_call_stubs/stubs.c
index d8a4ddc8d4fde69411181ee79adcc2f52ba6adb2..6040dfff64428dfcab22aed91093459a81956d18 100644 (file)
@@ -142,7 +142,6 @@ struct wmWindowManager;
 #  pragma GCC diagnostic ignored "-Wunused-parameter"
 #endif
 
-#include "../../intern/cycles/blender/CCL_api.h"
 #include "../../intern/dualcon/dualcon.h"
 #include "../../intern/elbeem/extern/elbeem.h"
 #include "../blender/blenkernel/BKE_modifier.h"
@@ -770,10 +769,6 @@ void *dualcon(const DualConInput *input_mesh,
               float scale,
               int depth) RET_ZERO
 
-/* intern/cycles */
-struct CCLDeviceInfo;
-struct CCLDeviceInfo *CCL_compute_device_list(int opencl) RET_NULL
-
 /* compositor */
 void COM_execute(RenderData *rd, Scene *scene, bNodeTree *editingtree, int rendering,
                  const ColorManagedViewSettings *viewSettings, const ColorManagedDisplaySettings *displaySettings,