Merge branch 'blender2.7'
author     Brecht Van Lommel <brechtvanlommel@gmail.com>
           Wed, 30 Jan 2019 17:36:54 +0000 (18:36 +0100)
committer  Brecht Van Lommel <brechtvanlommel@gmail.com>
           Wed, 30 Jan 2019 17:36:54 +0000 (18:36 +0100)
intern/cycles/app/cycles_standalone.cpp
intern/cycles/blender/addon/properties.py
intern/cycles/blender/blender_python.cpp
intern/cycles/blender/blender_sync.cpp
intern/cycles/device/device.cpp
intern/cycles/device/device.h

diff --git a/intern/cycles/app/cycles_standalone.cpp b/intern/cycles/app/cycles_standalone.cpp
index ff2e015..9c899a3 100644
@@ -363,13 +363,8 @@ static void options_parse(int argc, const char **argv)
        string devicename = "CPU";
        bool list = false;
 
-       vector<DeviceType>& types = Device::available_types();
-
-       /* TODO(sergey): Here's a feedback loop happens: on the one hand we want
-        * the device list to be printed in help message, on the other hand logging
-        * is not initialized yet so we wouldn't have debug log happening in the
-        * device initialization.
-        */
+       /* List devices for which support is compiled in. */
+       vector<DeviceType> types = Device::available_types();
        foreach(DeviceType type, types) {
                if(device_names != "")
                        device_names += ", ";
@@ -421,7 +416,7 @@ static void options_parse(int argc, const char **argv)
        }
 
        if(list) {
-               vector<DeviceInfo>& devices = Device::available_devices();
+               vector<DeviceInfo> devices = Device::available_devices();
                printf("Devices:\n");
 
                foreach(DeviceInfo& info, devices) {
@@ -456,15 +451,12 @@ static void options_parse(int argc, const char **argv)
 
        /* find matching device */
        DeviceType device_type = Device::type_from_string(devicename.c_str());
-       vector<DeviceInfo>& devices = Device::available_devices();
-       bool device_available = false;
+       vector<DeviceInfo> devices = Device::available_devices(DEVICE_MASK(device_type));
 
-       foreach(DeviceInfo& device, devices) {
-               if(device_type == device.type) {
-                       options.session_params.device = device;
-                       device_available = true;
-                       break;
-               }
+       bool device_available = false;
+       if(!devices.empty()) {
+               options.session_params.device = devices.front();
+               device_available = true;
        }
 
        /* handle invalid configurations */
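
The standalone app now resolves the requested device type through the mask-based
query instead of scanning the full device list. A minimal sketch of that flow,
assuming the Cycles device headers; resolve_device() is a hypothetical helper and
not part of this commit:

    /* Hypothetical helper illustrating the new lookup. */
    static bool resolve_device(const string& devicename, DeviceInfo& info)
    {
            DeviceType type = Device::type_from_string(devicename.c_str());
            if(type == DEVICE_NONE) {
                    return false;  /* Unknown device name. */
            }
            /* Only the requested backend is enumerated, so e.g. a broken OpenCL
             * driver is never touched when the user asks for CUDA. */
            vector<DeviceInfo> devices = Device::available_devices(DEVICE_MASK(type));
            if(devices.empty()) {
                    return false;  /* No device of this type is available. */
            }
            info = devices.front();
            return true;
    }
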
diff --git a/intern/cycles/blender/addon/properties.py b/intern/cycles/blender/addon/properties.py
index 8250100..c60db9f 100644
@@ -1453,7 +1453,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
     def get_devices(self):
         import _cycles
         # Layout of the device tuples: (Name, Type, Persistent ID)
-        device_list = _cycles.available_devices()
+        device_list = _cycles.available_devices(self.compute_device_type)
         # Make sure device entries are up to date and not referenced before
         # we know we don't add new devices. This way we guarantee to not
         # hold pointers to a resized array.
@@ -1477,7 +1477,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
 
     def get_num_gpu_devices(self):
         import _cycles
-        device_list = _cycles.available_devices()
+        device_list = _cycles.available_devices(self.compute_device_type)
         num = 0
         for device in device_list:
             if device[1] != self.compute_device_type:
@@ -1490,25 +1490,32 @@ class CyclesPreferences(bpy.types.AddonPreferences):
     def has_active_device(self):
         return self.get_num_gpu_devices() > 0
 
-    def draw_impl(self, layout, context):
-        available_device_types = self.get_device_types(context)
-        if len(available_device_types) == 1:
-            layout.label(text="No compatible GPUs found", icon='INFO')
+    def _draw_devices(self, layout, device_type, devices):
+        box = layout.box()
+
+        found_device = False
+        for device in devices:
+            if device.type == device_type:
+                found_device = True
+                break
+
+        if not found_device:
+            box.label(text="No compatible GPUs found", icon='INFO')
             return
-        layout.row().prop(self, "compute_device_type", expand=True)
 
-        cuda_devices, opencl_devices = self.get_devices()
-        row = layout.row()
+        for device in devices:
+            box.prop(device, "use", text=device.name)
 
-        if self.compute_device_type == 'CUDA' and cuda_devices:
-            box = row.box()
-            for device in cuda_devices:
-                box.prop(device, "use", text=device.name)
+    def draw_impl(self, layout, context):
+        row = layout.row()
+        row.prop(self, "compute_device_type", expand=True)
 
-        if self.compute_device_type == 'OPENCL' and opencl_devices:
-            box = row.box()
-            for device in opencl_devices:
-                box.prop(device, "use", text=device.name)
+        cuda_devices, opencl_devices = self.get_devices()
+        row = layout.row()
+        if self.compute_device_type == 'CUDA':
+            self._draw_devices(row, 'CUDA', cuda_devices)
+        elif self.compute_device_type == 'OPENCL':
+            self._draw_devices(row, 'OPENCL', opencl_devices)
 
     def draw(self, context):
         self.draw_impl(self.layout, context)
diff --git a/intern/cycles/blender/blender_python.cpp b/intern/cycles/blender/blender_python.cpp
index bf5c243..d111a1c 100644
@@ -388,9 +388,18 @@ static PyObject *sync_func(PyObject * /*self*/, PyObject *args)
        Py_RETURN_NONE;
 }
 
-static PyObject *available_devices_func(PyObject * /*self*/, PyObject * /*args*/)
+static PyObject *available_devices_func(PyObject * /*self*/, PyObject * args)
 {
-       vector<DeviceInfo>& devices = Device::available_devices();
+       const char *type_name;
+       if(!PyArg_ParseTuple(args, "s", &type_name)) {
+               return NULL;
+       }
+
+       DeviceType type = Device::type_from_string(type_name);
+       uint mask = (type == DEVICE_NONE) ? DEVICE_MASK_ALL : DEVICE_MASK(type);
+       mask |= DEVICE_MASK_CPU;
+
+       vector<DeviceInfo> devices = Device::available_devices(mask);
        PyObject *ret = PyTuple_New(devices.size());
 
        for(size_t i = 0; i < devices.size(); i++) {
@@ -746,11 +755,11 @@ static PyObject *enable_print_stats_func(PyObject * /*self*/, PyObject * /*args*
 
 static PyObject *get_device_types_func(PyObject * /*self*/, PyObject * /*args*/)
 {
-       vector<DeviceInfo>& devices = Device::available_devices();
+       vector<DeviceType> device_types = Device::available_types();
        bool has_cuda = false, has_opencl = false;
-       for(int i = 0; i < devices.size(); i++) {
-               has_cuda   |= (devices[i].type == DEVICE_CUDA);
-               has_opencl |= (devices[i].type == DEVICE_OPENCL);
+       foreach(DeviceType device_type, device_types) {
+               has_cuda   |= (device_type == DEVICE_CUDA);
+               has_opencl |= (device_type == DEVICE_OPENCL);
        }
        PyObject *list = PyTuple_New(2);
        PyTuple_SET_ITEM(list, 0, PyBool_FromLong(has_cuda));
@@ -772,7 +781,7 @@ static PyMethodDef methods[] = {
        {"osl_update_node", osl_update_node_func, METH_VARARGS, ""},
        {"osl_compile", osl_compile_func, METH_VARARGS, ""},
 #endif
-       {"available_devices", available_devices_func, METH_NOARGS, ""},
+       {"available_devices", available_devices_func, METH_VARARGS, ""},
        {"system_info", system_info_func, METH_NOARGS, ""},
 #ifdef WITH_OPENCL
        {"opencl_disable", opencl_disable_func, METH_NOARGS, ""},
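
With METH_VARARGS, _cycles.available_devices() now requires one positional string
argument, the compute device type from the add-on preferences as in the
properties.py change above, e.g. _cycles.available_devices('CUDA'). The mask it
builds always includes the CPU. A short sketch of the resulting masks, assuming
the DeviceTypeMask values declared in device.h further below; mask_for_type_name()
is a hypothetical helper:

    static uint mask_for_type_name(const char *type_name)
    {
            DeviceType type = Device::type_from_string(type_name);
            uint mask = (type == DEVICE_NONE) ? DEVICE_MASK_ALL : DEVICE_MASK(type);
            return mask | DEVICE_MASK_CPU;  /* CPU devices are always listed. */
    }

    /* mask_for_type_name("CUDA")   == DEVICE_MASK_CUDA | DEVICE_MASK_CPU
     * mask_for_type_name("OPENCL") == DEVICE_MASK_OPENCL | DEVICE_MASK_CPU
     * mask_for_type_name("NONE")   == DEVICE_MASK_ALL (unrecognized names map to DEVICE_NONE) */
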
diff --git a/intern/cycles/blender/blender_sync.cpp b/intern/cycles/blender/blender_sync.cpp
index 60192bf..e41a80a 100644
@@ -741,24 +741,18 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine,
        /* Background */
        params.background = background;
 
-       /* device type */
-       vector<DeviceInfo>& devices = Device::available_devices();
-
-       /* device default CPU */
-       foreach(DeviceInfo& device, devices) {
-               if(device.type == DEVICE_CPU) {
-                       params.device = device;
-                       break;
-               }
-       }
+       /* Default to CPU device. */
+       params.device = Device::available_devices(DEVICE_MASK_CPU).front();
 
        if(get_enum(cscene, "device") == 2) {
-               /* find network device */
-               foreach(DeviceInfo& info, devices)
-                       if(info.type == DEVICE_NETWORK)
-                               params.device = info;
+               /* Find network device. */
+               vector<DeviceInfo> devices = Device::available_devices(DEVICE_MASK_NETWORK);
+               if(!devices.empty()) {
+                       params.device = devices.front();
+               }
        }
        else if(get_enum(cscene, "device") == 1) {
+               /* Find cycles preferences. */
                PointerRNA b_preferences;
 
                BL::Preferences::addons_iterator b_addon_iter;
@@ -769,6 +763,7 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine,
                        }
                }
 
+               /* Test if we are using GPU devices. */
                enum ComputeDevice {
                        COMPUTE_DEVICE_CPU = 0,
                        COMPUTE_DEVICE_CUDA = 1,
@@ -782,15 +777,20 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine,
                                                                       COMPUTE_DEVICE_CPU);
 
                if(compute_device != COMPUTE_DEVICE_CPU) {
+                       /* Query GPU devices with matching types. */
+                       uint mask = DEVICE_MASK_CPU;
+                       if(compute_device == COMPUTE_DEVICE_CUDA) {
+                               mask |= DEVICE_MASK_CUDA;
+                       }
+                       else if(compute_device == COMPUTE_DEVICE_OPENCL) {
+                               mask |= DEVICE_MASK_OPENCL;
+                       }
+                       vector<DeviceInfo> devices = Device::available_devices(mask);
+
+                       /* Match device preferences and available devices. */
                        vector<DeviceInfo> used_devices;
                        RNA_BEGIN(&b_preferences, device, "devices") {
-                               ComputeDevice device_type = (ComputeDevice)get_enum(device,
-                                                                                   "type",
-                                                                                   COMPUTE_DEVICE_NUM,
-                                                                                   COMPUTE_DEVICE_CPU);
-
-                               if(get_boolean(device, "use") &&
-                                  (device_type == compute_device || device_type == COMPUTE_DEVICE_CPU)) {
+                               if(get_boolean(device, "use")) {
                                        string id = get_string(device, "id");
                                        foreach(DeviceInfo& info, devices) {
                                                if(info.id == id) {
@@ -801,10 +801,7 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine,
                                }
                        } RNA_END;
 
-                       if(used_devices.size() == 1) {
-                               params.device = used_devices[0];
-                       }
-                       else if(used_devices.size() > 1) {
+                       if(!used_devices.empty()) {
                                params.device = Device::get_multi_device(used_devices,
                                                                         params.threads,
                                                                         params.background);
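
get_multi_device() is relaxed in device.cpp below to also accept a single device,
so the caller no longer special-cases used_devices.size() == 1. A small
illustration of that behaviour; the threads and background arguments are not used
in the single-device case:

    /* Illustration only; relies on get_multi_device() returning the sole entry
     * directly when the vector holds one element (see device.cpp below). */
    vector<DeviceInfo> used_devices = Device::available_devices(DEVICE_MASK_CPU);
    DeviceInfo device = Device::get_multi_device(used_devices,
                                                 /*threads*/ 0,
                                                 /*background*/ true);
    /* With the usual single CPU entry, device is used_devices.front(),
     * not a DEVICE_MULTI aggregate. */
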
diff --git a/intern/cycles/device/device.cpp b/intern/cycles/device/device.cpp
index 6ff94b4..5b53dc9 100644
@@ -36,8 +36,11 @@ CCL_NAMESPACE_BEGIN
 bool Device::need_types_update = true;
 bool Device::need_devices_update = true;
 thread_mutex Device::device_mutex;
-vector<DeviceType> Device::types;
-vector<DeviceInfo> Device::devices;
+vector<DeviceInfo> Device::opencl_devices;
+vector<DeviceInfo> Device::cuda_devices;
+vector<DeviceInfo> Device::cpu_devices;
+vector<DeviceInfo> Device::network_devices;
+uint Device::devices_initialized_mask = 0;
 
 /* Device Requested Features */
 
@@ -423,70 +426,108 @@ string Device::string_from_type(DeviceType type)
        return "";
 }
 
-vector<DeviceType>& Device::available_types()
+vector<DeviceType> Device::available_types()
 {
-       thread_scoped_lock lock(device_mutex);
-       if(need_types_update) {
-               types.clear();
-               types.push_back(DEVICE_CPU);
+       vector<DeviceType> types;
+       types.push_back(DEVICE_CPU);
 #ifdef WITH_CUDA
-               if(device_cuda_init()) {
-                       types.push_back(DEVICE_CUDA);
-               }
+       types.push_back(DEVICE_CUDA);
 #endif
 #ifdef WITH_OPENCL
-               if(device_opencl_init()) {
-                       types.push_back(DEVICE_OPENCL);
-               }
+       types.push_back(DEVICE_OPENCL);
 #endif
 #ifdef WITH_NETWORK
-               types.push_back(DEVICE_NETWORK);
+       types.push_back(DEVICE_NETWORK);
 #endif
-               need_types_update = false;
-       }
        return types;
 }
 
-vector<DeviceInfo>& Device::available_devices()
+vector<DeviceInfo> Device::available_devices(uint mask)
 {
+       /* Lazy initialize devices. On some platforms OpenCL or CUDA drivers can
+        * be broken and cause crashes when only trying to get device info, so
+        * we don't want to do any initialization until the user chooses to. */
        thread_scoped_lock lock(device_mutex);
-       if(need_devices_update) {
-               devices.clear();
+       vector<DeviceInfo> devices;
+
 #ifdef WITH_OPENCL
-               if(device_opencl_init()) {
-                       device_opencl_info(devices);
+       if(mask & DEVICE_MASK_OPENCL) {
+               if(!(devices_initialized_mask & DEVICE_MASK_OPENCL)) {
+                       if(device_opencl_init()) {
+                               device_opencl_info(opencl_devices);
+                       }
+                       devices_initialized_mask |= DEVICE_MASK_OPENCL;
                }
+               foreach(DeviceInfo& info, opencl_devices) {
+                       devices.push_back(info);
+               }
+       }
 #endif
+
 #ifdef WITH_CUDA
-               if(device_cuda_init()) {
-                       device_cuda_info(devices);
+       if(mask & DEVICE_MASK_CUDA) {
+               if(!(devices_initialized_mask & DEVICE_MASK_CUDA)) {
+                       if(device_cuda_init()) {
+                               device_cuda_info(cuda_devices);
+                       }
+                       devices_initialized_mask |= DEVICE_MASK_CUDA;
                }
+               foreach(DeviceInfo& info, cuda_devices) {
+                       devices.push_back(info);
+               }
+       }
 #endif
-               device_cpu_info(devices);
+
+       if(mask & DEVICE_MASK_CPU) {
+               if(!(devices_initialized_mask & DEVICE_MASK_CPU)) {
+                       device_cpu_info(cpu_devices);
+                       devices_initialized_mask |= DEVICE_MASK_CPU;
+               }
+               foreach(DeviceInfo& info, cpu_devices) {
+                       devices.push_back(info);
+               }
+       }
+
 #ifdef WITH_NETWORK
-               device_network_info(devices);
-#endif
-               need_devices_update = false;
+       if(mask & DEVICE_MASK_NETWORK) {
+               if(!(devices_initialized_mask & DEVICE_MASK_NETWORK)) {
+                       device_network_info(network_devices);
+                       devices_initialized_mask |= DEVICE_MASK_NETWORK;
+               }
+               foreach(DeviceInfo& info, network_devices) {
+                       devices.push_back(info);
+               }
        }
+#endif
+
        return devices;
 }
 
-string Device::device_capabilities()
+string Device::device_capabilities(uint mask)
 {
-       string capabilities = "CPU device capabilities: ";
-       capabilities += device_cpu_capabilities() + "\n";
+       thread_scoped_lock lock(device_mutex);
+       string capabilities = "";
+
+       if(mask & DEVICE_MASK_CPU) {
+               capabilities += "\nCPU device capabilities: ";
+               capabilities += device_cpu_capabilities() + "\n";
+       }
 
 #ifdef WITH_OPENCL
-       if(device_opencl_init()) {
-               capabilities += "\nOpenCL device capabilities:\n";
-               capabilities += device_opencl_capabilities();
+       if(mask & DEVICE_MASK_OPENCL) {
+               if(device_opencl_init()) {
+                       capabilities += "\nOpenCL device capabilities:\n";
+                       capabilities += device_opencl_capabilities();
+               }
        }
 #endif
 
 #ifdef WITH_CUDA
-       if(device_cuda_init()) {
-               capabilities += "\nCUDA device capabilities:\n";
-               capabilities += device_cuda_capabilities();
+       if(mask & DEVICE_MASK_CUDA) {
+               if(device_cuda_init()) {
+                       capabilities += "\nCUDA device capabilities:\n";
+                       capabilities += device_cuda_capabilities();
+               }
        }
 #endif
 
@@ -495,7 +536,12 @@ string Device::device_capabilities()
 
 DeviceInfo Device::get_multi_device(const vector<DeviceInfo>& subdevices, int threads, bool background)
 {
-       assert(subdevices.size() > 1);
+       assert(subdevices.size() > 0);
+
+       if(subdevices.size() == 1) {
+               /* No multi device needed. */
+               return subdevices.front();
+       }
 
        DeviceInfo info;
        info.type = DEVICE_MULTI;
@@ -549,16 +595,16 @@ DeviceInfo Device::get_multi_device(const vector<DeviceInfo>& subdevices, int th
 
 void Device::tag_update()
 {
-       need_types_update = true;
-       need_devices_update = true;
+       free_memory();
 }
 
 void Device::free_memory()
 {
-       need_types_update = true;
-       need_devices_update = true;
-       types.free_memory();
-       devices.free_memory();
+       devices_initialized_mask = 0;
+       cuda_devices.clear();
+       opencl_devices.clear();
+       cpu_devices.clear();
+       network_devices.clear();
 }
 
 CCL_NAMESPACE_END
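
available_devices() now returns by value and lazily enumerates only the backends
named in the mask, caching each backend's list in a static vector guarded by
device_mutex. A usage sketch of that caching behaviour, assuming the masks from
device.h below:

    vector<DeviceInfo> cpu_only = Device::available_devices(DEVICE_MASK_CPU);
    /* First call: enumerates and caches CPU devices; the OpenCL and CUDA
     * drivers are not loaded at all. */

    vector<DeviceInfo> cpu_cuda = Device::available_devices(DEVICE_MASK_CPU | DEVICE_MASK_CUDA);
    /* Second call: CUDA is initialized once and cached, the CPU list is reused
     * from the cache. Device::free_memory() (also called from tag_update())
     * clears the caches, so the next query re-enumerates. */
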
diff --git a/intern/cycles/device/device.h b/intern/cycles/device/device.h
index fc9e57d..f58ce0a 100644
@@ -40,7 +40,7 @@ class RenderTile;
 /* Device Types */
 
 enum DeviceType {
-       DEVICE_NONE,
+       DEVICE_NONE = 0,
        DEVICE_CPU,
        DEVICE_OPENCL,
        DEVICE_CUDA,
@@ -48,6 +48,16 @@ enum DeviceType {
        DEVICE_MULTI
 };
 
+enum DeviceTypeMask {
+       DEVICE_MASK_CPU = (1 << DEVICE_CPU),
+       DEVICE_MASK_OPENCL = (1 << DEVICE_OPENCL),
+       DEVICE_MASK_CUDA = (1 << DEVICE_CUDA),
+       DEVICE_MASK_NETWORK = (1 << DEVICE_NETWORK),
+       DEVICE_MASK_ALL = ~0
+};
+
+#define DEVICE_MASK(type) (DeviceTypeMask)(1 << type)
+
 class DeviceInfo {
 public:
        DeviceType type;
@@ -342,9 +352,9 @@ public:
 
        static DeviceType type_from_string(const char *name);
        static string string_from_type(DeviceType type);
-       static vector<DeviceType>& available_types();
-       static vector<DeviceInfo>& available_devices();
-       static string device_capabilities();
+       static vector<DeviceType> available_types();
+       static vector<DeviceInfo> available_devices(uint device_type_mask = DEVICE_MASK_ALL);
+       static string device_capabilities(uint device_type_mask = DEVICE_MASK_ALL);
        static DeviceInfo get_multi_device(const vector<DeviceInfo>& subdevices,
                                           int threads,
                                           bool background);
@@ -371,8 +381,11 @@ private:
 	/* Indicates whether device types and devices lists were initialized. */
        static bool need_types_update, need_devices_update;
        static thread_mutex device_mutex;
-       static vector<DeviceType> types;
-       static vector<DeviceInfo> devices;
+       static vector<DeviceInfo> cuda_devices;
+       static vector<DeviceInfo> opencl_devices;
+       static vector<DeviceInfo> cpu_devices;
+       static vector<DeviceInfo> network_devices;
+       static uint devices_initialized_mask;
 };
 
 CCL_NAMESPACE_END
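
DEVICE_MASK() turns a DeviceType enum value into its single-bit mask, so masks can
be combined with | and tested with &. Worked values, following the enum ordering
declared above (DEVICE_CPU == 1, DEVICE_OPENCL == 2, DEVICE_CUDA == 3,
DEVICE_NETWORK == 4):

    uint cuda_and_cpu = DEVICE_MASK(DEVICE_CUDA) | DEVICE_MASK_CPU;
    /* == (1 << 3) | (1 << 1) == 8 | 2 == 10 */
    bool wants_cuda = (cuda_and_cpu & DEVICE_MASK_CUDA) != 0;      /* true */
    bool wants_opencl = (cuda_and_cpu & DEVICE_MASK_OPENCL) != 0;  /* false */
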