#define __KERNEL_GLOBALS_H__
#ifdef __KERNEL_CPU__
-#include "util/util_vector.h"
+# include "util/util_vector.h"
#endif
CCL_NAMESPACE_BEGIN
return x - (float)i;
}
+ccl_device_inline uint kernel_decode_image_interpolation(uint4 info)
+{
+ return (info.w & (1 << 0)) ? INTERPOLATION_CLOSEST : INTERPOLATION_LINEAR;
+}
+
+ccl_device_inline uint kernel_decode_image_extension(uint4 info)
+{
+ if(info.w & (1 << 1)) {
+ return EXTENSION_REPEAT;
+ }
+ else if(info.w & (1 << 2)) {
+ return EXTENSION_EXTEND;
+ }
+ else {
+ return EXTENSION_CLIP;
+ }
+}
+
ccl_device float4 kernel_tex_image_interp(KernelGlobals *kg, int id, float x, float y)
{
uint4 info = kernel_tex_fetch(__tex_image_packed_info, id*2);
uint width = info.x;
uint height = info.y;
uint offset = info.z;
-
- /* Image Options */
- uint interpolation = (info.w & (1 << 0)) ? INTERPOLATION_CLOSEST : INTERPOLATION_LINEAR;
- uint extension;
- if(info.w & (1 << 1))
- extension = EXTENSION_REPEAT;
- else if(info.w & (1 << 2))
- extension = EXTENSION_EXTEND;
- else
- extension = EXTENSION_CLIP;
-
+ /* Decode image options. */
+ uint interpolation = kernel_decode_image_interpolation(info);
+ uint extension = kernel_decode_image_extension(info);
+ /* Actual sampling. */
float4 r;
int ix, iy, nix, niy;
if(interpolation == INTERPOLATION_CLOSEST) {
r += ty*(1.0f - tx)*svm_image_texture_read(kg, id, offset + ix + niy*width);
r += ty*tx*svm_image_texture_read(kg, id, offset + nix + niy*width);
}
-
return r;
}
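
Each image slot occupies two consecutive uint4 entries in __tex_image_packed_info, which both the 2D sampler above and the 3D sampler below rely on. A minimal sketch of that layout, written as a hypothetical helper in the kernel's idiom (not part of the patch):

    /* Sketch only: how one packed-info pair decodes; the layout matches the
     * make_uint4() writes in ImageManager::device_pack_images_type(). */
    ccl_device_inline void example_unpack_image_info(KernelGlobals *kg, int id,
                                                     uint *width, uint *height,
                                                     uint *offset, uint *depth,
                                                     uint *interpolation, uint *extension)
    {
    	uint4 info = kernel_tex_fetch(__tex_image_packed_info, id*2);
    	uint4 info2 = kernel_tex_fetch(__tex_image_packed_info, id*2 + 1);
    	*width = info.x;    /* Width in pixels. */
    	*height = info.y;   /* Height in pixels. */
    	*offset = info.z;   /* Start of this image inside the packed pixel array. */
    	*depth = info2.x;   /* Depth, only used by the 3D sampler. */
    	*interpolation = kernel_decode_image_interpolation(info);
    	*extension = kernel_decode_image_extension(info);
    }
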
uint height = info.y;
uint offset = info.z;
uint depth = kernel_tex_fetch(__tex_image_packed_info, id*2+1).x;
-
- /* Image Options */
- uint interpolation = (info.w & (1 << 0)) ? INTERPOLATION_CLOSEST : INTERPOLATION_LINEAR;
- uint extension;
- if(info.w & (1 << 1))
- extension = EXTENSION_REPEAT;
- else if(info.w & (1 << 2))
- extension = EXTENSION_EXTEND;
- else
- extension = EXTENSION_CLIP;
-
+ /* Decode image options. */
+ uint interpolation = kernel_decode_image_interpolation(info);
+ uint extension = kernel_decode_image_extension(info);
+ /* Actual sampling. */
float4 r;
int ix, iy, iz, nix, niy, niz;
if(interpolation == INTERPOLATION_CLOSEST) {
if(extension == EXTENSION_CLIP) {
if(x < 0.0f || y < 0.0f || z < 0.0f ||
x > 1.0f || y > 1.0f || z > 1.0f)
- {
+ {
return make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
}
niz = svm_image_texture_wrap_periodic(iz+1, depth);
}
else {
- if(extension == EXTENSION_CLIP)
+ if(extension == EXTENSION_CLIP) {
if(x < 0.0f || y < 0.0f || z < 0.0f ||
x > 1.0f || y > 1.0f || z > 1.0f)
{
return make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
+ }
/* Fall through. */
/* EXTENSION_EXTEND */
nix = svm_image_texture_wrap_clamp(ix+1, width);
r += tz*(1.0f - ty)*tx*svm_image_texture_read(kg, id, offset + nix + iy*width + niz*width*height);
r += tz*ty*(1.0f - tx)*svm_image_texture_read(kg, id, offset + ix + niy*width + niz*width*height);
r += tz*ty*tx*svm_image_texture_read(kg, id, offset + nix + niy*width + niz*width*height);
-
}
-
return r;
}
texture_image_float *tex = NULL;
int id = atoi(name + strlen("__tex_image_float_"));
int array_index = kernel_tex_index(id);
-
+
if(array_index >= 0) {
if(array_index >= kg->texture_float_images.size()) {
kg->texture_float_images.resize(array_index+1);
texture_image_uchar4 *tex = NULL;
int id = atoi(name + strlen("__tex_image_byte4_"));
int array_index = kernel_tex_index(id);
-
+
if(array_index >= 0) {
if(array_index >= kg->texture_byte4_images.size()) {
kg->texture_byte4_images.resize(array_index+1);
texture_image_uchar *tex = NULL;
int id = atoi(name + strlen("__tex_image_byte_"));
int array_index = kernel_tex_index(id);
-
+
if(array_index >= 0) {
if(array_index >= kg->texture_byte_images.size()) {
kg->texture_byte_images.resize(array_index+1);
texture_image_half4 *tex = NULL;
int id = atoi(name + strlen("__tex_image_half4_"));
int array_index = kernel_tex_index(id);
-
+
if(array_index >= 0) {
if(array_index >= kg->texture_half4_images.size()) {
kg->texture_half4_images.resize(array_index+1);
texture_image_half *tex = NULL;
int id = atoi(name + strlen("__tex_image_half_"));
int array_index = kernel_tex_index(id);
-
+
if(array_index >= 0) {
if(array_index >= kg->texture_half_images.size()) {
kg->texture_half_images.resize(array_index+1);
# if defined(__KERNEL_CUDA__)
# if __CUDA_ARCH__ >= 300
CUtexObject tex = kernel_tex_fetch(__bindless_mapping, id);
- if(kernel_tex_type(id) == IMAGE_DATA_TYPE_FLOAT4 || kernel_tex_type(id) == IMAGE_DATA_TYPE_BYTE4 || kernel_tex_type(id) == IMAGE_DATA_TYPE_HALF4)
+ const int texture_type = kernel_tex_type(id);
+ if(texture_type == IMAGE_DATA_TYPE_FLOAT4 ||
+ texture_type == IMAGE_DATA_TYPE_BYTE4 ||
+ texture_type == IMAGE_DATA_TYPE_HALF4)
+ {
r = kernel_tex_image_interp_3d_float4(tex, co.x, co.y, co.z);
+ }
else {
float f = kernel_tex_image_interp_3d_float(tex, co.x, co.y, co.z);
r = make_float4(f, f, f, 1.0f);
max_num_images = TEX_NUM_MAX;
has_half_images = true;
cuda_fermi_limits = false;
-
+
if(device_type == DEVICE_CUDA) {
if(!info.has_bindless_textures) {
/* CUDA Fermi hardware (SM 2.x) has a hard limit on the number of textures */
else if(device_type == DEVICE_OPENCL) {
has_half_images = false;
}
-
+
for(size_t type = 0; type < IMAGE_DATA_NUM_TYPES; type++) {
tex_num_images[type] = 0;
}
}
ImageDataType ImageManager::get_image_metadata(const string& filename,
- void *builtin_data,
- bool& is_linear)
+ void *builtin_data,
+ bool& is_linear)
{
bool is_float = false, is_half = false;
is_linear = false;
}
}
+int ImageManager::max_flattened_slot(ImageDataType type)
+{
+ if(tex_num_images[type] == 0) {
+ /* No textures for the type, no slots need allocation. */
+ return 0;
+ }
+ return type_index_to_flattened_slot(tex_num_images[type], type);
+}
+
/* The lower three bits of a device texture slot number indicate its type.
* These functions convert the slot ids from ImageManager "images" ones
- * to device ones and vice versa.
+ * to device ones and vice versa.
*
* There are special cases for CUDA Fermi, since there we have only 90 image texture
- * slots available and shold keep the flattended numbers in the 0-89 range.
+ * slots available and should keep the flattened numbers in the 0-89 range.
*/
int ImageManager::type_index_to_flattened_slot(int slot, ImageDataType type)
{
return -1;
}
}
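
Ignoring the CUDA Fermi special case, the mapping described in the comment above reduces to a shift-and-mask pair over the three type bits. A standalone sketch (the constant and function names are illustrative, not the actual ImageManager methods or defines):

    /* Sketch only: non-Fermi flattening of (type index, type) <-> flat slot. */
    #define EXAMPLE_TYPE_SHIFT 3                            /* Lower three bits hold the type. */
    #define EXAMPLE_TYPE_MASK ((1 << EXAMPLE_TYPE_SHIFT) - 1)

    static int example_flatten(int slot, ImageDataType type)
    {
    	return (slot << EXAMPLE_TYPE_SHIFT) | type;
    }

    static int example_unflatten(int flat_slot, ImageDataType *type)
    {
    	*type = (ImageDataType)(flat_slot & EXAMPLE_TYPE_MASK);
    	return flat_slot >> EXAMPLE_TYPE_SHIFT;
    }
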
-
+
if(slot == images[type].size()) {
images[type].resize(images[type].size() + 1);
}
img->use_alpha = use_alpha;
images[type][slot] = img;
-
+
++tex_num_images[type];
need_update = true;
uint8_t ImageManager::pack_image_options(ImageDataType type, size_t slot)
{
uint8_t options = 0;
-
/* Image Options are packed into one uint:
* bit 0 -> Interpolation
- * bit 1 + 2 + 3-> Extension */
- if(images[type][slot]->interpolation == INTERPOLATION_CLOSEST)
+ * bit 1 + 2 + 3 -> Extension
+ */
+ if(images[type][slot]->interpolation == INTERPOLATION_CLOSEST) {
options |= (1 << 0);
-
- if(images[type][slot]->extension == EXTENSION_REPEAT)
+ }
+ if(images[type][slot]->extension == EXTENSION_REPEAT) {
options |= (1 << 1);
- else if(images[type][slot]->extension == EXTENSION_EXTEND)
+ }
+ else if(images[type][slot]->extension == EXTENSION_EXTEND) {
options |= (1 << 2);
- else /* EXTENSION_CLIP */
+ }
+ else /* EXTENSION_CLIP */ {
options |= (1 << 3);
-
+ }
return options;
}
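
The kernel-side helpers added at the top of this patch (kernel_decode_image_interpolation() and kernel_decode_image_extension()) read these bits back out of info.w, so the two sides must stay in sync. A standalone round-trip sketch, assuming <cassert> and the usual Cycles vector types (illustrative, not test code from the repository):

    /* Sketch only: options packed here must decode to the same enums in the kernel. */
    static void example_options_roundtrip()
    {
    	uint options = 0;
    	options |= (1 << 0);  /* INTERPOLATION_CLOSEST; bit unset would mean linear. */
    	options |= (1 << 1);  /* EXTENSION_REPEAT; bit 2 = extend, bit 3 = clip. */
    	uint4 info = make_uint4(/*width*/ 512, /*height*/ 512, /*offset*/ 0, options);
    	assert(kernel_decode_image_interpolation(info) == INTERPOLATION_CLOSEST);
    	assert(kernel_decode_image_extension(info) == EXTENSION_REPEAT);
    }
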
-void ImageManager::device_pack_images(Device *device,
- DeviceScene *dscene,
- Progress& /*progess*/)
+template<typename T>
+void ImageManager::device_pack_images_type(
+ ImageDataType type,
+ const vector<device_vector<T>*>& cpu_textures,
+ device_vector<T> *device_image,
+ uint4 *info)
{
- /* For OpenCL, we pack all image textures into a single large texture, and
- * do our own interpolation in the kernel. */
size_t size = 0, offset = 0;
- ImageDataType type;
-
- int info_size = tex_num_images[IMAGE_DATA_TYPE_FLOAT4] + tex_num_images[IMAGE_DATA_TYPE_BYTE4]
- + tex_num_images[IMAGE_DATA_TYPE_FLOAT] + tex_num_images[IMAGE_DATA_TYPE_BYTE];
- uint4 *info = dscene->tex_image_packed_info.resize(info_size*2);
-
- /* Byte4 Textures*/
- type = IMAGE_DATA_TYPE_BYTE4;
-
- for(size_t slot = 0; slot < images[type].size(); slot++) {
- if(!images[type][slot])
- continue;
-
- device_vector<uchar4>& tex_img = *dscene->tex_byte4_image[slot];
- size += tex_img.size();
- }
-
- uchar4 *pixels_byte4 = dscene->tex_image_byte4_packed.resize(size);
-
- for(size_t slot = 0; slot < images[type].size(); slot++) {
- if(!images[type][slot])
- continue;
-
- device_vector<uchar4>& tex_img = *dscene->tex_byte4_image[slot];
-
- uint8_t options = pack_image_options(type, slot);
-
- int index = type_index_to_flattened_slot(slot, type) * 2;
- info[index] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
- info[index+1] = make_uint4(tex_img.data_depth, 0, 0, 0);
-
- memcpy(pixels_byte4+offset, (void*)tex_img.data_pointer, tex_img.memory_size());
- offset += tex_img.size();
- }
-
- /* Float4 Textures*/
- type = IMAGE_DATA_TYPE_FLOAT4;
- size = 0, offset = 0;
-
- for(size_t slot = 0; slot < images[type].size(); slot++) {
- if(!images[type][slot])
- continue;
-
- device_vector<float4>& tex_img = *dscene->tex_float4_image[slot];
- size += tex_img.size();
- }
-
- float4 *pixels_float4 = dscene->tex_image_float4_packed.resize(size);
-
- for(size_t slot = 0; slot < images[type].size(); slot++) {
- if(!images[type][slot])
- continue;
-
- device_vector<float4>& tex_img = *dscene->tex_float4_image[slot];
-
- /* todo: support 3D textures, only CPU for now */
-
- uint8_t options = pack_image_options(type, slot);
-
- int index = type_index_to_flattened_slot(slot, type) * 2;
- info[index] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
- info[index+1] = make_uint4(tex_img.data_depth, 0, 0, 0);
-
- memcpy(pixels_float4+offset, (void*)tex_img.data_pointer, tex_img.memory_size());
- offset += tex_img.size();
- }
-
- /* Byte Textures*/
- type = IMAGE_DATA_TYPE_BYTE;
- size = 0, offset = 0;
-
+ /* First step is to calculate the size of the texture we need. */
for(size_t slot = 0; slot < images[type].size(); slot++) {
- if(!images[type][slot])
+ if(images[type][slot] == NULL) {
continue;
-
- device_vector<uchar>& tex_img = *dscene->tex_byte_image[slot];
+ }
+ device_vector<T>& tex_img = *cpu_textures[slot];
size += tex_img.size();
}
-
- uchar *pixels_byte = dscene->tex_image_byte_packed.resize(size);
-
+ /* Now we know how much memory we need, so we can allocate and fill. */
+ T *pixels = device_image->resize(size);
for(size_t slot = 0; slot < images[type].size(); slot++) {
- if(!images[type][slot])
+ if(images[type][slot] == NULL) {
continue;
-
- device_vector<uchar>& tex_img = *dscene->tex_byte_image[slot];
-
+ }
+ device_vector<T>& tex_img = *cpu_textures[slot];
uint8_t options = pack_image_options(type, slot);
-
- int index = type_index_to_flattened_slot(slot, type) * 2;
- info[index] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
+ const int index = type_index_to_flattened_slot(slot, type) * 2;
+ info[index] = make_uint4(tex_img.data_width,
+ tex_img.data_height,
+ offset,
+ options);
info[index+1] = make_uint4(tex_img.data_depth, 0, 0, 0);
-
- memcpy(pixels_byte+offset, (void*)tex_img.data_pointer, tex_img.memory_size());
+ memcpy(pixels + offset,
+ (void*)tex_img.data_pointer,
+ tex_img.memory_size());
offset += tex_img.size();
}
+}
- /* Float Textures*/
- type = IMAGE_DATA_TYPE_FLOAT;
- size = 0, offset = 0;
-
- for(size_t slot = 0; slot < images[type].size(); slot++) {
- if(!images[type][slot])
- continue;
-
- device_vector<float>& tex_img = *dscene->tex_float_image[slot];
- size += tex_img.size();
- }
-
- float *pixels_float = dscene->tex_image_float_packed.resize(size);
-
- for(size_t slot = 0; slot < images[type].size(); slot++) {
- if(!images[type][slot])
- continue;
-
- device_vector<float>& tex_img = *dscene->tex_float_image[slot];
-
- /* todo: support 3D textures, only CPU for now */
-
- uint8_t options = pack_image_options(type, slot);
-
- int index = type_index_to_flattened_slot(slot, type) * 2;
- info[index] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
- info[index+1] = make_uint4(tex_img.data_depth, 0, 0, 0);
+void ImageManager::device_pack_images(Device *device,
+ DeviceScene *dscene,
+ Progress& /*progess*/)
+{
+ /* For OpenCL, we pack all image textures into a single large texture, and
+ * do our own interpolation in the kernel.
+ */
- memcpy(pixels_float+offset, (void*)tex_img.data_pointer, tex_img.memory_size());
- offset += tex_img.size();
- }
+ /* TODO(sergey): This will over-allocate a bit, but this is constant memory
+ * so should be fine for the short term.
+ */
+ const size_t info_size = max4(max_flattened_slot(IMAGE_DATA_TYPE_FLOAT4),
+ max_flattened_slot(IMAGE_DATA_TYPE_BYTE4),
+ max_flattened_slot(IMAGE_DATA_TYPE_FLOAT),
+ max_flattened_slot(IMAGE_DATA_TYPE_BYTE));
+ uint4 *info = dscene->tex_image_packed_info.resize(info_size*2);
+ /* Pack byte4 textures. */
+ device_pack_images_type(IMAGE_DATA_TYPE_BYTE4,
+ dscene->tex_byte4_image,
+ &dscene->tex_image_byte4_packed,
+ info);
+ /* Pack float4 textures. */
+ device_pack_images_type(IMAGE_DATA_TYPE_FLOAT4,
+ dscene->tex_float4_image,
+ &dscene->tex_image_float4_packed,
+ info);
+ /* Pack byte textures. */
+ device_pack_images_type(IMAGE_DATA_TYPE_BYTE,
+ dscene->tex_byte_image,
+ &dscene->tex_image_byte_packed,
+ info);
+ /* Pack float textures. */
+ device_pack_images_type(IMAGE_DATA_TYPE_FLOAT,
+ dscene->tex_float_image,
+ &dscene->tex_image_float_packed,
+ info);
+
+ /* Push textures to the device. */
if(dscene->tex_image_byte4_packed.size()) {
if(dscene->tex_image_byte4_packed.device_pointer) {
thread_scoped_lock device_lock(device_mutex);
int texture_limit,
device_vector<DeviceType>& tex_img);
+ int max_flattened_slot(ImageDataType type);
int type_index_to_flattened_slot(int slot, ImageDataType type);
int flattened_slot_to_type_index(int flat_slot, ImageDataType *type);
string name_from_type(int type);
ImageDataType type,
int slot);
+ template<typename T>
+ void device_pack_images_type(
+ ImageDataType type,
+ const vector<device_vector<T>*>& cpu_textures,
+ device_vector<T> *device_image,
+ uint4 *info);
+
void device_pack_images(Device *device,
DeviceScene *dscene,
Progress& progess);
device_vector<uint> sobol_directions;
/* cpu images */
- std::vector<device_vector<float4>* > tex_float4_image;
- std::vector<device_vector<uchar4>* > tex_byte4_image;
- std::vector<device_vector<half4>* > tex_half4_image;
- std::vector<device_vector<float>* > tex_float_image;
- std::vector<device_vector<uchar>* > tex_byte_image;
- std::vector<device_vector<half>* > tex_half_image;
-
+ vector<device_vector<float4>* > tex_float4_image;
+ vector<device_vector<uchar4>* > tex_byte4_image;
+ vector<device_vector<half4>* > tex_half4_image;
+ vector<device_vector<float>* > tex_float_image;
+ vector<device_vector<uchar>* > tex_byte_image;
+ vector<device_vector<half>* > tex_half_image;
+
/* opencl images */
device_vector<float4> tex_image_float4_packed;
device_vector<uchar4> tex_image_byte4_packed;
/* Texture types
* Since we store the type in the lower bits of a flat index,
- * the shift and bit mask constant below need to be kept in sync.
+ * the shift and bit mask constant below need to be kept in sync.
*/
enum ImageDataType {
IMAGE_DATA_TYPE_FLOAT = 3,
IMAGE_DATA_TYPE_BYTE = 4,
IMAGE_DATA_TYPE_HALF = 5,
-
+
IMAGE_DATA_NUM_TYPES
};
return m_schema.valid();
}
-void AbcCameraReader::readObjectData(Main *bmain, float time)
+bool AbcCameraReader::accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const
+{
+ if (!Alembic::AbcGeom::ICamera::matches(alembic_header)) {
+ *err_str = "Object type mismatch, Alembic object path pointed to Camera when importing, but not any more.";
+ return false;
+ }
+
+ if (ob->type != OB_CAMERA) {
+ *err_str = "Object type mismatch, Alembic object path points to Camera.";
+ return false;
+ }
+
+ return true;
+}
+
+void AbcCameraReader::readObjectData(Main *bmain, const ISampleSelector &sample_sel)
{
Camera *bcam = static_cast<Camera *>(BKE_camera_add(bmain, m_data_name.c_str()));
- ISampleSelector sample_sel(time);
CameraSample cam_sample;
m_schema.get(cam_sample, sample_sel);
AbcCameraReader(const Alembic::Abc::IObject &object, ImportSettings &settings);
bool valid() const;
+ bool accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const;
- void readObjectData(Main *bmain, float time);
+ void readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel);
};
#endif /* __ABC_CAMERA_H__ */
return m_curves_schema.valid();
}
-void AbcCurveReader::readObjectData(Main *bmain, float time)
+bool AbcCurveReader::accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const
+{
+ if (!Alembic::AbcGeom::ICurves::matches(alembic_header)) {
+ *err_str = "Object type mismatch, Alembic object path pointed to Curves when importing, but not any more.";
+ return false;
+ }
+
+ if (ob->type != OB_CURVE) {
+ *err_str = "Object type mismatch, Alembic object path points to Curves.";
+ return false;
+ }
+
+ return true;
+}
+
+void AbcCurveReader::readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel)
{
Curve *cu = BKE_curve_add(bmain, m_data_name.c_str(), OB_CURVE);
cu->actvert = CU_ACT_NONE;
cu->resolu = 1;
- const ISampleSelector sample_sel(time);
-
ICompoundProperty user_props = m_curves_schema.getUserProperties();
if (user_props) {
const PropertyHeader *header = user_props.getPropertyHeader(ABC_CURVE_RESOLUTION_U_PROPNAME);
* object directly and create a new DerivedMesh from that. Also we might need to
* create new or delete existing NURBS in the curve.
*/
-DerivedMesh *AbcCurveReader::read_derivedmesh(DerivedMesh * /*dm*/, const float time, int /*read_flag*/, const char ** /*err_str*/)
+DerivedMesh *AbcCurveReader::read_derivedmesh(DerivedMesh * /*dm*/,
+ const ISampleSelector &sample_sel,
+ int /*read_flag*/,
+ const char ** /*err_str*/)
{
- ISampleSelector sample_sel(time);
const ICurvesSchema::Sample sample = m_curves_schema.getValue(sample_sel);
const P3fArraySamplePtr &positions = sample.getPositions();
AbcCurveReader(const Alembic::Abc::IObject &object, ImportSettings &settings);
bool valid() const;
+ bool accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const;
- void readObjectData(Main *bmain, float time);
- DerivedMesh *read_derivedmesh(DerivedMesh *, const float time, int read_flag, const char **err_str);
+ void readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel);
+ DerivedMesh *read_derivedmesh(DerivedMesh *dm,
+ const Alembic::Abc::ISampleSelector &sample_sel,
+ int read_flag,
+ const char **err_str);
};
/* ************************************************************************** */
return m_schema.valid();
}
-void AbcMeshReader::readObjectData(Main *bmain, float time)
+void AbcMeshReader::readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel)
{
Mesh *mesh = BKE_mesh_add(bmain, m_data_name.c_str());
m_object = BKE_object_add_only_object(bmain, OB_MESH, m_object_name.c_str());
m_object->data = mesh;
- const ISampleSelector sample_sel(time);
-
DerivedMesh *dm = CDDM_from_mesh(mesh);
- DerivedMesh *ndm = this->read_derivedmesh(dm, time, MOD_MESHSEQ_READ_ALL, NULL);
+ DerivedMesh *ndm = this->read_derivedmesh(dm, sample_sel, MOD_MESHSEQ_READ_ALL, NULL);
if (ndm != dm) {
dm->release(dm);
}
}
-DerivedMesh *AbcMeshReader::read_derivedmesh(DerivedMesh *dm, const float time, int read_flag, const char **err_str)
+bool AbcMeshReader::accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const
+{
+ if (!Alembic::AbcGeom::IPolyMesh::matches(alembic_header)) {
+ *err_str = "Object type mismatch, Alembic object path pointed to PolyMesh when importing, but not any more.";
+ return false;
+ }
+
+ if (ob->type != OB_MESH) {
+ *err_str = "Object type mismatch, Alembic object path points to PolyMesh.";
+ return false;
+ }
+
+ return true;
+}
+
+DerivedMesh *AbcMeshReader::read_derivedmesh(DerivedMesh *dm,
+ const ISampleSelector &sample_sel,
+ int read_flag,
+ const char **err_str)
{
- ISampleSelector sample_sel(time);
const IPolyMeshSchema::Sample sample = m_schema.getValue(sample_sel);
const P3fArraySamplePtr &positions = sample.getPositions();
}
CDStreamConfig config = get_config(new_dm ? new_dm : dm);
- config.time = time;
+ config.time = sample_sel.getRequestedTime();
bool do_normals = false;
read_mesh_sample(&settings, m_schema, sample_sel, config, do_normals);
return m_schema.valid();
}
-void AbcSubDReader::readObjectData(Main *bmain, float time)
+bool AbcSubDReader::accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const
+{
+ if (!Alembic::AbcGeom::ISubD::matches(alembic_header)) {
+ *err_str = "Object type mismatch, Alembic object path pointed to SubD when importing, but not any more.";
+ return false;
+ }
+
+ if (ob->type != OB_MESH) {
+ *err_str = "Object type mismatch, Alembic object path points to SubD.";
+ return false;
+ }
+
+ return true;
+}
+
+void AbcSubDReader::readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel)
{
Mesh *mesh = BKE_mesh_add(bmain, m_data_name.c_str());
m_object->data = mesh;
DerivedMesh *dm = CDDM_from_mesh(mesh);
- DerivedMesh *ndm = this->read_derivedmesh(dm, time, MOD_MESHSEQ_READ_ALL, NULL);
+ DerivedMesh *ndm = this->read_derivedmesh(dm, sample_sel, MOD_MESHSEQ_READ_ALL, NULL);
if (ndm != dm) {
dm->release(dm);
DM_to_mesh(ndm, mesh, m_object, CD_MASK_MESH, true);
- const ISampleSelector sample_sel(time);
const ISubDSchema::Sample sample = m_schema.getValue(sample_sel);
Int32ArraySamplePtr indices = sample.getCreaseIndices();
Alembic::Abc::FloatArraySamplePtr sharpnesses = sample.getCreaseSharpnesses();
}
}
-DerivedMesh *AbcSubDReader::read_derivedmesh(DerivedMesh *dm, const float time, int read_flag, const char **err_str)
+DerivedMesh *AbcSubDReader::read_derivedmesh(DerivedMesh *dm,
+ const ISampleSelector &sample_sel,
+ int read_flag,
+ const char **err_str)
{
- ISampleSelector sample_sel(time);
const ISubDSchema::Sample sample = m_schema.getValue(sample_sel);
const P3fArraySamplePtr &positions = sample.getPositions();
/* Only read point data when streaming meshes, unless we need to create new ones. */
CDStreamConfig config = get_config(new_dm ? new_dm : dm);
- config.time = time;
+ config.time = sample_sel.getRequestedTime();
read_subd_sample(&settings, m_schema, sample_sel, config);
if (new_dm) {
AbcMeshReader(const Alembic::Abc::IObject &object, ImportSettings &settings);
bool valid() const;
+ bool accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const;
+ void readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel);
- void readObjectData(Main *bmain, float time);
-
- DerivedMesh *read_derivedmesh(DerivedMesh *dm, const float time, int read_flag, const char **err_str);
+ DerivedMesh *read_derivedmesh(DerivedMesh *dm,
+ const Alembic::Abc::ISampleSelector &sample_sel,
+ int read_flag,
+ const char **err_str);
private:
void readFaceSetsSample(Main *bmain, Mesh *mesh, size_t poly_start,
AbcSubDReader(const Alembic::Abc::IObject &object, ImportSettings &settings);
bool valid() const;
-
- void readObjectData(Main *bmain, float time);
- DerivedMesh *read_derivedmesh(DerivedMesh *dm, const float time, int read_flag, const char **err_str);
+ bool accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const;
+ void readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel);
+ DerivedMesh *read_derivedmesh(DerivedMesh *dm,
+ const Alembic::Abc::ISampleSelector &sample_sel,
+ int read_flag,
+ const char **err_str);
};
/* ************************************************************************** */
return true;
}
-void AbcNurbsReader::readObjectData(Main *bmain, float time)
+void AbcNurbsReader::readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel)
{
Curve *cu = static_cast<Curve *>(BKE_curve_add(bmain, "abc_curve", OB_SURF));
cu->actvert = CU_ACT_NONE;
nu->resolu = cu->resolu;
nu->resolv = cu->resolv;
- const ISampleSelector sample_sel(time);
const INuPatchSchema &schema = it->first;
const INuPatchSchema::Sample smp = schema.getValue(sample_sel);
bool valid() const;
- void readObjectData(Main *bmain, float time);
+ void readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel);
private:
void getNurbsPatches(const Alembic::Abc::IObject &obj);
}
DerivedMesh *AbcObjectReader::read_derivedmesh(DerivedMesh *dm,
- const float UNUSED(time),
- int UNUSED(read_flag),
- const char **UNUSED(err_str))
+ const Alembic::Abc::ISampleSelector &UNUSED(sample_sel),
+ int UNUSED(read_flag),
+ const char **UNUSED(err_str))
{
return dm;
}
const std::string & data_name() const { return m_data_name; }
virtual bool valid() const = 0;
+ virtual bool accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const = 0;
- virtual void readObjectData(Main *bmain, float time) = 0;
+ virtual void readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel) = 0;
virtual DerivedMesh *read_derivedmesh(DerivedMesh *dm,
- const float time,
+ const Alembic::Abc::ISampleSelector &sample_sel,
int read_flag,
const char **err_str);
return m_schema.valid();
}
-void AbcPointsReader::readObjectData(Main *bmain, float time)
+bool AbcPointsReader::accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const
+{
+ if (!Alembic::AbcGeom::IPoints::matches(alembic_header)) {
+ *err_str = "Object type mismatch, Alembic object path pointed to Points when importing, but not any more.";
+ return false;
+ }
+
+ if (ob->type != OB_MESH) {
+ *err_str = "Object type mismatch, Alembic object path points to Points.";
+ return false;
+ }
+
+ return true;
+}
+
+void AbcPointsReader::readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel)
{
Mesh *mesh = BKE_mesh_add(bmain, m_data_name.c_str());
DerivedMesh *dm = CDDM_from_mesh(mesh);
- DerivedMesh *ndm = this->read_derivedmesh(dm, time, 0, NULL);
+ DerivedMesh *ndm = this->read_derivedmesh(dm, sample_sel, 0, NULL);
if (ndm != dm) {
dm->release(dm);
void read_points_sample(const IPointsSchema &schema,
const ISampleSelector &selector,
- CDStreamConfig &config,
- float time)
+ CDStreamConfig &config)
{
Alembic::AbcGeom::IPointsSchema::Sample sample = schema.getValue(selector);
N3fArraySamplePtr vnormals;
if (has_property(prop, "N")) {
- const Alembic::Util::uint32_t itime = static_cast<Alembic::Util::uint32_t>(time);
+ const Alembic::Util::uint32_t itime = static_cast<Alembic::Util::uint32_t>(selector.getRequestedTime());
const IN3fArrayProperty &normals_prop = IN3fArrayProperty(prop, "N", itime);
if (normals_prop) {
read_mverts(config.mvert, positions, vnormals);
}
-DerivedMesh *AbcPointsReader::read_derivedmesh(DerivedMesh *dm, const float time, int /*read_flag*/, const char ** /*err_str*/)
+DerivedMesh *AbcPointsReader::read_derivedmesh(DerivedMesh *dm,
+ const ISampleSelector &sample_sel,
+ int /*read_flag*/,
+ const char ** /*err_str*/)
{
- ISampleSelector sample_sel(time);
const IPointsSchema::Sample sample = m_schema.getValue(sample_sel);
const P3fArraySamplePtr &positions = sample.getPositions();
}
CDStreamConfig config = get_config(new_dm ? new_dm : dm);
- read_points_sample(m_schema, sample_sel, config, time);
+ read_points_sample(m_schema, sample_sel, config);
return new_dm ? new_dm : dm;
}
AbcPointsReader(const Alembic::Abc::IObject &object, ImportSettings &settings);
bool valid() const;
+ bool accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const;
- void readObjectData(Main *bmain, float time);
+ void readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel);
- DerivedMesh *read_derivedmesh(DerivedMesh *dm, const float time, int read_flag, const char **err_str);
+ DerivedMesh *read_derivedmesh(DerivedMesh *dm,
+ const Alembic::Abc::ISampleSelector &sample_sel,
+ int read_flag,
+ const char **err_str);
};
void read_points_sample(const Alembic::AbcGeom::IPointsSchema &schema,
const Alembic::AbcGeom::ISampleSelector &selector,
- CDStreamConfig &config,
- float time);
+ CDStreamConfig &config);
#endif /* __ABC_POINTS_H__ */
using Alembic::AbcGeom::OObject;
using Alembic::AbcGeom::OXform;
+using Alembic::Abc::ISampleSelector;
/* ************************************************************************** */
return m_schema.valid();
}
-void AbcEmptyReader::readObjectData(Main *bmain, float /*time*/)
+bool AbcEmptyReader::accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const
+{
+ if (!Alembic::AbcGeom::IXform::matches(alembic_header)) {
+ *err_str = "Object type mismatch, Alembic object path pointed to XForm when importing, but not any more.";
+ return false;
+ }
+
+ if (ob->type != OB_EMPTY) {
+ *err_str = "Object type mismatch, Alembic object path points to XForm.";
+ return false;
+ }
+
+ return true;
+}
+
+void AbcEmptyReader::readObjectData(Main *bmain, const ISampleSelector &UNUSED(sample_sel))
{
m_object = BKE_object_add_only_object(bmain, OB_EMPTY,
m_object_name.c_str());
AbcEmptyReader(const Alembic::Abc::IObject &object, ImportSettings &settings);
bool valid() const;
+ bool accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
+ const Object *const ob,
+ const char **err_str) const;
- void readObjectData(Main *bmain, float time);
+ void readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel);
};
#endif /* __ABC_TRANSFORM_H__ */
chrono_t min_time = std::numeric_limits<chrono_t>::max();
chrono_t max_time = std::numeric_limits<chrono_t>::min();
+ ISampleSelector sample_sel(0.0f);
std::vector<AbcObjectReader *>::iterator iter;
for (iter = data->readers.begin(); iter != data->readers.end(); ++iter) {
AbcObjectReader *reader = *iter;
if (reader->valid()) {
- reader->readObjectData(data->bmain, 0.0f);
+ reader->readObjectData(data->bmain, sample_sel);
min_time = std::min(min_time, reader->minTime());
max_time = std::max(max_time, reader->maxTime());
}
const ObjectHeader &header = iobject.getHeader();
-
- if (IPolyMesh::matches(header)) {
- if (ob->type != OB_MESH) {
- *err_str = "Object type mismatch: object path points to a mesh!";
- return NULL;
- }
-
- return abc_reader->read_derivedmesh(dm, time, read_flag, err_str);
- }
- else if (ISubD::matches(header)) {
- if (ob->type != OB_MESH) {
- *err_str = "Object type mismatch: object path points to a subdivision mesh!";
- return NULL;
- }
-
- return abc_reader->read_derivedmesh(dm, time, read_flag, err_str);
- }
- else if (IPoints::matches(header)) {
- if (ob->type != OB_MESH) {
- *err_str = "Object type mismatch: object path points to a point cloud (requires a mesh object)!";
- return NULL;
- }
-
- return abc_reader->read_derivedmesh(dm, time, read_flag, err_str);
- }
- else if (ICurves::matches(header)) {
- if (ob->type != OB_CURVE) {
- *err_str = "Object type mismatch: object path points to a curve!";
- return NULL;
- }
-
- return abc_reader->read_derivedmesh(dm, time, read_flag, err_str);
+ if (!abc_reader->accepts_object_type(header, ob, err_str)) {
+ /* err_str is set by accepts_object_type(). */
+ return NULL;
}
- *err_str = "Unsupported object type: verify object path"; // or poke developer
- return NULL;
+ ISampleSelector sample_sel(time);
+ return abc_reader->read_derivedmesh(dm, sample_sel, read_flag, err_str);
}
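
With the dispatch above, every concrete reader is expected to provide the type check together with the sample-selector based read functions. A skeleton of that contract (the class name is hypothetical; compare the AbcMeshReader declarations above):

    /* Sketch only: the per-reader interface the dispatcher now relies on. */
    class AbcExampleReader : public AbcObjectReader {
    public:
    	bool valid() const;
    	bool accepts_object_type(const Alembic::AbcCoreAbstract::ObjectHeader &alembic_header,
    	                         const Object *const ob,
    	                         const char **err_str) const;
    	void readObjectData(Main *bmain, const Alembic::Abc::ISampleSelector &sample_sel);
    	DerivedMesh *read_derivedmesh(DerivedMesh *dm,
    	                              const Alembic::Abc::ISampleSelector &sample_sel,
    	                              int read_flag,
    	                              const char **err_str);
    };
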
/* ************************************************************************** */
{
NodeBilateralBlurData *nbbd = MEM_callocN(sizeof(NodeBilateralBlurData), "node bilateral blur data");
node->storage = nbbd;
+ nbbd->iter = 1;
nbbd->sigma_color = 0.3;
nbbd->sigma_space = 5.0;
}
{
NodeDBlurData *ndbd = MEM_callocN(sizeof(NodeDBlurData), "node dblur data");
node->storage = ndbd;
+ ndbd->iter = 1;
ndbd->center_x = 0.5;
ndbd->center_y = 0.5;
}
"<threads>\n"
"\tUse amount of <threads> for rendering and other operations\n"
"\t[1-" STRINGIFY(BLENDER_MAX_THREADS) "], 0 for systems processor count."
-"(This must be the first argument)"
;
static int arg_handle_threads_set(int argc, const char **argv, void *UNUSED(data))
{
COMMAND
"$<TARGET_FILE_DIR:blender>/${BLENDER_VERSION_MAJOR}.${BLENDER_VERSION_MINOR}/python/bin/python$<$<CONFIG:Debug>:_d>"
${CMAKE_CURRENT_LIST_DIR}/alembic_tests.py
- --blender "${TEST_BLENDER_EXE_BARE}"
+ --blender "$<TARGET_FILE:blender>"
--testdir "${TEST_SRC_DIR}/alembic"
--alembic-root "${ALEMBIC_ROOT_DIR}"
)
coloured_output = proc.stdout
output = self.ansi_remove_re.sub(b'', coloured_output).decode('utf8')
+ # Because of the ANSI colour codes, we need to remove those first before
+ # decoding to text. This means that we cannot use the universal_newlines
+ # parameter to subprocess.run(), and have to do the conversion ourselves.
+ output = output.replace('\r\n', '\n').replace('\r', '\n')
+
if proc.returncode:
raise AbcPropError('Error %d running abcls:\n%s' % (proc.returncode, output))
def test_hierarchical_export(self, tempdir: pathlib.Path):
abc = tempdir / 'cubes_hierarchical.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=True, visible_layers_only=True, flatten=False)" % abc
+ "renderable_only=True, visible_layers_only=True, flatten=False)" % abc.as_posix()
self.run_blender('cubes-hierarchy.blend', script)
# Now check the resulting Alembic file.
def test_flat_export(self, tempdir: pathlib.Path):
abc = tempdir / 'cubes_flat.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=True, visible_layers_only=True, flatten=True)" % abc
+ "renderable_only=True, visible_layers_only=True, flatten=True)" % abc.as_posix()
self.run_blender('cubes-hierarchy.blend', script)
# Now check the resulting Alembic file.
def test_hierarchical_export(self, tempdir: pathlib.Path):
abc = tempdir / 'dupligroup_hierarchical.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=True, visible_layers_only=True, flatten=False)" % abc
+ "renderable_only=True, visible_layers_only=True, flatten=False)" % abc.as_posix()
self.run_blender('dupligroup-scene.blend', script)
# Now check the resulting Alembic file.
def test_flat_export(self, tempdir: pathlib.Path):
abc = tempdir / 'dupligroup_hierarchical.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=True, visible_layers_only=True, flatten=True)" % abc
+ "renderable_only=True, visible_layers_only=True, flatten=True)" % abc.as_posix()
self.run_blender('dupligroup-scene.blend', script)
# Now check the resulting Alembic file.
def test_export_single_curve(self, tempdir: pathlib.Path):
abc = tempdir / 'single-curve.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=True, visible_layers_only=True, flatten=False)" % abc
+ "renderable_only=True, visible_layers_only=True, flatten=False)" % abc.as_posix()
self.run_blender('single-curve.blend', script)
# Now check the resulting Alembic file.
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
"renderable_only=True, visible_layers_only=True, flatten=False, " \
"export_hair=%r, export_particles=%r, as_background_job=False)" \
- % (abc, export_hair, export_particles)
+ % (abc.as_posix(), export_hair, export_particles)
self.run_blender('hair-particles.blend', script)
return abc
def test_export_long_names(self, tempdir: pathlib.Path):
abc = tempdir / 'long-names.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=False, visible_layers_only=False, flatten=False)" % abc
+ "renderable_only=False, visible_layers_only=False, flatten=False)" % abc.as_posix()
self.run_blender('long-names.blend', script)
name_parts = [