1 # ##### BEGIN GPL LICENSE BLOCK #####
2 #
3 #  This program is free software; you can redistribute it and/or
4 #  modify it under the terms of the GNU General Public License
5 #  as published by the Free Software Foundation; either version 2
6 #  of the License, or (at your option) any later version.
7 #
8 #  This program is distributed in the hope that it will be useful,
9 #  but WITHOUT ANY WARRANTY; without even the implied warranty of
10 #  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11 #  GNU General Public License for more details.
12 #
13 #  You should have received a copy of the GNU General Public License
14 #  along with this program; if not, write to the Free Software Foundation,
15 #  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
16 #
17 # ##### END GPL LICENSE BLOCK #####
18
19 # <pep8 compliant>
20
21 # Script copyright (C) Bob Holcomb
22 # Contributors: Bob Holcomb, Richard Lärkäng, Damien McGinnes, Campbell Barton, Mario Lapin, Dominique Lorre
23
24 import os
25 import time
26 import struct
27
28 from io_utils import load_image
29
30 import bpy
31 import mathutils
32
33 BOUNDS_3DS = []
34
35
36 ######################################################
37 # Data Structures
38 ######################################################
39
40 #Some of the chunks that we will see
41 #----- Primary Chunk, at the beginning of each file
42 PRIMARY = 0x4D4D
43
44 #------ Main Chunks
45 OBJECTINFO   =     0x3D3D      #This gives the version of the mesh and is found right before the material and object information
46 VERSION      =     0x0002      #This gives the version of the .3ds file
47 EDITKEYFRAME=      0xB000      #This is the header for all of the key frame info
48
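# Chunks form a tree: PRIMARY (0x4D4D) is the file root and contains VERSION,
# OBJECTINFO and EDITKEYFRAME; OBJECTINFO in turn contains the MATERIAL and
# OBJECT blocks, whose sub-chunks are defined below.
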
49 #------ sub defines of OBJECTINFO
50 MATERIAL = 0xAFFF               # This stores the material and texture info
51 OBJECT = 0x4000                 # This stores the faces, vertices, etc...
52
53 #>------ sub defines of MATERIAL
54 #------ sub defines of MATERIAL_BLOCK
55 MAT_NAME                =       0xA000  # This holds the material name
56 MAT_AMBIENT             =       0xA010  # Ambient color of the object/material
57 MAT_DIFFUSE             =       0xA020  # This holds the color of the object/material
58 MAT_SPECULAR    =       0xA030  # Specular color of the object/material
59 MAT_SHINESS             =       0xA040  # Shininess of the object/material
60 MAT_TRANSPARENCY=       0xA050  # Transparency value of material
61 MAT_SELF_ILLUM  =       0xA080  # Self Illumination value of material
62 MAT_WIRE                =       0xA085  # Only renders wireframe
63
64 MAT_TEXTURE_MAP =       0xA200  # This is a header for a new texture map
65 MAT_SPECULAR_MAP=       0xA204  # This is a header for a new specular map
66 MAT_OPACITY_MAP =       0xA210  # This is a header for a new opacity map
67 MAT_REFLECTION_MAP=     0xA220  # This is a header for a new reflection map
68 MAT_BUMP_MAP    =       0xA230  # This is a header for a new bump map
69 MAT_MAP_FILEPATH =  0xA300  # This holds the file name of the texture
70
71 MAT_FLOAT_COLOR = 0x0010  #color defined as 3 floats
72 MAT_24BIT_COLOR = 0x0011  #color defined as 3 bytes
73
74 #>------ sub defines of OBJECT
75 OBJECT_MESH  =      0x4100      # This lets us know that we are reading a new object
76 OBJECT_LAMP =      0x4600      # This lets us know we are reading a light object
77 OBJECT_LAMP_SPOT = 0x4610               # The light is a spotlight.
78 OBJECT_LAMP_OFF = 0x4620                # The light is off.
79 OBJECT_LAMP_ATTENUATE = 0x4625
80 OBJECT_LAMP_RAYSHADE = 0x4627
81 OBJECT_LAMP_SHADOWED = 0x4630
82 OBJECT_LAMP_LOCAL_SHADOW = 0x4640
83 OBJECT_LAMP_LOCAL_SHADOW2 = 0x4641
84 OBJECT_LAMP_SEE_CONE = 0x4650
85 OBJECT_LAMP_SPOT_RECTANGULAR = 0x4651
86 OBJECT_LAMP_SPOT_OVERSHOOT = 0x4652
87 OBJECT_LAMP_SPOT_PROJECTOR = 0x4653
88 OBJECT_LAMP_EXCLUDE = 0x4654
89 OBJECT_LAMP_RANGE = 0x4655
90 OBJECT_LAMP_ROLL = 0x4656
91 OBJECT_LAMP_SPOT_ASPECT = 0x4657
92 OBJECT_LAMP_RAY_BIAS = 0x4658
93 OBJECT_LAMP_INNER_RANGE = 0x4659
94 OBJECT_LAMP_OUTER_RANGE = 0x465A
95 OBJECT_LAMP_MULTIPLIER = 0x465B
96 OBJECT_LAMP_AMBIENT_LIGHT = 0x4680
97
98
99
100 OBJECT_CAMERA =     0x4700      # This lets us know we are reading a camera object
101
102 #>------ sub defines of CAMERA
103 OBJECT_CAM_RANGES=   0x4720      # The camera range values
104
105 #>------ sub defines of OBJECT_MESH
106 OBJECT_VERTICES =   0x4110      # The object's vertices
107 OBJECT_FACES    =   0x4120      # The object's faces
108 OBJECT_MATERIAL =   0x4130      # This is found if the object has a material, either texture map or color
109 OBJECT_UV       =   0x4140      # The UV texture coordinates
110 OBJECT_TRANS_MATRIX  =   0x4160 # The Object Matrix
111
112 #>------ sub defines of EDITKEYFRAME
113 # ED_KEY_AMBIENT_NODE        =   0xB001
114 ED_KEY_OBJECT_NODE         =   0xB002
115 # ED_KEY_CAMERA_NODE         =   0xB003
116 # ED_KEY_TARGET_NODE         =   0xB004
117 # ED_KEY_LIGHT_NODE          =   0xB005
118 # ED_KEY_L_TARGET_NODE       =   0xB006  
119 # ED_KEY_SPOTLIGHT_NODE      =   0xB007
120 #>------ sub defines of ED_KEY_OBJECT_NODE
121 # EK_OB_KEYFRAME_SEG        =   0xB008
122 # EK_OB_KEYFRAME_CURTIME    =   0xB009
123 # EK_OB_KEYFRAME_HEADER     =   0xB00A
124 EK_OB_NODE_HEADER         =   0xB010
125 EK_OB_INSTANCE_NAME       =   0xB011
126 # EK_OB_PRESCALE            =   0xB012
127 # EK_OB_PIVOT               =   0xB013
128 # EK_OB_BOUNDBOX            =   0xB014
129 # EK_OB_MORPH_SMOOTH        =   0xB015
130 EK_OB_POSITION_TRACK      =   0xB020
131 EK_OB_ROTATION_TRACK      =   0xB021
132 EK_OB_SCALE_TRACK         =   0xB022
133 # EK_OB_CAMERA_FOV_TRACK =       0xB023
134 # EK_OB_CAMERA_ROLL_TRACK   =   0xB024
135 # EK_OB_COLOR_TRACK         =   0xB025
136 # EK_OB_MORPH_TRACK         =   0xB026
137 # EK_OB_HOTSPOT_TRACK       =   0xB027
138 # EK_OB_FALLOF_TRACK        =   0xB028
139 # EK_OB_HIDE_TRACK          =   0xB029
140 # EK_OB_NODE_ID             =   0xB030
141
142 ROOT_OBJECT         =   0xFFFF
143
144 scn = None
145 
146 # dictionary for object hierarchy
147 object_dictionary = {}
148
149
150 #the chunk class
151 class chunk:
152     ID = 0
153     length = 0
154     bytes_read = 0
155
156     #we don't read in the bytes_read, we compute that
157     binary_format='<HI'
158
159     def __init__(self):
160         self.ID = 0
161         self.length = 0
162         self.bytes_read = 0
163
164     def dump(self):
165         print('ID: ', self.ID)
166         print('ID in hex: ', hex(self.ID))
167         print('length: ', self.length)
168         print('bytes_read: ', self.bytes_read)
169
170 def read_chunk(file, chunk):
171     temp_data = file.read(struct.calcsize(chunk.binary_format))
172     data = struct.unpack(chunk.binary_format, temp_data)
173     chunk.ID = data[0]
174     chunk.length = data[1]
175     #update the bytes read function
176     chunk.bytes_read = 6
177
178     #if debugging
179     #chunk.dump()
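
# Every chunk starts with a 6-byte header: a little-endian unsigned short ID
# followed by an unsigned int holding the whole chunk's length in bytes,
# header included; hence bytes_read = 6 above. A minimal sketch of reading
# one header by hand, for illustration only (the file name is hypothetical):
#
#     import struct
#     with open("model.3ds", "rb") as f:
#         chunk_id, chunk_length = struct.unpack('<HI', f.read(6))
#         print(hex(chunk_id), chunk_length)  # e.g. 0x4d4d and the total file size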
180
181 def read_string(file):
182     #read in the characters till we get a null character
183     s = b''
184     while True:
185         c = struct.unpack('<c', file.read(1))[0]
186         if c == b'\x00':
187             break
188         s += c
189         #print 'string: ',s
190
191     #remove the null character from the string
192 #       print("read string", s)
193     return str(s, "utf-8", "replace"), len(s) + 1
194
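# For example, a name stored as the bytes b"Cube\x00" decodes to ("Cube", 5):
# the string itself plus the number of bytes consumed, including the
# terminating null.
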
195 ######################################################
196 # IMPORT
197 ######################################################
198 def process_next_object_chunk(file, previous_chunk):
199     # Unused helper: walk the sub-chunks of previous_chunk and skip them.
200     new_chunk = chunk()
201     while (previous_chunk.bytes_read < previous_chunk.length):
202         read_chunk(file, new_chunk)
203         skip_to_end(file, new_chunk)
204         previous_chunk.bytes_read += new_chunk.bytes_read
205
206 def skip_to_end(file, skip_chunk):
207     buffer_size = skip_chunk.length - skip_chunk.bytes_read
208     binary_format='%ic' % buffer_size
209     temp_data = file.read(struct.calcsize(binary_format))
210     skip_chunk.bytes_read += buffer_size
211
212
213 def add_texture_to_material(image, texture, material, mapto):
214     #print('assigning %s to %s' % (texture, material))
215
216     if mapto not in ("COLOR", "SPECULARITY", "ALPHA", "NORMAL"):
217         print('\tError:  Cannot map to "%s"\n\tassuming diffuse color. modify material "%s" later.' % (mapto, material.name))
218         mapto = "COLOR"
219
220     if image:
221         texture.image = image
222
223     mtex = material.texture_slots.add()
224     mtex.texture = texture
225     mtex.texture_coords = 'UV'
226     mtex.use_map_color_diffuse = False
227
228     if mapto == 'COLOR':
229         mtex.use_map_color_diffuse = True
230     elif mapto == 'SPECULARITY':
231         mtex.use_map_specular = True
232     elif mapto == 'ALPHA':
233         mtex.use_map_alpha = True
234     elif mapto == 'NORMAL':
235         mtex.use_map_normal = True
236
237
238 def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH):
239     #print previous_chunk.bytes_read, 'BYTES READ'
240     contextObName = None
241     contextLamp = [None, None] # object, Data
242     contextMaterial = None
243     contextMatrix_rot = None # Blender.mathutils.Matrix(); contextMatrix.identity()
244     #contextMatrix_tx = None # Blender.mathutils.Matrix(); contextMatrix.identity()
245     contextMesh_vertls = None # flat array: (verts * 3)
246     contextMesh_facels = None
247     contextMeshMaterials = {} # matname:[face_idxs]
248     contextMeshUV = None # flat array (verts * 2)
249
250     TEXTURE_DICT = {}
251     MATDICT = {}
252 #       TEXMODE = Mesh.FaceModes['TEX']
253
254     # Precomputed struct sizes as local names (faster lookups).
255     STRUCT_SIZE_1CHAR = struct.calcsize('c')
256     STRUCT_SIZE_2FLOAT = struct.calcsize('2f')
257     STRUCT_SIZE_3FLOAT = struct.calcsize('3f')
258     STRUCT_SIZE_4FLOAT = struct.calcsize('4f')
259     STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize('H')
260     STRUCT_SIZE_4UNSIGNED_SHORT = struct.calcsize('4H')
261     STRUCT_SIZE_4x3MAT = struct.calcsize('ffffffffffff')
262     _STRUCT_SIZE_4x3MAT = struct.calcsize('fffffffffffff')
263     # STRUCT_SIZE_4x3MAT = calcsize('ffffffffffff')
264     # print STRUCT_SIZE_4x3MAT, ' STRUCT_SIZE_4x3MAT'
265     # only init once
266     object_list = [] # for hierarchy
267     object_parent = [] # index of parent in hierarchy, 0xFFFF = no parent
268     
269     def putContextMesh(myContextMesh_vertls, myContextMesh_facels, myContextMeshMaterials):
270         bmesh = bpy.data.meshes.new(contextObName)
271
272         if myContextMesh_facels is None:
273             myContextMesh_facels = []
274
275         if myContextMesh_vertls:
276
277             bmesh.vertices.add(len(myContextMesh_vertls)//3)
278             bmesh.faces.add(len(myContextMesh_facels))
279             bmesh.vertices.foreach_set("co", myContextMesh_vertls)
280             
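            # Blender's legacy "vertices_raw" takes four indices per face and
            # treats a zero in the third or fourth slot as "no vertex", so a
            # triangle whose last index happens to be 0 is rotated to
            # (v3, v1, v2) to keep the zero out of those slots (the classic
            # "eekadoodle" workaround).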
281             eekadoodle_faces = []
282             for v1, v2, v3 in myContextMesh_facels:
283                 eekadoodle_faces.extend([v3, v1, v2, 0] if v3 == 0 else [v1, v2, v3, 0])
284             bmesh.faces.foreach_set("vertices_raw", eekadoodle_faces)
285             
286             if bmesh.faces and contextMeshUV:
287                 bmesh.uv_textures.new()
288                 uv_faces = bmesh.uv_textures.active.data[:]
289             else:
290                 uv_faces = None
291
292             for mat_idx, (matName, faces) in enumerate(myContextMeshMaterials.items()):
293                 if matName is None:
294                     bmat = None
295                     img = None  # no material, so no image either
296                 else:
297                     bmat = MATDICT[matName][1]
298                     img = TEXTURE_DICT.get(bmat.name)
299                 bmesh.materials.append(bmat) # can be None
300
301                 if uv_faces  and img:
302                     for fidx in faces:
303                         bmesh.faces[fidx].material_index = mat_idx
304                         uf = uv_faces[fidx]
305                         uf.image = img
306                         uf.use_image = True
307                 else:
308                     for fidx in faces:
309                         bmesh.faces[fidx].material_index = mat_idx
310                 
311             if uv_faces:
312                 for fidx, uf in enumerate(uv_faces):
313                     face = myContextMesh_facels[fidx]
314                     v1, v2, v3 = face
315                     
316                     # eekadoodle
317                     if v3 == 0:
318                         v1, v2, v3 = v3, v1, v2
319                     
320                     uf.uv1 = contextMeshUV[v1 * 2:(v1 * 2) + 2]
321                     uf.uv2 = contextMeshUV[v2 * 2:(v2 * 2) + 2]
322                     uf.uv3 = contextMeshUV[v3 * 2:(v3 * 2) + 2]
323                     # always a tri
324
325         ob = bpy.data.objects.new(contextObName, bmesh)
326         object_dictionary[contextObName] = ob
327         SCN.objects.link(ob)
328         
329         '''
330         if contextMatrix_tx:
331             ob.setMatrix(contextMatrix_tx)
332         '''
333         
334         if contextMatrix_rot:
335             ob.matrix_local = contextMatrix_rot
336
337         importedObjects.append(ob)
338         bmesh.update()
339
340     #a spare chunk
341     new_chunk = chunk()
342     temp_chunk = chunk()
343
344     CreateBlenderObject = False
345
346     def read_float_color(temp_chunk):
347         temp_data = file.read(struct.calcsize('3f'))
348         temp_chunk.bytes_read += 12
349         return [float(col) for col in struct.unpack('<3f', temp_data)]
350
351     def read_byte_color(temp_chunk):
352         temp_data = file.read(struct.calcsize('3B'))
353         temp_chunk.bytes_read += 3
354         return [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
355
356     def read_texture(new_chunk, temp_chunk, name, mapto):
357         new_texture = bpy.data.textures.new(name, type='IMAGE')
358
359         img = None
360         while (new_chunk.bytes_read < new_chunk.length):
361             #print 'MAT_TEXTURE_MAP..while', new_chunk.bytes_read, new_chunk.length
362             read_chunk(file, temp_chunk)
363
364             if (temp_chunk.ID == MAT_MAP_FILEPATH):
365                 texture_name, read_str_len = read_string(file)
366                 img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
367                 new_chunk.bytes_read += read_str_len #plus one for the null character that gets removed
368
369             else:
370                 skip_to_end(file, temp_chunk)
371
372             new_chunk.bytes_read += temp_chunk.bytes_read
373
374         # add the map to the material in the right channel
375         if img:
376             add_texture_to_material(img, new_texture, contextMaterial, mapto)
377
378     dirname = os.path.dirname(file.name)
379
380     #loop through all the data for this chunk (previous chunk) and see what it is
381     while (previous_chunk.bytes_read < previous_chunk.length):
382         #print '\t', previous_chunk.bytes_read, 'keep going'
383         #read the next chunk
384         #print 'reading a chunk'
385         read_chunk(file, new_chunk)
386
387         #is it a Version chunk?
388         if (new_chunk.ID == VERSION):
389             #print 'if (new_chunk.ID == VERSION):'
390             #print 'found a VERSION chunk'
391             #read in the version of the file
392             #it's an unsigned int (I)
393             temp_data = file.read(struct.calcsize('I'))
394             version = struct.unpack('<I', temp_data)[0]
395             new_chunk.bytes_read += 4 #read the 4 bytes for the version number
396             #this loader works with version 3 and below, but may not with 4 and above
397             if (version > 3):
398                 print('\tNon-Fatal Error:  Version greater than 3, may not load correctly: ', version)
399
400         #is it an object info chunk?
401         elif (new_chunk.ID == OBJECTINFO):
402             #print 'elif (new_chunk.ID == OBJECTINFO):'
403             # print 'found an OBJECTINFO chunk'
404             process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH)
405
406             #keep track of how much we read in the main chunk
407             new_chunk.bytes_read += temp_chunk.bytes_read
408
409         #is it an object chunk?
410         elif (new_chunk.ID == OBJECT):
411
412             if CreateBlenderObject:
413                 putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
414                 contextMesh_vertls = []; contextMesh_facels = []
415
416                 ## prepare to receive the next object
417                 contextMeshMaterials = {} # matname:[face_idxs]
418                 contextMeshUV = None
419                 #contextMesh.vertexUV = 1 # Make sticky coords.
420                 # Reset matrix
421                 contextMatrix_rot = None
422                 #contextMatrix_tx = None
423
424             CreateBlenderObject = True
425             contextObName, read_str_len = read_string(file)
426             new_chunk.bytes_read += read_str_len
427
428         #is it a material chunk?
429         elif (new_chunk.ID == MATERIAL):
430
431 #                       print("read material")
432
433             #print 'elif (new_chunk.ID == MATERIAL):'
434             contextMaterial = bpy.data.materials.new('Material')
435
436         elif (new_chunk.ID == MAT_NAME):
437             #print 'elif (new_chunk.ID == MAT_NAME):'
438             material_name, read_str_len = read_string(file)
439
440 #                       print("material name", material_name)
441
442             #plus one for the null character that ended the string
443             new_chunk.bytes_read += read_str_len
444
445             contextMaterial.name = material_name.rstrip() # remove trailing  whitespace
446             MATDICT[material_name]= (contextMaterial.name, contextMaterial)
447
448         elif (new_chunk.ID == MAT_AMBIENT):
449             #print 'elif (new_chunk.ID == MAT_AMBIENT):'
450             read_chunk(file, temp_chunk)
451             if (temp_chunk.ID == MAT_FLOAT_COLOR):
452                 contextMaterial.mirror_color = read_float_color(temp_chunk)
453 #                               temp_data = file.read(struct.calcsize('3f'))
454 #                               temp_chunk.bytes_read += 12
455 #                               contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
456             elif (temp_chunk.ID == MAT_24BIT_COLOR):
457                 contextMaterial.mirror_color = read_byte_color(temp_chunk)
458 #                               temp_data = file.read(struct.calcsize('3B'))
459 #                               temp_chunk.bytes_read += 3
460 #                               contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
461             else:
462                 skip_to_end(file, temp_chunk)
463             new_chunk.bytes_read += temp_chunk.bytes_read
464
465         elif (new_chunk.ID == MAT_DIFFUSE):
466             #print 'elif (new_chunk.ID == MAT_DIFFUSE):'
467             read_chunk(file, temp_chunk)
468             if (temp_chunk.ID == MAT_FLOAT_COLOR):
469                 contextMaterial.diffuse_color = read_float_color(temp_chunk)
470 #                               temp_data = file.read(struct.calcsize('3f'))
471 #                               temp_chunk.bytes_read += 12
472 #                               contextMaterial.rgbCol = [float(col) for col in struct.unpack('<3f', temp_data)]
473             elif (temp_chunk.ID == MAT_24BIT_COLOR):
474                 contextMaterial.diffuse_color = read_byte_color(temp_chunk)
475 #                               temp_data = file.read(struct.calcsize('3B'))
476 #                               temp_chunk.bytes_read += 3
477 #                               contextMaterial.rgbCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
478             else:
479                 skip_to_end(file, temp_chunk)
480
481 #                       print("read material diffuse color", contextMaterial.diffuse_color)
482
483             new_chunk.bytes_read += temp_chunk.bytes_read
484
485         elif (new_chunk.ID == MAT_SPECULAR):
486             #print 'elif (new_chunk.ID == MAT_SPECULAR):'
487             read_chunk(file, temp_chunk)
488             if (temp_chunk.ID == MAT_FLOAT_COLOR):
489                 contextMaterial.specular_color = read_float_color(temp_chunk)
490 #                               temp_data = file.read(struct.calcsize('3f'))
491 #                               temp_chunk.bytes_read += 12
492 #                               contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
493             elif (temp_chunk.ID == MAT_24BIT_COLOR):
494                 contextMaterial.specular_color = read_byte_color(temp_chunk)
495 #                               temp_data = file.read(struct.calcsize('3B'))
496 #                               temp_chunk.bytes_read += 3
497 #                               contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
498             else:
499                 skip_to_end(file, temp_chunk)
500             new_chunk.bytes_read += temp_chunk.bytes_read
501
502         elif (new_chunk.ID == MAT_TEXTURE_MAP):
503             read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR")
504
505         elif (new_chunk.ID == MAT_SPECULAR_MAP):
506             read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY")
507
508         elif (new_chunk.ID == MAT_OPACITY_MAP):
509             read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA")
510
511         elif (new_chunk.ID == MAT_BUMP_MAP):
512             read_texture(new_chunk, temp_chunk, "Bump", "NORMAL")
513
514         elif (new_chunk.ID == MAT_TRANSPARENCY):
515             #print 'elif (new_chunk.ID == MAT_TRANSPARENCY):'
516             read_chunk(file, temp_chunk)
517             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
518
519             temp_chunk.bytes_read += 2
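            # 3DS stores transparency as an integer percentage (0-100);
            # Blender's alpha is its inverse, hence the 1 - value/100 below.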
520             contextMaterial.alpha = 1-(float(struct.unpack('<H', temp_data)[0])/100)
521             new_chunk.bytes_read += temp_chunk.bytes_read
522
523
524         elif (new_chunk.ID == OBJECT_LAMP): # Basic lamp support.
525
526             temp_data = file.read(STRUCT_SIZE_3FLOAT)
527
528             x,y,z = struct.unpack('<3f', temp_data)
529             new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
530
531             # don't add the lamp to object_dictionary, that would be confusing
532             ob = bpy.data.objects.new("Lamp", bpy.data.lamps.new("Lamp"))
533             SCN.objects.link(ob)
534
535             contextLamp[1]= ob.data
536 #                       contextLamp[1]= bpy.data.lamps.new()
537             contextLamp[0]= ob
538 #                       contextLamp[0]= SCN_OBJECTS.new(contextLamp[1])
539             importedObjects.append(contextLamp[0])
540
541             #print 'number of faces: ', num_faces
542             #print x,y,z
543             contextLamp[0].location = (x, y, z)
544 #                       contextLamp[0].setLocation(x,y,z)
545
546             # Reset matrix
547             contextMatrix_rot = None
548             #contextMatrix_tx = None
549             #print contextLamp.name,
550
551         elif (new_chunk.ID == OBJECT_MESH):
552             # print 'Found an OBJECT_MESH chunk'
553             pass
554         elif (new_chunk.ID == OBJECT_VERTICES):
555             '''
556             Worldspace vertex locations
557             '''
558             # print 'elif (new_chunk.ID == OBJECT_VERTICES):'
559             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
560             num_verts = struct.unpack('<H', temp_data)[0]
561             new_chunk.bytes_read += 2
562
563             # print 'number of verts: ', num_verts
564             contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(STRUCT_SIZE_3FLOAT * num_verts))
565             new_chunk.bytes_read += STRUCT_SIZE_3FLOAT * num_verts
566             # dummyvert is not used atm!
567             
568             #print 'object verts: bytes read: ', new_chunk.bytes_read
569
570         elif (new_chunk.ID == OBJECT_FACES):
571             # print 'elif (new_chunk.ID == OBJECT_FACES):'
572             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
573             num_faces = struct.unpack('<H', temp_data)[0]
574             new_chunk.bytes_read += 2
575             #print 'number of faces: ', num_faces
576
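            # Each face record is four unsigned shorts: three vertex indices
            # followed by a flags word (edge visibility etc.), which is
            # discarded; the slice below keeps only the three indices.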
577             # print '\ngetting a face'
578             temp_data = file.read(STRUCT_SIZE_4UNSIGNED_SHORT * num_faces)
579             new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT * num_faces #4 short ints x 2 bytes each
580             contextMesh_facels = struct.unpack('<%dH' % (num_faces * 4), temp_data)
581             contextMesh_facels = [contextMesh_facels[i - 3:i] for i in range(3, (num_faces * 4) + 3, 4)]
582
583         elif (new_chunk.ID == OBJECT_MATERIAL):
584             # print 'elif (new_chunk.ID == OBJECT_MATERIAL):'
585             material_name, read_str_len = read_string(file)
586             new_chunk.bytes_read += read_str_len # read_str_len already includes the terminating null
587
588             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
589             num_faces_using_mat = struct.unpack('<H', temp_data)[0]
590             new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
591
592             
593             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat)
594             new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat
595
596             contextMeshMaterials[material_name]= struct.unpack("<%dH" % (num_faces_using_mat), temp_data)
597
598             #look up the material in all the materials
599
600         elif (new_chunk.ID == OBJECT_UV):
601             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
602             num_uv = struct.unpack('<H', temp_data)[0]
603             new_chunk.bytes_read += 2
604
605             temp_data = file.read(STRUCT_SIZE_2FLOAT * num_uv)
606             new_chunk.bytes_read += STRUCT_SIZE_2FLOAT * num_uv
607             contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data)
608
609         elif (new_chunk.ID == OBJECT_TRANS_MATRIX):
610             # How do we know the matrix size? 64 == 4x4, 48 == 4x3
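            # The 12 floats are a 4x3 transform: three rows of rotation/scale
            # followed by a translation row. Appending a (0, 0, 0, 1) fourth
            # column rebuilds the 4x4 matrix constructed below.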
611             temp_data = file.read(STRUCT_SIZE_4x3MAT)
612             data = list( struct.unpack('<ffffffffffff', temp_data)  )
613             new_chunk.bytes_read += STRUCT_SIZE_4x3MAT
614
615             contextMatrix_rot = mathutils.Matrix((data[:3] + [0], \
616                                                   data[3:6] + [0], \
617                                                   data[6:9] + [0], \
618                                                   data[9:] + [1], \
619                                                   ))
620
621         elif  (new_chunk.ID == MAT_MAP_FILEPATH):
622             texture_name, read_str_len = read_string(file)
623             # load the image only once per material
624             if contextMaterial.name not in TEXTURE_DICT:
625                 #img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILEPATH)
626                 img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
628 #                               img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILEPATH, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH)
629
630             new_chunk.bytes_read += read_str_len #plus one for the null character that gets removed
631         elif new_chunk.ID == EDITKEYFRAME:
632             pass
633
634         elif new_chunk.ID == ED_KEY_OBJECT_NODE: #another object is being processed
635             child = None
636
637         elif new_chunk.ID == EK_OB_NODE_HEADER:
638             object_name, read_str_len = read_string(file)
639             new_chunk.bytes_read += read_str_len
640             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
641             new_chunk.bytes_read += 4                   
642             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
643             hierarchy = struct.unpack('<H', temp_data)[0]
644             new_chunk.bytes_read += 2
645
646             child = object_dictionary.get(object_name)
647
648             if child is None:
649                 child = bpy.data.objects.new(object_name, None) # create an empty object
650                 SCN.objects.link(child)                 
651
652             object_list.append(child)
653             object_parent.append(hierarchy)
654
655         elif new_chunk.ID == EK_OB_INSTANCE_NAME:
656             object_name, read_str_len = read_string(file)
657             child.name = object_name
658             object_dictionary[object_name] = child
659             new_chunk.bytes_read += read_str_len
660
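        # The three track chunks below are parsed with the same layout: five
        # unsigned shorts of header data (flags and padding), a short key
        # count plus one more unused short, then for each key a frame number,
        # two unused shorts, and the payload (a 3-float vector, or an angle
        # plus axis for the rotation track). Only the key at frame 0 is
        # applied; full keyframe animation is not imported.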
661         elif new_chunk.ID == EK_OB_POSITION_TRACK: # translation
662             new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
663             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
664             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
665             nkeys = struct.unpack('<H', temp_data)[0]
666             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
667             new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
668             for i in range(nkeys):
669                 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
670                 nframe = struct.unpack('<H', temp_data)[0]
671                 new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
672                 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
673                 new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
674                 temp_data = file.read(STRUCT_SIZE_3FLOAT)
675                 loc = struct.unpack('<3f', temp_data)
676                 new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
677                 if nframe == 0:
678                     child.location = loc
679
680         elif new_chunk.ID == EK_OB_ROTATION_TRACK: # rotation
681             new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
682             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
683             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
684             nkeys = struct.unpack('<H', temp_data)[0]
685             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
686             new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
687             for i in range(nkeys):
688                 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
689                 nframe = struct.unpack('<H', temp_data)[0]
690                 new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
691                 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
692                 new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
693                 temp_data = file.read(STRUCT_SIZE_4FLOAT)
694                 rad,axis_x,axis_y,axis_z = struct.unpack('<4f', temp_data)
695                 new_chunk.bytes_read += STRUCT_SIZE_4FLOAT
696                 if nframe == 0:
697                     child.rotation_euler = mathutils.Quaternion((axis_x, axis_y, axis_z), -rad).to_euler()   # why negative?
698
699         elif new_chunk.ID == EK_OB_SCALE_TRACK: # scale
700             new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
701             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
702             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
703             nkeys = struct.unpack('<H', temp_data)[0]
704             temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
705             new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
706             for i in range(nkeys):
707                 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
708                 nframe = struct.unpack('<H', temp_data)[0]
709                 new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
710                 temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
711                 new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
712                 temp_data = file.read(STRUCT_SIZE_3FLOAT)
713                 sca = struct.unpack('<3f', temp_data)
714                 new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
715                 if nframe == 0:
716                     child.scale = sca
717
718         else: #(new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL):
719             # print 'skipping to end of this chunk'
720             #print("unknown chunk: "+hex(new_chunk.ID))
721             buffer_size = new_chunk.length - new_chunk.bytes_read
722             binary_format='%ic' % buffer_size
723             temp_data = file.read(struct.calcsize(binary_format))
724             new_chunk.bytes_read += buffer_size
725
726
727         #update the previous chunk bytes read
728         # print 'previous_chunk.bytes_read += new_chunk.bytes_read'
729         # print previous_chunk.bytes_read, new_chunk.bytes_read
730         previous_chunk.bytes_read += new_chunk.bytes_read
731         ## print 'Bytes left in this chunk: ', previous_chunk.length - previous_chunk.bytes_read
732
733     # FINISHED LOOP
734     # There will be a number of objects still not added
735     if CreateBlenderObject:
736         putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
737
738
739     # Assign parents to objects    
740     for ind, ob in enumerate(object_list):
741         parent = object_parent[ind]
742         if parent == ROOT_OBJECT:
743             ob.parent = None
744         else:
745             ob.parent = object_list[parent]
746
747
748 def load_3ds(filepath, context, IMPORT_CONSTRAIN_BOUNDS=10.0, IMAGE_SEARCH=True, APPLY_MATRIX=True):
749     global SCN
750
751     # XXX
752 #       if BPyMessages.Error_NoFile(filepath):
753 #               return
754
755     print("importing 3DS: %r..." % (filepath), end="")
756
757     time1 = time.clock()
758 #       time1 = Blender.sys.time()
759
760     current_chunk = chunk()
761
762     file = open(filepath, 'rb')
763
764     #here we go!
765     # print 'reading the first chunk'
766     read_chunk(file, current_chunk)
767     if (current_chunk.ID!=PRIMARY):
768         print('\tFatal Error:  Not a valid 3ds file: %r' % filepath)
769         file.close()
770         return
771
772
773     # IMPORT_AS_INSTANCE = Blender.Draw.Create(0)
774 #       IMPORT_CONSTRAIN_BOUNDS = Blender.Draw.Create(10.0)
775 #       IMAGE_SEARCH = Blender.Draw.Create(1)
776 #       APPLY_MATRIX = Blender.Draw.Create(0)
777
778     # Get USER Options
779 #       pup_block = [\
780 #       ('Size Constraint:', IMPORT_CONSTRAIN_BOUNDS, 0.0, 1000.0, 'Scale the model by 10 until it reaches the size constraint. Zero Disables.'),\
781 #       ('Image Search', IMAGE_SEARCH, 'Search subdirs for any associated images (Warning, may be slow)'),\
782 #       ('Transform Fix', APPLY_MATRIX, 'Workaround for object transformations importing incorrectly'),\
783 #       #('Group Instance', IMPORT_AS_INSTANCE, 'Import objects into a new scene and group, creating an instance in the current scene.'),\
784 #       ]
785
786 #       if PREF_UI:
787 #               if not Blender.Draw.PupBlock('Import 3DS...', pup_block):
788 #                       return
789
790 #       Blender.Window.WaitCursor(1)
791
792 #       IMPORT_CONSTRAIN_BOUNDS = IMPORT_CONSTRAIN_BOUNDS.val
793 #       # IMPORT_AS_INSTANCE = IMPORT_AS_INSTANCE.val
794 #       IMAGE_SEARCH = IMAGE_SEARCH.val
795 #       APPLY_MATRIX = APPLY_MATRIX.val
796
797     if IMPORT_CONSTRAIN_BOUNDS:
798         BOUNDS_3DS[:]= [1<<30, 1<<30, 1<<30, -1<<30, -1<<30, -1<<30]
799     else:
800         BOUNDS_3DS[:]= []
801
802     ##IMAGE_SEARCH
803
804     # fixme, make unglobal; clear in case of leftovers from a previous import
805     object_dictionary.clear()
806
807     scn = context.scene
808 #       scn = bpy.data.scenes.active
809     SCN = scn
810 #       SCN_OBJECTS = scn.objects
811 #       SCN_OBJECTS.selected = [] # de select all
812
813     importedObjects = [] # Fill this list with objects
814     process_next_chunk(file, current_chunk, importedObjects, IMAGE_SEARCH)
815
816     # fixme, make unglobal
817     object_dictionary.clear()
818
819     # Link the objects into this scene.
820     # Layers = scn.Layers
821
822     # REMOVE DUMMYVERT - remove this in the next release when Blender's internals are fixed.
823
824     if APPLY_MATRIX:
825         for ob in importedObjects:
826             if ob.type == 'MESH':
827                 me = ob.data
828                 me.transform(ob.matrix_local.copy().invert())
829
830     # Done DUMMYVERT
831     """
832     if IMPORT_AS_INSTANCE:
833         name = filepath.split('\\')[-1].split('/')[-1]
834         # Create a group for this import.
835         group_scn = Scene.New(name)
836         for ob in importedObjects:
837             group_scn.link(ob) # dont worry about the layers
838
839         grp = Blender.Group.New(name)
840         grp.objects = importedObjects
841
842         grp_ob = Object.New('Empty', name)
843         grp_ob.enableDupGroup = True
844         grp_ob.DupGroup = grp
845         scn.link(grp_ob)
846         grp_ob.Layers = Layers
847         grp_ob.sel = 1
848     else:
849         # Select all imported objects.
850         for ob in importedObjects:
851             scn.link(ob)
852             ob.Layers = Layers
853             ob.sel = 1
854     """
855
856     if 0:
857 #       if IMPORT_CONSTRAIN_BOUNDS!=0.0:
858         # Set bounds from object bounding box
859         for ob in importedObjects:
860             if ob.type == 'MESH':
861 #                       if ob.type=='Mesh':
862                 ob.makeDisplayList() # Why doesn't this update the bounds?
863                 for v in ob.getBoundBox():
864                     for i in (0,1,2):
865                         if v[i] < BOUNDS_3DS[i]:
866                             BOUNDS_3DS[i]= v[i] # min
867
868                         if v[i] > BOUNDS_3DS[i + 3]:
869                             BOUNDS_3DS[i + 3]= v[i] # max
870
871         # Get the max axis x/y/z
872         max_axis = max(BOUNDS_3DS[3]-BOUNDS_3DS[0], BOUNDS_3DS[4]-BOUNDS_3DS[1], BOUNDS_3DS[5]-BOUNDS_3DS[2])
873         # print max_axis
874         if max_axis < 1 << 30: # Should never be false but just make sure.
875
876             # Get a new scale factor if set as an option
877             SCALE = 1.0
878             while (max_axis * SCALE) > IMPORT_CONSTRAIN_BOUNDS:
879                 SCALE/=10
880
881             # SCALE Matrix
882             SCALE_MAT = mathutils.Matrix.Scale(SCALE, 4)
883
884             for ob in importedObjects:
885                 if ob.parent is None:
886                     ob.matrix_world =  ob.matrix_world * SCALE_MAT
887
888         # Done constraining to bounds.
889
890     # Select all new objects.
891     print(" done in %.4f sec." % (time.clock()-time1))
892     file.close()
893
894
895 def load(operator, context, filepath="", constrain_size=0.0, use_image_search=True, use_apply_transform=True):
896     load_3ds(filepath, context, IMPORT_CONSTRAIN_BOUNDS=constrain_size, IMAGE_SEARCH=use_image_search, APPLY_MATRIX=use_apply_transform)
897     return {'FINISHED'}
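
# A minimal usage sketch, assuming the add-on's package is importable (e.g.
# "io_scene_3ds" on Blender's script path) and a Blender context is available;
# the file path below is hypothetical:
#
#     import bpy
#     from io_scene_3ds import import_3ds
#     import_3ds.load_3ds("/path/to/model.3ds", bpy.context,
#                         IMPORT_CONSTRAIN_BOUNDS=10.0,
#                         IMAGE_SEARCH=True,
#                         APPLY_MATRIX=True)
#
# In normal use Blender calls load() through the add-on's import operator
# rather than by importing this module directly.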