Fix various warnings with clang build, and adjust cmake clang warnings flags
[blender.git] / source / blender / blenlib / intern / threads.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version. 
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2006 Blender Foundation
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): none yet.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/blenlib/intern/threads.c
29  *  \ingroup bli
30  */
31
32
33 #include <errno.h>
34 #include <string.h>
35
36 #include "MEM_guardedalloc.h"
37
38
39 #include "BLI_blenlib.h"
40 #include "BLI_gsqueue.h"
41 #include "BLI_threads.h"
42
43 #include "PIL_time.h"
44
45 /* for checking system threads - BLI_system_thread_count */
46 #ifdef WIN32
47 #  include <windows.h>
48 #  include <sys/timeb.h>
49 #elif defined(__APPLE__)
50 #  include <sys/types.h>
51 #  include <sys/sysctl.h>
52 #else
53 #  include <unistd.h>
54 #  include <sys/time.h>
55 #endif
56
57 #if defined(__APPLE__) && (PARALLEL == 1) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2)
58 /* ************** libgomp (Apple gcc 4.2.1) TLS bug workaround *************** */
59 extern pthread_key_t gomp_tls_key;
60 static void *thread_tls_data;
61 #endif
62
63 /* ********** basic thread control API ************ 
64  * 
65  * Many thread cases have an X amount of jobs, and only an Y amount of
66  * threads are useful (typically amount of cpus)
67  *
68  * This code can be used to start a maximum amount of 'thread slots', which
69  * then can be filled in a loop with an idle timer. 
70  *
71  * A sample loop can look like this (pseudo c);
72  *
73  *     ListBase lb;
74  *     int maxthreads = 2;
75  *     int cont = 1;
76  * 
77  *     BLI_init_threads(&lb, do_something_func, maxthreads);
78  * 
79  *     while (cont) {
80  *         if (BLI_available_threads(&lb) && !(escape loop event)) {
81  *             // get new job (data pointer)
 *             // tag job 'processed'
83  *             BLI_insert_thread(&lb, job);
84  *         }
85  *         else PIL_sleep_ms(50);
86  *         
 *         // find if a job is ready; for this, do_something_func() should write a result into the job somewhere
88  *         cont = 0;
89  *         for (go over all jobs)
90  *             if (job is ready) {
91  *                 if (job was not removed) {
92  *                     BLI_remove_thread(&lb, job);
93  *                 }
94  *             }
95  *             else cont = 1;
96  *         }
97  *         // conditions to exit loop 
98  *         if (if escape loop event) {
99  *             if (BLI_available_threadslots(&lb) == maxthreads)
100  *                 break;
101  *         }
102  *     }
103  * 
104  *     BLI_end_threads(&lb);
105  *
106  ************************************************ */
/* Global mutexes backing the BLI_lock_thread()/BLI_unlock_thread() lock types;
 * statically initialized so they need no explicit init call. */
static pthread_mutex_t _malloc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _image_draw_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _viewer_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _custom1_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _rcache_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _opengl_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _nodes_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _movieclip_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _colormanage_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t mainid;  /* set by BLI_threadapi_init(), compared in BLI_thread_is_main() */
static int thread_levels = 0;  /* threads can be invoked inside threads */
119
120 /* just a max for security reasons */
121 #define RE_MAX_THREAD BLENDER_MAX_THREADS
122
/* One slot in a thread pool created by BLI_init_threads(). */
typedef struct ThreadSlot {
        struct ThreadSlot *next, *prev;  /* ListBase links */
        void *(*do_thread)(void *);     /* worker entry point, set in BLI_init_threads() */
        void *callerdata;               /* job payload handed to do_thread */
        pthread_t pthread;              /* only valid while avail == 0 */
        int avail;                      /* 1 when the slot is free for BLI_insert_thread() */
} ThreadSlot;
130
/* MEM_guardedalloc lock callback, installed while threads are active. */
static void BLI_lock_malloc_thread(void)
{
        pthread_mutex_lock(&_malloc_lock);
}
135
/* MEM_guardedalloc unlock callback, counterpart of BLI_lock_malloc_thread(). */
static void BLI_unlock_malloc_thread(void)
{
        pthread_mutex_unlock(&_malloc_lock);
}
140
/* Must be called once, from the main thread, at startup: records the main
 * thread id so BLI_thread_is_main() can compare against it later. */
void BLI_threadapi_init(void)
{
        mainid = pthread_self();
}
145
146 /* tot = 0 only initializes malloc mutex in a safe way (see sequence.c)
147  * problem otherwise: scene render will kill of the mutex!
148  */
149
void BLI_init_threads(ListBase *threadbase, void *(*do_thread)(void *), int tot)
{
        int a;

        /* create 'tot' idle slots; each later runs do_thread(callerdata)
         * once filled via BLI_insert_thread() */
        if (threadbase != NULL && tot > 0) {
                threadbase->first = threadbase->last = NULL;
        
                /* clamp to the compile-time maximum;
                 * NOTE(review): the tot < 1 branch is unreachable, tot > 0 was already checked */
                if (tot > RE_MAX_THREAD) tot = RE_MAX_THREAD;
                else if (tot < 1) tot = 1;
        
                for (a = 0; a < tot; a++) {
                        ThreadSlot *tslot = MEM_callocN(sizeof(ThreadSlot), "threadslot");
                        BLI_addtail(threadbase, tslot);
                        tslot->do_thread = do_thread;
                        tslot->avail = 1;  /* slot starts free */
                }
        }
        
        /* outermost user installs the malloc lock so guardedalloc is thread safe */
        if (thread_levels == 0) {
                MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);

#if defined(__APPLE__) && (PARALLEL == 1) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2)
                /* workaround for Apple gcc 4.2.1 omp vs background thread bug,
                 * we copy gomp thread local storage pointer to setting it again
                 * inside the thread that we start */
                thread_tls_data = pthread_getspecific(gomp_tls_key);
#endif
        }

        thread_levels++;  /* balanced by the decrement in BLI_end_threads() */
}
181
182 /* amount of available threads */
183 int BLI_available_threads(ListBase *threadbase)
184 {
185         ThreadSlot *tslot;
186         int counter = 0;
187         
188         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
189                 if (tslot->avail)
190                         counter++;
191         }
192         return counter;
193 }
194
195 /* returns thread number, for sample patterns or threadsafe tables */
196 int BLI_available_thread_index(ListBase *threadbase)
197 {
198         ThreadSlot *tslot;
199         int counter = 0;
200         
201         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
202                 if (tslot->avail)
203                         return counter;
204         }
205         return 0;
206 }
207
/* pthread entry point for a slot: forwards callerdata to the slot's do_thread
 * and returns its result as the thread's exit value. */
static void *tslot_thread_start(void *tslot_p)
{
        ThreadSlot *tslot = (ThreadSlot *)tslot_p;

#if defined(__APPLE__) && (PARALLEL == 1) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2)
        /* workaround for Apple gcc 4.2.1 omp vs background thread bug,
         * set gomp thread local storage pointer which was copied beforehand */
        pthread_setspecific(gomp_tls_key, thread_tls_data);
#endif

        return tslot->do_thread(tslot->callerdata);
}
220
/* Non-zero when called from the thread that ran BLI_threadapi_init(). */
int BLI_thread_is_main(void)
{
        return pthread_equal(pthread_self(), mainid);
}
225
226 void BLI_insert_thread(ListBase *threadbase, void *callerdata)
227 {
228         ThreadSlot *tslot;
229         
230         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
231                 if (tslot->avail) {
232                         tslot->avail = 0;
233                         tslot->callerdata = callerdata;
234                         pthread_create(&tslot->pthread, NULL, tslot_thread_start, tslot);
235                         return;
236                 }
237         }
238         printf("ERROR: could not insert thread slot\n");
239 }
240
241 void BLI_remove_thread(ListBase *threadbase, void *callerdata)
242 {
243         ThreadSlot *tslot;
244         
245         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
246                 if (tslot->callerdata == callerdata) {
247                         pthread_join(tslot->pthread, NULL);
248                         tslot->callerdata = NULL;
249                         tslot->avail = 1;
250                 }
251         }
252 }
253
254 void BLI_remove_thread_index(ListBase *threadbase, int index)
255 {
256         ThreadSlot *tslot;
257         int counter = 0;
258         
259         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
260                 if (counter == index && tslot->avail == 0) {
261                         pthread_join(tslot->pthread, NULL);
262                         tslot->callerdata = NULL;
263                         tslot->avail = 1;
264                         break;
265                 }
266         }
267 }
268
269 void BLI_remove_threads(ListBase *threadbase)
270 {
271         ThreadSlot *tslot;
272         
273         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
274                 if (tslot->avail == 0) {
275                         pthread_join(tslot->pthread, NULL);
276                         tslot->callerdata = NULL;
277                         tslot->avail = 1;
278                 }
279         }
280 }
281
/* Join all busy threads, free the slot list, and — for the outermost
 * nesting level — remove the guardedalloc lock callbacks again. */
void BLI_end_threads(ListBase *threadbase)
{
        ThreadSlot *tslot;
        
        /* only needed if there's actually some stuff to end
         * this way we don't end up decrementing thread_levels on an empty threadbase 
         * */
        if (threadbase && threadbase->first != NULL) {
                for (tslot = threadbase->first; tslot; tslot = tslot->next) {
                        if (tslot->avail == 0) {
                                pthread_join(tslot->pthread, NULL);
                        }
                }
                BLI_freelistN(threadbase);
        }

        /* NOTE(review): decremented unconditionally, although the comment above
         * suggests it should be skipped for empty threadbases — confirm callers
         * always pair this with a BLI_init_threads() call */
        thread_levels--;
        if (thread_levels == 0)
                MEM_set_lock_callback(NULL, NULL);
}
302
303 /* System Information */
304
305 /* how many threads are native on this system? */
/* Query the number of processors from the OS (Win32 GetSystemInfo, OS X
 * sysctl, POSIX sysconf) and clamp to [1, RE_MAX_THREAD]. */
int BLI_system_thread_count(void)
{
        int t;
#ifdef WIN32
        SYSTEM_INFO info;
        GetSystemInfo(&info);
        t = (int) info.dwNumberOfProcessors;
#else 
#   ifdef __APPLE__
        int mib[2];
        size_t len;
        
        mib[0] = CTL_HW;
        mib[1] = HW_NCPU;
        len = sizeof(t);
        /* NOTE(review): return value unchecked; on failure t stays uninitialized
         * and the clamp below only partially masks that — confirm acceptable */
        sysctl(mib, 2, &t, &len, NULL, 0);
#   else
        /* returns -1 on error; clamped to 1 below */
        t = (int)sysconf(_SC_NPROCESSORS_ONLN);
#   endif
#endif
        
        /* keep the result inside the range the thread pools support */
        if (t > RE_MAX_THREAD)
                return RE_MAX_THREAD;
        if (t < 1)
                return 1;
        
        return t;
}
334
335 /* Global Mutex Locks */
336
337 void BLI_lock_thread(int type)
338 {
339         if (type == LOCK_IMAGE)
340                 pthread_mutex_lock(&_image_lock);
341         else if (type == LOCK_DRAW_IMAGE)
342                 pthread_mutex_lock(&_image_draw_lock);
343         else if (type == LOCK_VIEWER)
344                 pthread_mutex_lock(&_viewer_lock);
345         else if (type == LOCK_CUSTOM1)
346                 pthread_mutex_lock(&_custom1_lock);
347         else if (type == LOCK_RCACHE)
348                 pthread_mutex_lock(&_rcache_lock);
349         else if (type == LOCK_OPENGL)
350                 pthread_mutex_lock(&_opengl_lock);
351         else if (type == LOCK_NODES)
352                 pthread_mutex_lock(&_nodes_lock);
353         else if (type == LOCK_MOVIECLIP)
354                 pthread_mutex_lock(&_movieclip_lock);
355         else if (type == LOCK_COLORMANAGE)
356                 pthread_mutex_lock(&_colormanage_lock);
357 }
358
359 void BLI_unlock_thread(int type)
360 {
361         if (type == LOCK_IMAGE)
362                 pthread_mutex_unlock(&_image_lock);
363         else if (type == LOCK_DRAW_IMAGE)
364                 pthread_mutex_unlock(&_image_draw_lock);
365         else if (type == LOCK_VIEWER)
366                 pthread_mutex_unlock(&_viewer_lock);
367         else if (type == LOCK_CUSTOM1)
368                 pthread_mutex_unlock(&_custom1_lock);
369         else if (type == LOCK_RCACHE)
370                 pthread_mutex_unlock(&_rcache_lock);
371         else if (type == LOCK_OPENGL)
372                 pthread_mutex_unlock(&_opengl_lock);
373         else if (type == LOCK_NODES)
374                 pthread_mutex_unlock(&_nodes_lock);
375         else if (type == LOCK_MOVIECLIP)
376                 pthread_mutex_unlock(&_movieclip_lock);
377         else if (type == LOCK_COLORMANAGE)
378                 pthread_mutex_unlock(&_colormanage_lock);
379 }
380
381 /* Mutex Locks */
382
/* Initialize a ThreadMutex with default attributes. */
void BLI_mutex_init(ThreadMutex *mutex)
{
        pthread_mutex_init(mutex, NULL);
}

/* Block until the mutex is acquired. */
void BLI_mutex_lock(ThreadMutex *mutex)
{
        pthread_mutex_lock(mutex);
}

/* Release a mutex previously acquired with BLI_mutex_lock(). */
void BLI_mutex_unlock(ThreadMutex *mutex)
{
        pthread_mutex_unlock(mutex);
}

/* Destroy the mutex; it must be unlocked and no longer in use. */
void BLI_mutex_end(ThreadMutex *mutex)
{
        pthread_mutex_destroy(mutex);
}
402
403 /* Spin Locks */
404
/* Initialize a spin lock: OSSpinLock on OS X (which lacks pthread
 * spinlocks), pthread_spinlock elsewhere. */
void BLI_spin_init(SpinLock *spin)
{
#ifdef __APPLE__
        *spin = OS_SPINLOCK_INIT;
#else
        pthread_spin_init(spin, 0);
#endif
}

/* Busy-wait until the lock is acquired; use for very short critical sections. */
void BLI_spin_lock(SpinLock *spin)
{
#ifdef __APPLE__
        OSSpinLockLock(spin);
#else
        pthread_spin_lock(spin);
#endif
}

/* Release a lock acquired with BLI_spin_lock(). */
void BLI_spin_unlock(SpinLock *spin)
{
#ifdef __APPLE__
        OSSpinLockUnlock(spin);
#else
        pthread_spin_unlock(spin);
#endif
}

/* Destroy the spin lock (no-op on OS X, where OSSpinLock needs no cleanup). */
#ifndef __APPLE__
void BLI_spin_end(SpinLock *spin)
{
        pthread_spin_destroy(spin);
}
#else
void BLI_spin_end(SpinLock *UNUSED(spin))
{
}
#endif
442
443 /* Read/Write Mutex Lock */
444
/* Initialize a read/write lock with default attributes. */
void BLI_rw_mutex_init(ThreadRWMutex *mutex)
{
        pthread_rwlock_init(mutex, NULL);
}

/* Acquire the lock: shared for THREAD_LOCK_READ, exclusive otherwise. */
void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode)
{
        if (mode == THREAD_LOCK_READ)
                pthread_rwlock_rdlock(mutex);
        else
                pthread_rwlock_wrlock(mutex);
}

/* Release the lock, whichever mode it was taken in. */
void BLI_rw_mutex_unlock(ThreadRWMutex *mutex)
{
        pthread_rwlock_unlock(mutex);
}

/* Destroy the lock; it must be unlocked and no longer in use. */
void BLI_rw_mutex_end(ThreadRWMutex *mutex)
{
        pthread_rwlock_destroy(mutex);
}
467
468 /* ************************************************ */
469
/* A fixed-size pool that runs one user function over queued jobs. */
typedef struct ThreadedWorker {
        ListBase threadbase;          /* thread slots, see BLI_init_threads() */
        void *(*work_fnct)(void *);   /* user work function run per job */
        char busy[RE_MAX_THREAD];     /* per-slot busy flags, cleared by the worker thread itself */
        int total;                    /* number of usable slots (clamped to RE_MAX_THREAD) */
        int sleep_time;               /* ms to sleep while polling for a free slot */
} ThreadedWorker;

/* Per-job parameter block, heap-allocated in BLI_insert_work() and freed by
 * the worker thread when the job finishes. */
typedef struct WorkParam {
        ThreadedWorker *worker;  /* owning worker */
        void *param;             /* user payload forwarded to work_fnct */
        int index;               /* slot whose busy flag to clear when done */
} WorkParam;
483
/* Thread entry point used by workers: runs the user function, then clears
 * this slot's busy flag and frees the heap-allocated WorkParam. */
static void *exec_work_fnct(void *v_param)
{
        WorkParam *p = (WorkParam *)v_param;
        void *value;
        
        value = p->worker->work_fnct(p->param);
        
        /* NOTE(review): written without holding a lock; BLI_insert_work() polls
         * this flag from another thread — relies on plain int store visibility */
        p->worker->busy[p->index] = 0;
        MEM_freeN(p);
        
        return value;
}
496
497 ThreadedWorker *BLI_create_worker(void *(*do_thread)(void *), int tot, int sleep_time)
498 {
499         ThreadedWorker *worker;
500         
501         (void)sleep_time; /* unused */
502         
503         worker = MEM_callocN(sizeof(ThreadedWorker), "threadedworker");
504         
505         if (tot > RE_MAX_THREAD) {
506                 tot = RE_MAX_THREAD;
507         }
508         else if (tot < 1) {
509                 tot = 1;
510         }
511         
512         worker->total = tot;
513         worker->work_fnct = do_thread;
514         
515         BLI_init_threads(&worker->threadbase, exec_work_fnct, tot);
516         
517         return worker;
518 }
519
/* Wait for all outstanding jobs of the worker to finish (joins busy threads). */
void BLI_end_worker(ThreadedWorker *worker)
{
        BLI_remove_threads(&worker->threadbase);
}
524
/* Finish all jobs, free the slot list and the worker itself. */
void BLI_destroy_worker(ThreadedWorker *worker)
{
        BLI_end_worker(worker);
        BLI_freelistN(&worker->threadbase);
        MEM_freeN(worker);
}
531
/* Hand one job to the worker: blocks (polling) until a slot is free, then
 * starts a thread running worker->work_fnct(param). */
void BLI_insert_work(ThreadedWorker *worker, void *param)
{
        WorkParam *p = MEM_callocN(sizeof(WorkParam), "workparam");
        int index;
        
        if (BLI_available_threads(&worker->threadbase) == 0) {
                /* all slots busy: poll the busy flags until some worker thread
                 * finishes, then join it to free its slot (index < total on exit) */
                index = worker->total;
                while (index == worker->total) {
                        PIL_sleep_ms(worker->sleep_time);
                        
                        for (index = 0; index < worker->total; index++) {
                                if (worker->busy[index] == 0) {
                                        BLI_remove_thread_index(&worker->threadbase, index);
                                        break;
                                }
                        }
                }
        }
        else {
                index = BLI_available_thread_index(&worker->threadbase);
        }
        
        /* reserve the slot before the thread starts so no other insert grabs it */
        worker->busy[index] = 1;
        
        p->param = param;
        p->index = index;
        p->worker = worker;
        
        BLI_insert_thread(&worker->threadbase, p);
}
562
563 /* ************************************************ */
564
/* FIFO queue of void* work items shared between producer and consumer threads. */
struct ThreadQueue {
        GSQueue *queue;              /* underlying FIFO, stores void* values */
        pthread_mutex_t mutex;       /* protects the queue and the flags below */
        pthread_cond_t push_cond;    /* signalled on push, broadcast when nowait is set */
        pthread_cond_t finish_cond;  /* broadcast whenever the queue drains empty */
        volatile int nowait;         /* when set, pops stop blocking on an empty queue */
        volatile int cancelled;      /* NOTE(review): not written anywhere in this file — confirm external use */
};
573
574 ThreadQueue *BLI_thread_queue_init(void)
575 {
576         ThreadQueue *queue;
577
578         queue = MEM_callocN(sizeof(ThreadQueue), "ThreadQueue");
579         queue->queue = BLI_gsqueue_new(sizeof(void *));
580
581         pthread_mutex_init(&queue->mutex, NULL);
582         pthread_cond_init(&queue->push_cond, NULL);
583         pthread_cond_init(&queue->finish_cond, NULL);
584
585         return queue;
586 }
587
/* Destroy the queue and all its synchronization primitives.
 * Caller guarantees no thread is using the queue any more. */
void BLI_thread_queue_free(ThreadQueue *queue)
{
        /* destroy everything, assumes no one is using queue anymore */
        pthread_cond_destroy(&queue->finish_cond);
        pthread_cond_destroy(&queue->push_cond);
        pthread_mutex_destroy(&queue->mutex);

        BLI_gsqueue_free(queue->queue);

        MEM_freeN(queue);
}
599
/* Append one work pointer and wake a single thread blocked in a pop. */
void BLI_thread_queue_push(ThreadQueue *queue, void *work)
{
        pthread_mutex_lock(&queue->mutex);

        /* the pointer value itself is copied into the queue */
        BLI_gsqueue_push(queue->queue, &work);

        /* signal threads waiting to pop */
        pthread_cond_signal(&queue->push_cond);
        pthread_mutex_unlock(&queue->mutex);
}
610
/* Pop one work pointer, blocking while the queue is empty unless nowait is
 * set. Returns NULL only when woken with nothing to pop (nowait mode). */
void *BLI_thread_queue_pop(ThreadQueue *queue)
{
        void *work = NULL;

        /* wait until there is work */
        pthread_mutex_lock(&queue->mutex);
        while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait)
                pthread_cond_wait(&queue->push_cond, &queue->mutex);
        
        /* if we have something, pop it */
        if (!BLI_gsqueue_is_empty(queue->queue)) {
                BLI_gsqueue_pop(queue->queue, &work);
                
                /* wake threads blocked in BLI_thread_queue_wait_finish() */
                if (BLI_gsqueue_is_empty(queue->queue))
                        pthread_cond_broadcast(&queue->finish_cond);
        }

        pthread_mutex_unlock(&queue->mutex);

        return work;
}
632
/* Fill 'timeout' with the absolute time 'ms' milliseconds from now, in the
 * format pthread_cond_timedwait() expects. */
static void wait_timeout(struct timespec *timeout, int ms)
{
	long sec_now, usec_now, usec_total;

#ifdef WIN32
	{
		struct _timeb now;
		_ftime(&now);
		sec_now = now.time;
		usec_now = now.millitm * 1000; /* microsecond precision would be better */
	}
#else
	{
		struct timeval now;
		gettimeofday(&now, NULL);
		sec_now = now.tv_sec;
		usec_now = now.tv_usec;
	}
#endif

	/* split the millisecond offset into whole seconds and microseconds */
	timeout->tv_sec = sec_now + (ms / 1000);
	usec_total = usec_now + ((long)(ms % 1000)) * 1000;

	/* carry overflowed microseconds into the seconds field */
	if (usec_total >= 1000000) {
		timeout->tv_sec++;
		usec_total -= 1000000;
	}

	timeout->tv_nsec = usec_total * 1000;
}
667
/* Like BLI_thread_queue_pop() but gives up after 'ms' milliseconds.
 * Returns NULL on timeout, or when woken empty in nowait mode. */
void *BLI_thread_queue_pop_timeout(ThreadQueue *queue, int ms)
{
        double t;
        void *work = NULL;
        struct timespec timeout;

        t = PIL_check_seconds_timer();
        wait_timeout(&timeout, ms);  /* absolute deadline for pthread_cond_timedwait */

        /* wait until there is work */
        pthread_mutex_lock(&queue->mutex);
        while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
                if (pthread_cond_timedwait(&queue->push_cond, &queue->mutex, &timeout) == ETIMEDOUT)
                        break;
                /* extra elapsed-time guard in case the condvar clock and
                 * wait_timeout() disagree; spurious wakeups re-wait above */
                else if (PIL_check_seconds_timer() - t >= ms * 0.001)
                        break;
        }

        /* if we have something, pop it */
        if (!BLI_gsqueue_is_empty(queue->queue)) {
                BLI_gsqueue_pop(queue->queue, &work);
                
                /* wake threads blocked in BLI_thread_queue_wait_finish() */
                if (BLI_gsqueue_is_empty(queue->queue))
                        pthread_cond_broadcast(&queue->finish_cond);
        }
        
        pthread_mutex_unlock(&queue->mutex);

        return work;
}
698
699 int BLI_thread_queue_size(ThreadQueue *queue)
700 {
701         int size;
702
703         pthread_mutex_lock(&queue->mutex);
704         size = BLI_gsqueue_size(queue->queue);
705         pthread_mutex_unlock(&queue->mutex);
706
707         return size;
708 }
709
/* Switch the queue to non-blocking mode: all current and future pops return
 * immediately (NULL when empty) instead of waiting for a push. */
void BLI_thread_queue_nowait(ThreadQueue *queue)
{
        pthread_mutex_lock(&queue->mutex);

        queue->nowait = 1;

        /* signal threads waiting to pop */
        pthread_cond_broadcast(&queue->push_cond);
        pthread_mutex_unlock(&queue->mutex);
}
720
/* Block until the queue drains empty (consumers broadcast finish_cond when
 * popping the last item). Does not wait for the popped work to complete. */
void BLI_thread_queue_wait_finish(ThreadQueue *queue)
{
        /* wait for finish condition */
        pthread_mutex_lock(&queue->mutex);

        while (!BLI_gsqueue_is_empty(queue->queue))
                pthread_cond_wait(&queue->finish_cond, &queue->mutex);

        pthread_mutex_unlock(&queue->mutex);
}
731
732 /* ************************************************ */
733
/* Enter a region where allocations may happen from multiple threads: the
 * outermost call installs the guardedalloc lock callbacks. Must be balanced
 * with BLI_end_threaded_malloc(). */
void BLI_begin_threaded_malloc(void)
{
        /* Used for debug only */
        /* BLI_assert(thread_levels >= 0); */

        if (thread_levels == 0) {
                MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
        }
        thread_levels++;
}
744
/* Leave a threaded-malloc region; the outermost call removes the
 * guardedalloc lock callbacks again. */
void BLI_end_threaded_malloc(void)
{
        /* Used for debug only */
        /* BLI_assert(thread_levels >= 0); */

        thread_levels--;
        if (thread_levels == 0)
                MEM_set_lock_callback(NULL, NULL);
}
754