[xiph-commits] r13849 - trunk/sushivision
xiphmont at svn.xiph.org
xiphmont at svn.xiph.org
Mon Sep 17 18:46:32 PDT 2007
Author: xiphmont
Date: 2007-09-17 18:46:32 -0700 (Mon, 17 Sep 2007)
New Revision: 13849
Modified:
trunk/sushivision/main.c
trunk/sushivision/panel-2d.c
trunk/sushivision/panel-2d.h
Log:
Nothing to see here
Modified: trunk/sushivision/main.c
===================================================================
--- trunk/sushivision/main.c 2007-09-17 23:22:20 UTC (rev 13848)
+++ trunk/sushivision/main.c 2007-09-18 01:46:32 UTC (rev 13849)
@@ -59,27 +59,22 @@
// and the worker threads must also be locked. This includes the
// global panel list, each panel, each plane within the panel (and
// subunits of the planes), the plot widget's canvas buffer, and the
-// plot widget's other render access. Plot widget access is guarded
-// by standard mutexes. The panel lists, panels and planes require
-// greater concurrency and use read/write mutexes.
+// plot widget's other render access. Most access is guarded by
+// standard mutexes and synchronization serial numbers; however these
+// internal synchronization structures (which exist inside panels and
+// planes) must themselves be guarded against asynchronous destruction
+// while worker threads are 'inside' panels and planes. For this
+// reason, planes and the master plane list are also protected by rw
+// locks which are read-locked by the worker threads to signify 'in
+// use'.
// lock acquisition order must move to the right:
-// GDK -> panel_list -> panel -> plane, plane_internal -> plot_main -> plot_bg
+// GDK -> panel_list -> panel -> plane locks -> plot_main -> plot_data
//
// Multiple panels (if needed) must be locked in order of list
// Multiple planes (if needed) must be locked in order of list
// Each plane type has an internal order for locking internal subunits
-// Memory pointers/structures are protected by a r/w lock that is held
-// by any thread 'entering the abstraction' for the duration the
-// thread is assuming the structure will continue to exist. It is
-// held even during long-latency operations to guarantee memory
-// consistency.
-
-// The data inside a memory structure is protected by a second r/w
-// lock inside the structure that is dropped when possible during
-// long-duration operations.
-
// mutex condm is only for protecting the worker condvar
static pthread_mutex_t worker_condm = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t worker_cond = PTHREAD_COND_INITIALIZER;
Modified: trunk/sushivision/panel-2d.c
===================================================================
--- trunk/sushivision/panel-2d.c 2007-09-17 23:22:20 UTC (rev 13848)
+++ trunk/sushivision/panel-2d.c 2007-09-18 01:46:32 UTC (rev 13849)
@@ -32,32 +32,299 @@
#include <cairo-ft.h>
#include "internal.h"
-/* a panel is essentially four things:
+/* a panel is essentially:
- A collection of UI widgets
- A collection of graphical planes
- Code for managing concurrent data computation for planes
- Code for managing concurrent rendering operations for planes
-
+ 1) a repository for planes
+ 2) code for welding the planes together
+ 3) a repository for dim widgets
+ 4) code for welding UI elements together.
*/
+
+/* from API or GTK thread */
+void _sv_panel_recompute(sv_panel_t *p){
+ gdk_lock();
+ // 1) write lock panel.computelock
+ // 2) cache plot scales
+ // 3) cache dim state
+ // 4) cache axis selections
+ // 5) increment compute_serialno
+ // 6) release lock
+ // 7) wake workers
+ // worker thread process order; each step performed for all panels,
+ // proceed to next step only if no panels are still in the previous
+ // step:
-/* from API or GTK thread */
-void _sv_panel_recompute(sv_panel_t *p){
- gdk_lock();
+ // > recompute setup 0
+ // > bg realloc/scale 1
+ // > bg scale render 2
+ // > bg legend render 3
+ // > bg expose 4
+ // > data realloc 5
+ // > data yscale 6
+ // > data xscale 7
+ // > image realloc 8
+ // > image render work 9
+ // > bg render from planes 10
+ // > data computation work 11
+ // > idle 100
+
+ // recompute begins from 0
+ // relegend begins from 3
+ // remap begins from 4
+
+ // Yes, that's right-- UI output comes first (UI is never dead, even
+ // briefly, and that includes graphs), however progressive UI work
+ // (map and bg render) is purposely throttled.
+
+ // Note: wake_workers after completing a serialized operation that
+ // should be followed by a parallelized operation.
+
+ // locking order:
+ // GDK ->
+ // panellist.activelock ->
+ // panel.activelock ->
+ // panel.planelock ->
+ // panel.panellock ->
+ // panel.datalock ->
+
+
+ // worker thread pseudocode, for each panel:
- // collect and cache the data a panel needs to recompute
- // dimension value snapshots
- // pending axes scalespaces
+ // > lock panel.panellock
+ // > if panel.busy
+ // > release panel.panellock
+ // > return (status_busy)
+ // > if panel.recompute_pending
+ // > panel.busy = 1
+ // > panel.recompute_pending = 0
+ // > panel.comp_serialno++
+ // > localize data to generate scales
+ // > release panel.compute_m
- // collect and cache the data a panel needs to remap
- //
+ // > if panel.busy == 1 (can only be set here if we're beginning a recompute)
+ // (recompute setup)
+ // > drop panel.status_m
+ // (because this case is the only way the serialno can be upped
+ // and busy prevents any other threads from getting into
+ // recompute setup, we don't need to hold the status lock to
+ // update status fields here)
+ // > for each plane in panel
+ //
+ // 2D planes:
+ // > ++plane.map_serialno
+ // > plane.data_realloc = 1
+ // > plane.data_xscale = 1
+ // > plane.data_yscale = 1
+ // > plane.image_realloc = 1
+ // > clear all plane.image_flags
+
+ // > write lock plane.data_m
+ // > compute/store new pending scales
+ // > drop plane.data_m write lock
+ // > bg.resize = 1
+ // > plane.busy = 0
+ // > drop plane_status_m
+ // > return (status_working)
+
+ // (bg operations)
+
+ // > if bg.comp_serialno != serialno
+
+ // > if bg.task == 2
+
+ // > if bg.task == 3
+
+
+ // > if earliest_task < "data_realloc" then earliest_task = "data_realloc"
+ // > if earliest_task < "map_render"
+ //
+ // (realloc/xscale/yscale)
+ // > for each plane in panel, in order
+ //
+ // 2D planes:
+ // > if(plane.task == 4)
+ // > if(plane.task == 5)
+ // > if(plane.task == 6)
+
+ // > if earliest_task < "map_render" then earliest_task = "map_render"
+ // > if earliest_task == #map_render
+ // > for each plane in panel, in order
+ // 2D planes:
+ // > if(plane.task == 7)
+
+ // > for each plane in panel, in order
+ // 2D planes:
+ // > if(plane.task == 8)
+
+ // > drop plane_status_m
+ // > return (status_idle)
+
+
+
+
+
+
+
+ // map order within worker thread:
+ // > for each plane
+ // 2D planes:
+ // > lock plane.image_status_m
+ // > local mapno = map_serialno
+ // > if image_serialno != local serialno
+ // > read lock panel.computelock
+ // > local serialno = compute_serialno (avoid race)
+ // > image_serialno = compute_serialno
+ // > mapno = ++map_serialno
+ // > compute new pending image scales
+ // > drop panel.computelock read lock
+ // > image_throttled = 0
+ // > image_next = -1
+ // > clear all image_flags
+ // > write lock plane.image_m
+ // > drop plane.image_status_m
+ // > resize/scale image plane
+ // > drop plane.image_m write lock
+ // > GDK lock; expose; GDK unlock
+ // > return (status_working)
+ //
+ // > if image_next > -1
+ // > if image_throttled
+ // > if throttle period not elapsed
+ // > drop plane.image_status_m
+ // > return (status_idle)
+ // > scan image_flags forward from image_next
+ // > if found a set flag:
+ // > image_next = line number
+ // > image_flags[line] = 0
+ // > drop plane.image_status_m
+ // > read lock plane.data_m
+ // > render new line into local vector
+ // > unlock plane.data_m
+ // > lock plane.image_status_m
+ // > if mapno == map_serialno
+
+ // > read lock plane.image_m
+ // (not write lock; write access is serialized by status
+ // checks, and a premature read by the background renderer
+ // > is guaranteed to be corrected)
+ // > write rendered line to plane
+ // > drop plane.image_m read lock
+ // > dirty background line; issue throttled bg flush
+ // > drop plane.image_status_m
+ // > return (status_working)
+ // (All mapping work to date completed)
+
+
+
+ // when all mapping work in all planes is finished, issue immediate/complete bg flush
+
+
+
+ // compute order within worker thread:
+ // > for each plane
+ // 2D planes:
+ // > lock plane.data_status_m
+ // > if data_serialno != local serialno
+ // > read lock panel.computelock
+ // > local serialno = compute_serialno (avoid race)
+ // > data_serialno = compute_serialno
+ // > compute new pending scales
+ // > drop panel.computelock read lock
+ // > data_task=1 (realloc)
+ // > data_waiting = 0
+ // > data_incomplete = 1
+ // > write lock plane.data_m
+ // > drop plane.data_status_m
+ // > store pending scales
+ // > reallocate / copy
+ // > drop plane.data_m write lock
+ // > lock plane.data_status_m
+ // > if local_serialno == data_serialno
+ // > data_task=2 (xscale)
+ // > data_waiting = data_lines
+ // > data_incomplete = data_lines
+ // > data_next = 0
+ // > wake workers
+ // > unlock plane.data_status_m
+ // > return (status_working)
+ //
+ // > if data_waiting>0
+ // > data_waiting--
+ // > if data_task == xscale
+ // > local current=data_next++
+ // > read lock plane.data_m
+ // > drop plane.data_status_m
+ // > fast_xscale current line
+ // > drop plane.data_m read lock
+ // > write lock plane.data_status_m
+ // > if local_serialno == data_serialno
+ // > data_incomplete--
+ // > if data_incomplete == 0
+ // > data_task=3 (yscale)
+ // > data_waiting = data_width
+ // > data_incomplete = data_width
+ // > data_next = 0
+ // > wake workers
+ // > drop plane.data_status_m write lock
+ // > return (status_working)
+ // > if data_task == yscale
+ // > local current=data_next++
+ // > read lock plane.data_m
+ // > drop plane.data_status_m
+ // > fast_yscale current line
+ // > drop plane.data_m read lock
+ // > write lock plane.data_status_m
+ // > if local_serialno == data_serialno
+ // > data_incomplete--
+ // > if data_incomplete == 0
+ // > data_task=4 (compute)
+ // > data_waiting = data_lines
+ // > data_incomplete = data_lines
+ // > data_next = 0
+ // > drop plane.data_status_m write lock
+ // > mark all map lines in need of refresh
+ // > wake workers
+ // > return (status_working)
+ // > drop plane.data_status_m write lock
+ // > return (status_working)
+ // > if data_task == compute
+ // > local current=data_next++
+ // > map local current to y
+ // > read lock plane.data_m
+ // > drop plane.data_status_m
+ // > fast_yscale current line
+ // > drop plane.data_m read lock
+ // > write lock plane.data_status_m
+ // > if local_serialno == data_serialno
+ // > data_incomplete--
+ // > if data_incomplete == 0
+ // > data_task=4 (compute)
+ // > data_waiting = data_lines
+ // > data_incomplete = data_lines
+ // > data_next = 0
+ // > drop plane.data_status_m write lock
+ // > mark all map lines in need of refresh
+ // > wake workers
+ // > return (status_working)
+ // > drop plane.data_status_m write lock
+ // > return (status_working)
+
+
+
+ // return (status_idle)
+
+
+
+
gdk_unlock();
}
+
+
static void _sv_plane2d_set_recompute(_sv_plane2d_t *z){
pthread_rwlock_wrlock(z->data_m);
pthread_rwlock_wrlock(z->image_m);
Modified: trunk/sushivision/panel-2d.h
===================================================================
--- trunk/sushivision/panel-2d.h 2007-09-17 23:22:20 UTC (rev 13848)
+++ trunk/sushivision/panel-2d.h 2007-09-18 01:46:32 UTC (rev 13849)
@@ -84,34 +84,54 @@
// subtype
// objective data
+
+ // although we lock/protect the data and image memory allocation, we
+ // generally don't write-lock updates to the data/image planes.
+ // Because any write operation finishes with a status update that
+ // flushes changes out to the next stage and all data flows in only
+ // one direction in the rendering pipeline, any inconsistent/stale
+ // data is corrected as soon as complete data is available.
+
float *data; // data size
_sv_scalespace_t data_x;
_sv_scalespace_t data_y;
_sv_scalespace_t data_x_it;
_sv_scalespace_t data_y_it;
- int data_serialno;
- int data_xscale_waiting;
- int data_xscale_incomplete;
- int data_yscale_waiting;
- int data_yscale_incomplete;
- int data_compute_waiting;
- int data_compute_incomplete;
- int data_nextline;
- unsigned char *data_flags;
- pthread_mutex_t data_m;
-
- // image plane
_sv_ucolor_t *image; // panel size;
- int image_serialno;
_sv_scalespace_t image_x;
_sv_scalespace_t image_y;
- struct sv_zmap image_map;
- int image_waiting;
- int image_incomplete;
- int image_nextline;
- unsigned char *image_flags;
- pthread_mutex_t image_m;
+ // a data read lock is also used for coordinated non-exclusive
+ // writes to different parts of the array; data flow is set up such
+ // that reading inconsistent data/image values is only ever cosmetic
+ // and temporary; event ordering will always guarantee consistent
+ // values are flushed forward when a write is completed. write
+ // locking is only used to enforce serialized access to prevent
+ // structural or control inconsistency.
+ pthread_rwlock_t data_m;
+
+ int map_serialno;
+ int task;
+ int data_waiting;
+ int data_incomplete;
+ int data_next;
+ int image_next;
+ int *image_flags;
+ // status locked by panel
+
+ // resampling helpers; locked via data_lock/data_serialno
+ unsigned char *resample_xdelA;
+ unsigned char *resample_xdelB;
+ int *resample_xnumA;
+ int *resample_xnumB;
+ float resample_xscalemul;
+
+ unsigned char *resample_ydelA;
+ unsigned char *resample_ydelB;
+ int *resample_ynumA;
+ int *resample_ynumB;
+ float *resample_yscalemul;
+
// ui elements; use gdk lock
_sv_mapping_t *mapping;
_sv_slider_t *scale;
@@ -128,35 +148,31 @@
} _sv_plane;
typedef struct {
- pthread_rwlock_t memlock;
- pthread_rwlock_t datalock;
+ pthread_rwlock_t activelock;
+ pthread_mutex_t panellock;
+ int busy;
- GtkWidget *obj_table;
- GtkWidget *dim_table;
+ // pending computation payload
+ int recompute_pending;
+ _sv_scalespace_t plot_x;
+ _sv_scalespace_t plot_y;
+ double *dim_lo;
+ double *dim_v;
+ double *dim_hi;
+ // composite 'background' plane
_sv_plane_bg_t *bg;
+ // objective planes
int planes;
_sv_plane_t **plane_list;
int next_plane;
+ pthread_mutex_t planelock; // locks plane status, not data
- /* cached z-plane resampling helpers */
- int resample_serialno;
- unsigned char *ydelA;
- unsigned char *ydelB;
- int *ynumA;
- int *ynumB;
- float yscalemul;
+ // UI elements
+ GtkWidget *obj_table;
+ GtkWidget *dim_table;
- /* scales and data -> display scale mapping */
- _sv_scalespace_t x;
- _sv_scalespace_t x_v;
- _sv_scalespace_t x_i;
- _sv_scalespace_t y;
- _sv_scalespace_t y_v;
- _sv_scalespace_t y_i;
-
- int scales_init;
double oldbox[4];
GtkWidget **dim_xb; // X axis selector buttons
@@ -173,13 +189,5 @@
double *fout;
int fout_size;
- /* cached resampling helpers; x is here becasue locking overhead
- would be prohibitive to share between threads */
- int serialno;
- unsigned char *xdelA;
- unsigned char *xdelB;
- int *xnumA;
- int *xnumB;
- float xscalemul;
} _sv_bythread_cache_2d_t;
More information about the commits
mailing list