From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: (qmail 22050 invoked by alias); 27 Sep 2013 03:49:49 -0000
Mailing-List: contact zsh-workers-help@zsh.org; run by ezmlm
Precedence: bulk
X-No-Archive: yes
List-Id: Zsh Workers List
List-Post: 
List-Help: 
X-Seq: 31771
Received: (qmail 23680 invoked from network); 27 Sep 2013 03:49:42 -0000
X-Spam-Checker-Version: SpamAssassin 3.3.2 (2011-06-06) on f.primenet.com.au
X-Spam-Level: 
X-Spam-Status: No, score=-1.9 required=5.0 tests=BAYES_00,RCVD_IN_DNSWL_NONE autolearn=ham version=3.3.2
Received-SPF: none (ns1.primenet.com.au: domain at closedmail.com does not designate permitted sender hosts)
From: Bart Schaefer 
Message-id: <130926204956.ZM23921@torch.brasslantern.com>
Date: Thu, 26 Sep 2013 20:49:56 -0700
In-reply-to: 
Comments: In reply to Bart Schaefer "Re: coredump on C-c" (Sep 26, 2:31pm)
References: 
X-Mailer: OpenZMail Classic (0.9.2 24April2005)
To: Zsh hackers list 
Subject: Re: coredump on C-c
MIME-version: 1.0
Content-type: text/plain; charset=us-ascii

On Sep 26, 2:31pm, Bart Schaefer wrote:
}
} ... we should be queuing signals.  zfree() does it internally, but that's
} not enough to stop corruption in freeparamnode() if the signal arrives
} before all the parts of the node are cleaned up, and we probably ought to be
} queuing signals around the entire "free all the nodes" loop in
} resizehashtable().  I'm a bit surprised we haven't run into this before.

Nearly everything in hashtable.c would benefit from queuing signals around
it.  The little bin_hashinfo() builtin that's enabled by ZSH_HASH_DEBUG
already does, and it probably needs it least of anything.

How carried away do we want to get with this?  For example, there may be a
performance hit for queuing signals around all the hash table traversals
in the add* and scan* functions.  If we assume restartable syscalls that's
probably OK for scan, and only a little dangerous for add, which is likely
why this hasn't come up in the past.

The free* functions definitely need it, though ... which probably means
similar functions in other modules do as well.  Unless we can be sure
they're only called from somewhere that already does queuing, that is.
It's a tradeoff between doing the queuing in the smallest area possible,
and toggling it as seldom as possible.

Here's a stab at it, but I won't commit this until there's been some more
discussion.

diff --git a/Src/hashtable.c b/Src/hashtable.c
index ef18792..5c16efb 100644
--- a/Src/hashtable.c
+++ b/Src/hashtable.c
@@ -96,6 +96,8 @@ newhashtable(int size, UNUSED(char const *name), UNUSED(PrintTableStats printinf
 {
     HashTable ht;
 
+    queue_signals();
+
     ht = (HashTable) zshcalloc(sizeof *ht);
 #ifdef ZSH_HASH_DEBUG
     ht->next = NULL;
@@ -113,6 +115,8 @@ newhashtable(int size, UNUSED(char const *name), UNUSED(PrintTableStats printinf
     ht->ct = 0;
     ht->scan = NULL;
     ht->scantab = NULL;
+
+    unqueue_signals();
     return ht;
 }
 
@@ -123,6 +127,8 @@ newhashtable(int size, UNUSED(char const *name), UNUSED(PrintTableStats printinf
 mod_export void
 deletehashtable(HashTable ht)
 {
+    queue_signals();
+
     ht->emptytable(ht);
 #ifdef ZSH_HASH_DEBUG
     if(ht->next)
@@ -137,6 +143,8 @@ deletehashtable(HashTable ht)
 #endif /* ZSH_HASH_DEBUG */
     zfree(ht->nodes, ht->hsize * sizeof(HashNode));
     zfree(ht, sizeof(*ht));
+
+    unqueue_signals();
 }
 
 /* Add a node to a hash table.                          *
@@ -279,6 +287,8 @@ removehashnode(HashTable ht, const char *nam)
     if (!hp)
 	return NULL;
 
+    queue_signals();
+
     /* else check if the key in the first one matches */
     if (ht->cmpnodes(hp->nam, nam) == 0) {
 	ht->nodes[hashval] = hp->next;
@@ -294,6 +304,7 @@ removehashnode(HashTable ht, const char *nam)
 	    } else if(ht->scan->u.u == hp)
 		ht->scan->u.u = hp->next;
 	}
+	unqueue_signals();
 	return hp;
     }
 
@@ -307,6 +318,8 @@ removehashnode(HashTable ht, const char *nam)
 	}
     }
 
+    unqueue_signals();
+
     /* else it is not in the list, so return NULL */
     return NULL;
 }
@@ -455,6 +468,8 @@ expandhashtable(HashTable ht)
     struct hashnode **onodes, **ha, *hn, *hp;
     int i, osize;
 
+    queue_signals();
+
     osize = ht->hsize;
     onodes = ht->nodes;
 
@@ -472,6 +487,8 @@ expandhashtable(HashTable ht)
 	}
     }
     zfree(onodes, osize * sizeof(HashNode));
+
+    unqueue_signals();
 }
 
 /* Empty the hash table and resize it if necessary */
@@ -483,6 +500,8 @@ resizehashtable(HashTable ht, int newsize)
     struct hashnode **ha, *hn, *hp;
     int i;
 
+    queue_signals();
+
     /* free all the hash nodes */
     ha = ht->nodes;
     for (i = 0; i < ht->hsize; i++, ha++) {
@@ -505,6 +524,8 @@ resizehashtable(HashTable ht, int newsize)
     }
 
     ht->ct = 0;
+
+    unqueue_signals();
 }
 
 /* Generic method to empty a hash table */
@@ -720,11 +741,15 @@ freecmdnamnode(HashNode hn)
 {
     Cmdnam cn = (Cmdnam) hn;
 
+    queue_signals();
+
     zsfree(cn->node.nam);
     if (cn->node.flags & HASHED)
 	zsfree(cn->u.cmd);
 
     zfree(cn, sizeof(struct cmdnam));
+
+    unqueue_signals();
 }
 
 /* Print an element of the cmdnamtab hash table (external command) */
@@ -884,6 +909,8 @@ freeshfuncnode(HashNode hn)
 {
     Shfunc shf = (Shfunc) hn;
 
+    queue_signals();
+
     zsfree(shf->node.nam);
     if (shf->funcdef)
 	freeeprog(shf->funcdef);
@@ -898,6 +925,8 @@ freeshfuncnode(HashNode hn)
 	zfree(shf->sticky, sizeof(*shf->sticky));
     }
     zfree(shf, sizeof(struct shfunc));
+
+    unqueue_signals();
 }
 
 /* Print a shell function */
@@ -1129,10 +1158,14 @@ static void
 freealiasnode(HashNode hn)
 {
     Alias al = (Alias) hn;
-
+
+    queue_signals();
+
     zsfree(al->node.nam);
     zsfree(al->text);
     zfree(al, sizeof(struct alias));
+
+    unqueue_signals();
 }
 
 /* Print an alias */
@@ -1327,6 +1360,8 @@ freehistdata(Histent he, int unlink)
     if (!(he->node.flags & (HIST_DUP | HIST_TMPSTORE)))
 	removehashnode(histtab, he->node.nam);
 
+    queue_signals();
+
     zsfree(he->node.nam);
     if (he->nwords)
 	zfree(he->words, he->nwords*2*sizeof(short));
@@ -1341,4 +1376,6 @@ freehistdata(Histent he, int unlink)
 	    he->down->up = he->up;
 	}
     }
+
+    unqueue_signals();
 }
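
The shape of the change is the same in every hunk: bracket the teardown of
a node (or of the whole table) so that a handler run off a C-c can't
traverse or free a half-dismantled structure.  Here is a bare sketch of
that pattern outside the patch context; the node type and free function
are made up for illustration, and only queue_signals(), unqueue_signals(),
zsfree() and zfree() are the real zsh interfaces:

/* Hypothetical node type, for illustration only. */
struct examplenode {
    char *nam;			/* permanently allocated key */
    char *text;			/* permanently allocated payload */
};

static void
freeexamplenode(struct examplenode *en)
{
    /* Hold signal delivery until the node is completely torn down,
     * so a handler can't see (or free) a partially freed node. */
    queue_signals();

    zsfree(en->nam);
    zsfree(en->text);
    zfree(en, sizeof(struct examplenode));

    /* Deliver anything that arrived while we were queued. */
    unqueue_signals();
}

Doing it per node keeps the window where signals are held as short as
possible but toggles the queue once per node; wrapping the whole loop in
resizehashtable() instead toggles it once at the cost of holding signals
for the entire traversal.  That's the tradeoff mentioned above.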