
#include "V_M_UC.h"
#include "sys_malloc.h"
#include "mem.h"
/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* The "chunk" module is a memory allocator.  It is designed to sit  on
* top  of  the  malloc  package  and  add debugging and error checking
* capabilities.  If you replace all your calls of malloc() and  free()
* with  GetChunk()  and RelChunk(), you can use this module's checking
* and still satisfy systems that insist that you must use the  library
* malloc.  You can also link in things like the perl malloc package if
* you wish, and add error checking to it.  See mem.d for details.
*
* AUTHOR: John Chambers.
*
* CHANGES:
*   96/12/05 John Chambers
*     Added code to use pthread_lock_global_np if threads is enabled.
++*/

global void   addchunk();		/* Insert a header into the sorted chunklist */
global Chunk* badchunk();		/* Record a bogus address on badchunks */
global void   dmpchunk();		/* Symbolic dump of one chunk header */
global void   dmpchunklist();	/* Symbolic dump of the whole chunklist */
global Chunk* hdrchunk();		/* Find the header whose addr matches */
static Chunk* getheader();		/* Get a (possibly recycled) Chunk header */
static Chunk* mrgchunk();		/* Merge a chunk with its successor */
static Chunk* newchunk();		/* malloc a brand-new chunk */
static Chunk* splitchunk();		/* Split one chunk into two */

global Chunk* chunklist = 0;	/* All known chunks, sorted by address */
static Chunk* chkchp = 0;		/* Chunk located by the last chkchunk() call */
static Chunk* badchunks = 0;	/* Bogus addresses recorded by badchunk() */
static Chunk* freechunks = 0;	/* Free list of recycled Chunk headers */
global int    multchunks = 1;		/* How many chunks to malloc at once */
global Chunk* lochunk = 0;
static char*  maxaddr = (char*)0xFFFFFFFF;	/* NOTE(review): assumes 32-bit pointers -- confirm for 64-bit builds */
global int    memalignsize = MEMU;	/* Memory alignment size */
global int    memminsize = 1024;	/* Minimum size chunk to malloc */
global int    memohead = MEMO;		/* Memory overhead for malloc */

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   void addchunk(val,dsc)
*   	Chunk* val;
*   	char*  dsc;
*
* DESCRIPTION:
*
* Add a chunk to the global chunk list.  This  is  currently  done  by
* scanning  the  list  for  the  appropriate  place;  it would be more
* efficient to make the list into a binary tree.
*
* AUTHOR: John Chambers.
++*/
void addchunk(val,dsc)
	Chunk* val;		/* Header to insert into chunklist */
	char*  dsc;		/* Description, used for diagnostics only */
{	Chunk* chp;
	char* nxtaddr;
	Fenter("addchunk");
	V7M "Called for %08X=%08X:%04X=%d %s",val,val->addr,val->size,val->size,Dsps(dsc,-1) D;
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		pthread_lock_global_np();
		P7 "\t\t\tpthread_lock_global_np() in %s",Fctname P;
		fflush(Vout);
	}
#endif /*USE_pthreads*/
	/*
	* Linear scan of the address-sorted list: find a chunk whose address
	* is at or below the new chunk's, with a successor (or list end) at
	* or above it, and splice the new chunk in after it.
	*/
	for (chp = chunklist; chp; chp = chp->next) {
		nxtaddr = chp->next ? chp->next->addr : maxaddr;
		/* NOTE(review): both comparisons are Ule, so a chunk whose
		* address equals an existing one is inserted after its twin --
		* confirm duplicate addresses can't occur. */
		if (Ule(chp->addr,val->addr) && Ule(val->addr,nxtaddr)) {
			val->next = chp->next;
			chp->next = val;
			V7M "Chunk insert : %08X < %08X < %08X"
				,Uval(chp->addr),Uval(val->addr),Uval(nxtaddr) D;
			Done;	/* Jump to done: so the global lock is released */
		}
	}
/*
* If we get here, then chunklist is empty or the first item's addr is higher than
* the new chunk we just got.  The latter is unlikely, but possible.
*/
	nxtaddr = chunklist ? chunklist->addr : maxaddr;
	V7M "Chunk insert at start: %08X < %08X",Uval(val->addr),Uval(nxtaddr) D;
	val->next = chunklist;
	chunklist = val;
done:
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		P7 "\t\t\tpthread_unlock_global_np() in %s",Fctname P;
		fflush(Vout);
		pthread_unlock_global_np();
	}
#endif /*USE_pthreads*/
	Fexit;
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   Chunk* badchunk(p,dsc)
*   	MEMP   p;
*   	char*  dsc;
*
* DESCRIPTION:
*
* Record the address p as a bad chunk reference.  The return value  is
* the  new  chunk  header,  which will be at the head of the badchunks
* list.
*
* AUTHOR: John Chambers.
++*/
Chunk* badchunk(p,dsc)
	MEMP   p;		/* The bogus address to record */
	char*  dsc;		/* Description, for diagnostics */
{	Chunk* val;
	Fenter("badchunk");
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		pthread_lock_global_np();
		P7 "\t\t\tpthread_lock_global_np() in %s",Fctname P;
		fflush(Vout);
	}
#endif /*USE_pthreads*/
	/* If no header can be had, val stays null and we just unlock. */
	if (!(val = getheader(dsc))) Fail;
	val->addr = p;			/* Remember the offending address */
	val->next = badchunks;	/* Push the header onto the badchunks list */
	badchunks = val;
fail:
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		P7 "\t\t\tpthread_unlock_global_np() in %s",Fctname P;
		fflush(Vout);
		pthread_unlock_global_np();
	}
#endif /*USE_pthreads*/
	FExit;
	return val;		/* New head of badchunks, or null on failure */
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   chkchunk(p,dsc)
*   	MEMP   p;
*   	char*  dsc;
*
* DESCRIPTION:
*
* Do a validity check for a chunk.  This routine returns the number of
* errors  found,  and  may produce error messages of various levels in
* addition to setting errno.  Note that the parameter is a pointer  to
* an allocated block, not to the Chunk header structure.
*
* A return value of zero means that p is an allocated chunk.
*
* Error codes returned:
*
* EFAULT  p is an unallocated chunk.
* EINVAL  p is inside but isn't the address of a chunk.
* ENOMEM  p is outside all chunks.
*
* AUTHOR: John Chambers.
*
++*/
/*ARGSUSED*/
/*
* Validity check for an address p that should be the start of an
* allocated chunk.  Returns the number of errors found (0 means p is an
* allocated chunk) and sets errno per the header comment above:
*   EFAULT  p is a chunk but isn't currently allocated,
*   EINVAL  p is inside a chunk but isn't its start address,
*   ENOMEM  p is outside all known chunks.
* The global chkchp is left pointing at any chunk that matched.
* NOTE(review): unlike the other entry points this takes no global
* lock; presumably callers hold it -- confirm.
*/
int chkchunk(p,dsc)
	MEMP   p;
	char*  dsc;
{	int    errs=0;
	Chunk* chp;

	chkchp = 0;
	for (chp = chunklist; chp; chp = chp->next) {
		/* Exact match on a chunk's start address, as in hdrchunk(). */
		if (chp->addr == p) {
			V7M "Chunk %08X contains %08X.",chp,p D;
			if (chp->flag.alloc) {
				V7M "Chunk %08X contains %08X and is allocated.",chp,p D;
				chkchp = chp;
				Done;
			} else {
				V7M "Chunk %08X contains %08X but isn't allocated.",chp,p D;
				chkchp = chp;
				errno = EFAULT;
				errs++;
				Fail;
			}
		}
		if (Ult(chp->addr,p) && Ult(p,chp->addr+chp->size)) {
			V7M "%08X is inside a chunk.",p D;
			chkchp = chp;
			errno = EINVAL;
			errs++;
			Fail;
		}
	}
	errno = ENOMEM;	/* Fell off the list: p is in none of our chunks */
	errs++;
fail:
done:

	return errs;
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   DmpChunk(chp,dsc)
*   	Chunk* chp;
*
* DESCRIPTION:
*   Produce a symbolic dump of a single chunk.
*
* AUTHOR: John Chambers.
*
++*/
/*ARGSUSED*/
void dmpchunk(chp,dsc)
	Chunk* chp;		/* Header to dump */
	char*  dsc;		/* Unused; kept for a uniform call signature */
{	char   msg[100], *mp=msg;
	/*
	* Build the entire line in msg[] and emit it with a single Write()
	* so the output stays atomic even when called from error paths.
	* Strlen after each sprintf is deliberate: on some old systems
	* sprintf returns char*, not a count.
	* NOTE(review): msg[] is 100 bytes; assumes the desc field (MEMDESC)
	* keeps the line under that -- confirm.
	*/
	sprintf(mp,"Chunk %08X next=%08X addr=%08X size=%04X=%d"
		,chp,chp->next,chp->addr,chp->size,chp->size);
	mp += Strlen(mp);
	if (chp->algn) sprintf(mp," algn=%d",chp->algn);
	mp += Strlen(mp);
	if (chp->flag.free) sprintf(mp," free");
	mp += Strlen(mp);
	if (chp->flag.alloc) sprintf(mp," alloc");
	mp += Strlen(mp);
	if (chp->flag.malloc) sprintf(mp," malloc");
	mp += Strlen(mp);
#if MEMINFO > 0
	if (*chp->desc) sprintf(mp," <%s>",Dsps(chp->desc,-1));
	mp += Strlen(mp);
#endif
	*mp++ = '\n';	/* No trailing null needed: Write gets an explicit length */
	Write(Vout?Fileno(Vout):2,msg,mp-msg);
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   dmpchunklist()
*
* DESCRIPTION:
*   Produce a symbolic dump of the entire chunk list.
*
* AUTHOR: John Chambers.
*
++*/
/* Walk the whole chunklist and dump each header symbolically. */
void dmpchunklist()
{	Chunk* scan = chunklist;

	while (scan) {
		DmpChunk(scan,"chunk");
		scan = scan->next;
	}
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   Chunk* HdrChunk(p,d)
*   	MEMP   p;
*   	char*  d;
*
* DESCRIPTION:
*   Find and return the header for the chunk whose addr is p.  Null
*   is returned if p can't be found.
*
* AUTHOR: John Chambers.
++*/
/*ARGSUSED*/
Chunk* hdrchunk(p,d)
	MEMP   p;		/* Chunk start address to look up */
	char*  d;		/* Unused; kept for a uniform call signature */
{	Chunk* scan;

	/* Linear search of the chunklist for an exact address match. */
	for (scan = chunklist; scan != 0; scan = scan->next) {
		if (scan->addr == p) {
			return scan;
		}
	}
	return 0;		/* p isn't the start of any known chunk */
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   MEMP  getachunk(want,align,dsc)
*   	MEMS  want;   -- Minimum number of bytes to get.
*       int   align;  -- Align to a multiple of this many bytes.
*   	char* dsc;    -- Description, for diagnostics.
*
*
* DESCRIPTION:
*
* Get an aligned chunk of (at least) want bytes of  memory.   This  is
* basically  a  wrapper around malloc or whatever memory allocator you
* wish to use. We do lots of sanity checking and debugging.  The extra
* argument,  dsc,  which  should  be  a printable string to be used in
* error messages to identify what the caller was trying to allocate.
*
* The align value is typically 2, 4 or 8; it is the desired alignment.
* The address returned will be a multiple of align.
*
* Note  that  we zero out the block of memory; most versions of malloc
* don't do this.
*
* AUTHOR: John Chambers.
*
++*/
MEMP getachunk(want,align,dsc)
	MEMS  want;		/* Minimum number of bytes wanted */
	int   align;	/* Returned address is a multiple of this */
	char* dsc;		/* Description, for diagnostics */
{	MEMP  v=0;
	Chunk* chp=0;
	Chunk* tmp=0;
	int   e=0, i, n;
	MEMP  base=0;		/* Aligned chunk address */
	MEMS  padto=0;	/* Size after possible alignment */
	Fenter("getachunk");
	V7M "getachunk(%d,%d,%08X) called.",want,align,dsc D;
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		pthread_lock_global_np();
		P7 "\t\t\tpthread_lock_global_np() in %s",Fctname P;
		fflush(Vout);
	}
#endif /*USE_pthreads*/
	if (!dsc) dsc = m_unnamed;	/* Never let a null dsc reach the messages */
	if (align < 1) align = memalignsize;	/* Default alignment */
	V8M "getchunk(%d,\"%s\") called.",want,dsc D;
	if (want <= 0) {	/* Zero/negative requests are caller errors */
		errno = EINVAL;
		P2 m_nomem,pname,want,dsc,Errinfo P;
		Fail;
	}
	errno = 0;
	padto = MEMALIGN(want,align);	/* Round the request up to the alignment */
	V8M "Get %d (rounded up to %d) for %s.",want,padto,dsc D;
	/* First-fit scan for a free chunk that is big enough. */
	for (chp = chunklist; chp; chp = chp->next) {
		if (Vflg['M']>6) DmpChunk(chp,Fctname);
		if (chp->flag.free && (chp->size >= padto)) {
			V7M "Chunk %08X is %d bytes; it's big enough.",chp->addr,chp->size D;
			/*
			* The chunk is big enough, but it may not be aligned as
			* the caller wishes. We now calculate the first aligned
			* address within the chunk and the number of bytes that
			* are usable with that as the base address.
			*/
			base = (MEMP)MEMALIGN(chp->addr,align);	/* Align the chunk */
			n = chp->size - (base - chp->addr);
			V7M "Chunk %08X:%d aligns to %08X:%d.",chp->addr,chp->size,base,n D;
			if (n >= padto) {	/* Big enough after alignment? */
				V7M "Chunk %08X:%d is big enough.",base,n D;
				/*
				* If there are enough bytes left after alignment, we
				* now check to see if there's a little initial chunk
				* to cut off.  If so, we split the chunk; if not, we
				* just use the entire chunk.
				*/
				if ((i = (base - chp->addr)) > 0) {
					V7M "Chunk %08X is %d+%d bytes; split it.",chp->addr,i,n D;
					if (!(tmp = splitchunk(chp,i,dsc))) continue;
					chp = tmp;	/* Use the aligned remainder */
				}
				chp->algn = align;
				Done;
			}
		}
	}
	/* Nothing on the free list fit; malloc a brand-new chunk. */
	if (!(chp = newchunk(padto,dsc))) {
		V3M "Can't get %d-byte chunk [%s]",padto,dsc D;
		Fail;
	}
done:
	/*
	* If the chunk is bigger than what the caller requested, we now  split
	* of  the  remainder  into  a new chunk.  This means that we are quite
	* capable of returning chunks  that  are  exactly  adjacent,  with  no
	* padding  in  between.   If  the  caller writes beyond the end of the
	* chunk, it is highly likely to garbage some other chunk.  Note:   you
	* may use want instead of padto here, to get maximum use of space, but
	* the result will tend to be more cpu time dealing with the misaligned
	* chunks that will result.
	*/
	if ((Sval(chp->size) - Sval(padto)) > 0) {
		V7M "Chunk %08X is %d+%d bytes; split it.",
			chp->addr,padto,(chp->size - padto) D;
		splitchunk(chp,padto,dsc);
	}
	chp->algn = align;
	chp->flag.free = 0;		/* Mark it handed out */
	chp->flag.alloc = 1;
#if MEMINFO > 0
	V7M "Copy %d bytes %s to %08X->desc",MEMDESC,Dsps(dsc,-1),chp D;
	Strncpy(chp->desc,dsc,MEMDESC);	/* There's an extra null byte */
#endif
	v = chp->addr;

	BZero(v,chp->size);		/* Make sure it's zeroed */
	errno = e;
	V7M "Give %08X=[%08X:%04X=%d] (wanted %d for %s)",chp,v,chp->size,chp->size,want,dsc D;
	if (e && (errno = e))	/* NOTE(review): e is never nonzero here; dead path */
		V7M m_malloc,want,dsc,Errinfo D;
fail:
	V7M "Returned %lX size %u for %s e=%d.",v,chp?chp->size:0,dsc,e D;
	if (Vflg['M']>7) dmpchunklist();
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		P7 "\t\t\tpthread_unlock_global_np() in %s",Fctname P;
		fflush(Vout);
		pthread_unlock_global_np();
	}
#endif /*USE_pthreads*/
	Fexit;
	return(v);	/* Zeroed, aligned block; null on failure */
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   Chunk* getheader(dsc)
*   	char*  dsc;
*
* DESCRIPTION:
*
* AUTHOR: John Chambers.
++*/
/*
* Return a zeroed Chunk header, recycling from the freechunks list when
* possible and mallocing multchunks headers at a time otherwise.  The
* null check of MallocM's result now happens BEFORE the linking loop;
* previously the loop dereferenced freechunks[i-1] even when MallocM
* had returned null.  Returns null on allocation failure.
*/
static Chunk* getheader(dsc)
	char*  dsc;		/* Description, for diagnostics */
{	Chunk* val=0;
	int    i;

	V7M "Called." D;
	V5M "freechunks=%X multchunks=%d sizeof(Chunk)=%d",freechunks,multchunks,sizeof(Chunk) D;
	if (!freechunks) {		/* Is there a free chunk header? */
		freechunks = (Chunk*)MallocM(multchunks*sizeof(Chunk),"ChunkHdr");
		V5M "freechunks=%X",freechunks D;
		if (!freechunks) {	/* Must check before touching the array */
			V2M "Can't get %d-byte Chunk header for %s",sizeof(Chunk),dsc D;
			Fail;
		}
		for (i=1; i<multchunks; i++) {	/* Link the chunks into a list */
			freechunks[i-1].next = &freechunks[i];
		}
		/* NOTE(review): the last header's next is whatever MallocM left
		* there; assumes MallocM zeroes its memory -- confirm. */
		V5M "Got %d-byte chunk for %d headers (%s)",multchunks*sizeof(Chunk),multchunks,dsc D;
	}
	freechunks = (val = freechunks)->next;	/* Pop the head of the free list */
	BZero(val,sizeof(Chunk));	/* Make sure it's empty */
fail:

	return val;
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   Chunk* mrgchunk(chp,dsc)
*   	Chunk* chp;
*   	char*  dsc;
*
* DESCRIPTION:
*
* Merge a chunk with the next one.  The next chunk header ends  up  in
* the freechunks list.  The return value is chp if we succeed, or null
* if we fail.  Failure only occurs if the caller calls us with a bogus
* merge request.
*
* AUTHOR: John Chambers.
*
++*/
static Chunk* mrgchunk(chp,dsc)
	Chunk* chp;		/* Chunk to be merged with its successor */
	char*  dsc;		/* Description, for diagnostics */
{	Chunk* r=0;
	Chunk* nxt;
	Fenter("mrgchunk");
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		pthread_lock_global_np();
		P7 "\t\t\tpthread_lock_global_np() in %s",Fctname P;
		fflush(Vout);
	}
#endif /*USE_pthreads*/
	ChkNull(chp,"Chunk pointer");
	ChkNull(chp->addr,"Chunk address");
	V7M "Called for %08X=%08X:%04X=%d %s",chp,chp->addr,chp->size,chp->size,Dsps(dsc,-1) D;
	nxt = chp->next;
	ChkNull(nxt,"Next chunk");
	ChkNull(nxt->addr,"Next chunk address");
	/* The two chunks must be exactly adjacent in memory to merge. */
	if ((chp->addr + chp->size) != nxt->addr) {
		V2M "\t\t\tCan't merge %08X:%04X=%d and %08X:%04X=%d",
			chp->addr,chp->size,chp->size,
			nxt->addr,nxt->size,nxt->size D;
		Fail;	/* Returns null via r */
	}
	chp->next = nxt->next;	/* Cut out the next chunk from the chunk list */
	chp->size += nxt->size;	/* Merge the chunks */
	chp->flag.malloc =
	nxt->flag.malloc = 0;	/* They can't be freed after a merge */
	BZero(nxt,sizeof(Chunk));
	nxt->next = freechunks;	/* Add the chunk header to the free list */
	freechunks = nxt;
	if (Vflg['M']>7) dmpchunklist();
	r= chp;		/* Success: return the surviving (grown) chunk */
fail:
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		P7 "\t\t\tpthread_unlock_global_np() in %s",Fctname P;
		fflush(Vout);
		pthread_unlock_global_np();
	}
#endif /*USE_pthreads*/
	Fexit;
	return r;
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   int relchunk(p,dsc)
*   	MEMP  p;
*   	char* dsc;
*
* DESCRIPTION:
*
* This releases a chunk back to the free list.  The argument must be a
* pointer  to  a chunk that was earlier returned by GetChunk().  If it
* isn't, we make the guess that it is a bogus address, and add  it  to
* the badchunk list. We could pass it to free(), but this turns out to
* be somewhat risky. If it is truly a bogus address, we die a horrible
* death and can't even diagnose the problem.
*
* It's really too bad that we can't intercept malloc everywhere; if we
* could, we could do a thorough job of identifying malloc'd chunks and
* add  them to our free list.  But if we didn't see p earlier, there's
* no way to determine its size, so we are clueless as to how we  might
* handle it correctly.
*
* We return the chunk's size if we succeed, zero for failure.
*
* AUTHOR: John Chambers.
*
++*/
int relchunk(p,dsc)
	MEMP   p;		/* Address previously returned by GetChunk() */
	char*  dsc;		/* Description, for diagnostics */
{	int    r=0;
	Chunk* chp=0;
	Chunk* prv=0;
	Fenter("relchunk");
	V7M "Chunk addr=%08X to be freed.",p D;
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		pthread_lock_global_np();
		P7 "\t\t\tpthread_lock_global_np() in %s",Fctname P;
		fflush(Vout);
	}
#endif /*USE_pthreads*/
	ChkNull(p,dsc);
	/* Find the chunk whose start address is p; prv trails one behind. */
	for (chp = chunklist; chp; prv = chp, chp = chp->next) {
		if (chp->addr == p) {
			chp->flag.free = 1;		/* Mark it free again */
			chp->flag.alloc = 0;
			V5M "Chunk %08X=[%08X:%04X=%d] freed.",chp,chp->addr,chp->size,chp->size D;
			if (prv && prv->flag.free	/* If previous block is free */
			&& ((prv->addr + prv->size) == chp->addr)) {
				V5M "Merge %08X with previous chunk %08X ...",chp->addr,prv->addr D;
				/* NOTE(review): mrgchunk can return null, but only if
				* the chunks aren't adjacent, which was just checked. */
				chp = mrgchunk(prv,dsc);
			}
			if (chp->next && chp->next->flag.free	/* If next is free */
			&& ((chp->addr + chp->size) == chp->next->addr)) {
				V5M "Merge %08X with next chunk %08X ...",chp->addr,chp->next->addr D;
				mrgchunk(chp,dsc);
			}
			r = chp->size;	/* Success: return the (possibly merged) size */
			Done;
		}
	}
	/* p isn't a chunk start; diagnose what it actually is. */
	V3M "+++ %08X isn't one of our chunks.",p D;
	if (ChkChunk(p,dsc)) {
		Switch(errno) {
		case EFAULT:	/* A chunk, but already free */
			V3M "\t\t\tChunk %08X (%s) is already free.",p,dsc D;
			Fail;
		case EINVAL:	/* Inside a chunk, but not its start */
			V3M "\t\t\tAddress %08X (%s) is inside chunk %08X:%08X:%04X=%d",
				p,dsc,chkchp,chkchp->addr,chkchp->size,chkchp->size D;
			Fail;
		case ENOMEM:	/* Outside everything we know about */
			V3M "\t\t\tAddress %08X (%s) is outside all chunks.",p,dsc D;
			break;
		default:
			V3M "\t\t\tAddress %08X (%s) gave unexpected error %d=%s=%s",p,dsc,Errinfo D;
			break;
		}
	} else {
		/* Internal inconsistency: ChkChunk says allocated but we missed it */
		V2M "\t\t\tChkChunk(%08X,%s)=0 but for loop didn't find it.",p,Dsps(dsc,-1) D;
		if (Vlvl>1) dmpchunklist();
	}
	chp = BadChunk(p,dsc);	/* Record the bad address */
/*
* What do we want to do with it?  For now, nothing.
*/
fail:
done:
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		pthread_unlock_global_np();
		P7 "\t\t\tpthread_unlock_global_np() in %s",Fctname P;
		fflush(Vout);
	}
#endif /*USE_pthreads*/
	Fexit;
	return r;	/* Freed chunk's size, or zero for failure */
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   Chunk * newchunk(siz,dsc)
*   	int   siz;
*   	char* dsc;
*
* DESCRIPTION:
*
* Get a new chunk of data of siz bytes.  Note that, as called in this
* package,  siz  is  always  a multiple of memalignsize, and we don't
* bother to validate this fact.  If this isn't true, the  worst  that
* can  go  wrong  is  that  we won't recognize adjacent blocks in the
* future, and won't merge them.
*
* AUTHOR: John Chambers.
*
++*/
static Chunk* newchunk(siz,dsc)
	int    siz;		/* Minimum number of bytes wanted */
	char*  dsc;		/* Description, for diagnostics */
{	Chunk* val=0;
	int    min;
	int    e=0;
	Fenter("newchunk");
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		pthread_lock_global_np();
		P7 "\t\t\tpthread_lock_global_np() in %s",Fctname P;
		fflush(Vout);
	}
#endif /*USE_pthreads*/
	if (!(val = getheader(dsc))) Fail;
	/*
	* Round the request up to a 1K multiple less 8 bytes (presumably to
	* leave room for malloc's own overhead), honoring memminsize.
	*/
	min = Max(MEMALIGN(siz+8,1024),MEMALIGN(memminsize+8,1024)) - 8;
	val->addr = (MEMP)Malloc(min);
	e = errno;		/* Save errno; later calls may clobber it */
	if (!val->addr) {
		e = errno;
		P2 m_nomem,pname,min,dsc,Errinfo P;
		/* NOTE(review): val came from getheader/MallocM, not plain
		* malloc; assumes free() is compatible here -- confirm. */
		if (multchunks == 1) free(val);
		Fail;
	}
	V5M "Got %d-byte Chunk for %s",min,dsc D;
	val->size = min;
	val->flag.free = 1;		/* It's a free chunk for now */
	val->flag.alloc = 0;	/* We haven't handed it out */
	val->flag.malloc = 1;	/* We got it from malloc */
	addchunk(val,dsc);

done:
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		P7 "\t\t\tpthread_unlock_global_np() in %s",Fctname P;
		fflush(Vout);
		pthread_unlock_global_np();
	}
#endif /*USE_pthreads*/
	FExit;
	return val;
fail:
	/* Failure path: restore the saved errno, then unlock via done: */
	errno = e;
	Done;
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   int  freechunk(p,n,dsc)
*   	MEMP  p;
*   	MEMS  n;
*   	char* dsc;
*
* DESCRIPTION:
*
* The reason for this routine is to free a block of a  specific  size.
* This  will allow the caller to free any (writable) memory, including
* areas that weren't gotten from malloc.  This  includes  two  primary
* cases: You can free a portion of a malloc'd chunk; you can also free
* things like global data that is no longer needed.
*
* This routine is somewhat paranoid; it first scans the chunklist  for
* a  chunk  that  contains  (or overlaps) the chunk being freed.  If a
* match is found, error messages are generated, and 0 is returned with
* errno = EINVAL. Eventually, we hope to have this routine do whatever
* sort of cutting and pasting is needed to make the free work, but for
* now, freeing overlapping chunks doesn't work.
*
* On success, the size n is returned.
*
* AUTHOR: John Chambers.
*
++*/
/*
* Free an arbitrary region [p, p+n) that need not have come from
* malloc: either extend an adjacent free chunk or add a new free chunk
* header for it.  Overlap with any existing chunk is an error (EINVAL,
* returns 0).  On success returns n.  (The unused local `r` has been
* removed; it was declared and never referenced.)
*/
int freechunk(p,n,dsc)
	MEMP  p;		/* Start of the region being freed */
	MEMS  n;		/* Its size in bytes */
	char* dsc;		/* Description, for diagnostics */
{	MEMP  z=p+n;	/* First byte past the region being freed */
	Chunk* chp;
	Fenter("freechunk");
	V7M "Free addr=%08X size=%04X=%d [%s]",p,n,n,dsc D;
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		pthread_lock_global_np();
		P7 "\t\t\tpthread_lock_global_np() in %s",Fctname P;
		fflush(Vout);
	}
#endif /*USE_pthreads*/
	ChkNull(p,dsc);
	if (n <= 0) {
		V2M "\t\t\tCan't free zero-length chunk at %08X",p D;
		errno = EINVAL;
		Fail;
	}
	for (chp = chunklist; chp; chp = chp->next) {
		/* Reject any overlap: either endpoint inside an existing chunk. */
		if ((Ule(chp->addr,p) && Ult(p,chp->addr+chp->size))
		||  (Ult(chp->addr,z) && Ule(z,chp->addr+chp->size))) {
			V2M "\t\t\tChunk %08X:%04X=%d overlaps %08X:%04X=%d [%s]",
				chp->addr,chp->size,chp->size,p,n,n,dsc D;
			if (Vflg['M']>2) DmpChunk(chp,"chunk");
			errno = EINVAL;
			/*
			* Eventually we should do a merge to handle
			* the overlap case.
			*/
			Fail;
		}
		if (!chp->flag.alloc) {	/* Try to extend free chunk */
			if (Ueq(p,chp->addr+chp->size)) {	/* Region starts at its end */
				V7M "Chunk %08X:%d increased by %d.",chp->addr,chp->size,n D;
				chp->size += n;
				chp->flag.malloc = 0;	/* Don't pass it to free() */
				V7M "Chunk %08X:%d is result",chp->addr,chp->size D;
				Done;
			}
			if (Ueq(z,chp->addr)) {	/* Region ends where the chunk starts */
				V7M "Chunk %08X:%d prepended by %d.",chp->addr,chp->size,n D;
				chp->addr -= n;
				chp->size += n;
				chp->flag.malloc = 0;	/* Don't pass it to free() */
				V7M "Chunk %08X:%d is result",chp->addr,chp->size D;
				Done;
			}
		}
	}
	/* No neighbor to extend; introduce the region as a new free chunk. */
	if (!(chp = getheader(dsc))) Fail;
	chp->addr = p;
	chp->size = n;
	chp->flag.free = 1;		/* It's a free chunk */
	chp->flag.alloc = 0;	/* We haven't handed it out */
	chp->flag.malloc = 0;	/* We didn't get it from malloc */
	addchunk(chp,dsc);		/* Add it to the chunk list */
done:
	if (Vflg['M']>7) dmpchunklist();
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		P7 "\t\t\tpthread_unlock_global_np() in %s",Fctname P;
		fflush(Vout);
		pthread_unlock_global_np();
	}
#endif /*USE_pthreads*/
	Fexit;
	return n;	/* n on success, zero after the fail: path below */
fail:
	n = 0;
	Done;
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   Ulong MemAlign(x,a)
*   	Ulong x;
*   	Ulong a;
*/
Ulong MemAlign(x,a)
	Ulong x, a;
{
	/* Round x up to the next multiple of a (unchanged arithmetic, so
	* unsigned wrap behavior near the top of the range is preserved).
	* NOTE(review): a == 0 divides by zero; callers must pass a > 0. */
	return ((x + a - 1) / a) * a;
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   MEMP   sizchunk(ptr,old,new,dsc)
*   	MEMP  ptr;
*   	MEMS  old;
*   	MEMS  new;
*   	char* dsc;
*
* DESCRIPTION:
*
* Change the size of chunk ptr from old to new, and return the address of
* the possibly new chunk.  If new <= old, the tail end of the chunk will be
* freed and the return value will be ptr.  If new > old, a check is made to
* see if ptr can be expanded in place; if so, that happens and ptr is
* returned.  If the next chunk isn't adjacent, free and big enough, then a
* new chunk must be found, and the first old bytes of ptr copied to it.
* The remaining new-old bytes are zeroed out, and the new chunk's address
* is returned.
*
* If old is zero, the chunk's old size will be used.
*
* The  return  value  will be the address of the chunk, possibly ptr and
* possibly not.  If a new  chunk  is  needed  and  no  more  space  is
* available, the return value will be null.
*
* AUTHOR: John Chambers.
*
++*/
/*
* Fix: the two calls splitchunk(chp,new) below omitted the dsc
* argument; splitchunk passes dsc on to getheader and the Dsps()
* diagnostics, so a missing argument hands them a garbage pointer.
*/
MEMP sizchunk(ptr,old,new,dsc)
	MEMP   ptr;		/* Address previously returned by GetChunk() */
	MEMS   old;		/* Caller's idea of the current size (0 = use actual) */
	MEMS   new;		/* Desired new size */
	char*  dsc;		/* Description, for diagnostics */
{	MEMP   val=0;
	MEMS   siz=old;
	Chunk* chp;
	Chunk* nxt;
	Fenter("sizchunk");
	V5M "Resize %08X from %d to %d bytes [%s]",ptr,siz,new,dsc D;
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		pthread_lock_global_np();
		P7 "\t\t\tpthread_lock_global_np() in %s",Fctname P;
		fflush(Vout);
	}
#endif /*USE_pthreads*/
	if (!(chp = HdrChunk(ptr,dsc)))
		Fail;
	V7M "Chunk %08X:%08X size=%04X=%d match.",chp,chp->addr,chp->size,chp->size D;
	/* Shrinking (or no-op): keep the chunk, split off any excess. */
	if (chp->size >= new) {
		V7M "Chunk %08X:%08X size=%d satisfies request for %d.",
			chp,chp->addr,chp->size,new D;
		if (Ugt(chp->size,new))
			splitchunk(chp,new,dsc);
		val = chp->addr;
		Done;
	}
	if (siz <= 0) siz = chp->size;	/* Zero means "use the actual size" */
	if (chp->size != siz) {
		V7M "--- Chunk %08X:%08X (%s) size=%d, user claimed %d.",
			chp,chp->addr,dsc,chp->size,siz D;
		/*
		* We ignore the caller's claimed size and use the actual size.
		*/
	}
/*
* If the request is for less space, it's easy to satisfy.
* (NOTE(review): unreachable -- the identical test above already
* handled chp->size >= new.  Kept for safety.)
*/
	if (chp->size >= new) {
		V7M "Chunk %08X=%08X (%s) size %d >= %d bytes.",chp,chp->addr,dsc,chp->size,new D;
		if (Ugt(chp->size,new)) {
			splitchunk(chp,new,dsc);
		}
		val = chp->addr;
		Done;
	}
/*
* We now check to see if the chunk can be grown to  its  new  size  by
* annexing  the  next chunk.  This might seem unlikely, as it requires
* that the next chunk be adjacent, free and big enough.  But there are
* applications in which this happens, and it isn't expensive to check.
*/
	if ((nxt = chp->next) && nxt->flag.free) {
		V7M "Next chunk %08X:%08X is free.",nxt,nxt->addr D;
		if ((chp->addr + chp->size) == nxt->addr) {
			V7M "Next chunk %08X:%08X is adjacent to %08X:%08X.",
				nxt,nxt->addr,chp,chp->addr D;
			if ((chp->size + nxt->size) >= new) {
				V7M "Next chunk is big enough %d+%d >= new=%d.",chp->size,nxt->size,new D;
				mrgchunk(chp,dsc);			/* Grow this chunk */
				if (Ugt(chp->size,new)) {	/* If it's too big */
					V7M "Merged chunk is too big: new=%d < size=%d.",new,chp->size D;
					splitchunk(chp,new,dsc);
				}
				V7M "Chunk %08X:%08X extended to %d bytes [%s]",
					chp,chp->addr,chp->size,dsc D;
				if (chp->size >= new) {
					V7M "Chunk is big enough %d >= %d.",chp->size,new D;
					val = chp->addr;
					Done;
				} else {	/* Paranoia */
					V3M "+++ Merged %08X:%08X but size %d still < %d.",
						chp,chp->addr,chp->size,new D;
				}
			} else {
				V7M "Merge would get only %d bytes; need %d.",chp->size+nxt->size,new D;
			}
		} else {
			V7M "Chunk %08X:%08X not adjacent to %08X:%08X",nxt,nxt->addr,chp,chp->addr D;
		}
	}
/*
* That didn't work.  We must allocate a new block and copy the data over
* to it.
*/
	if (!(val = GetAChunk(new,chp->algn,dsc))) {
		V2M "\t\t\tCan't get space to expand %s to %d bytes.",Dsps(dsc,-1),new D;
		Fail;
	}
	BCopy(chp->addr,val,chp->size);
	/* NOTE(review): the old chunk at ptr is NOT released here; confirm
	* whether callers are expected to RelChunk it themselves. */
done:
	V5M "Chunk %08X size %d resized to %08X size %d [%s]",ptr,chp->size,val,new,dsc D;
fail:
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		P7 "\t\t\tpthread_unlock_global_np() in %s",Fctname P;
		fflush(Vout);
		pthread_unlock_global_np();
	}
#endif /*USE_pthreads*/
	Fexit;
	return val;	/* Address of the (possibly moved) chunk, or null */
}

/*++ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* CALL:
*   Chunk* splitchunk(chp,siz,dsc)
*   	Chunk* chp;
*   	int    siz;
*   	char*  dsc;
*
* DESCRIPTION:
*
* Split a chunk into two.  The original chunk must be  big  enough  to
* split  into  two  chunks  of  sizes  siz  and (chp->size-siz).  This
* requires that the remainder be at least memalignsize bytes. The result of
* this  will  be  that  the  original chunk has size == siz, and a new
* chunk will be introduced immediately  after  it  that  contains  the
* remainder of the original chunk.
*
* The return value is the new chunk, or null for failure.  Failure can
* only  happen if chp isn't big enough to split, or if we can't malloc
* a new Chunk structure.
*
* The old chunk at chp will keep its flags, but  the  new  chunk  that
* gets  the  remainder is flagged as being free and unallocated.  Both
* are flagged as non-malloced, since it's too dangerous to try freeing
* the original, and the remainder can't be freed.
*
* AUTHOR: John Chambers.
*
++*/
static Chunk* splitchunk(chp,siz,dsc)
	Chunk* chp;		/* Chunk to split; keeps the first siz bytes */
	int    siz;		/* New size of chp; remainder goes to the new chunk */
	char*  dsc;		/* Description, for diagnostics */
{	Chunk* val=0;
	int    e=0;
	Fenter("splitchunk");
	V7M "Chunk %08X size=%d split to size %d, %d.",chp,chp->size,siz,chp->size-siz D;
	/* A too-small remainder is only warned about; the hard failure
	* below was deliberately disabled. */
	if ((chp->size-siz) < memalignsize) {
		V3M "--- Called to split %d-byte chunk into %d and %d.",chp->size,siz,chp->size-siz D;
	/*	e = EINVAL; */
	/*	Fail; */
	}
	if (!(val = getheader(dsc))) Fail;
	/* NOTE(review): unlike the sibling routines, the lock is taken only
	* AFTER getheader and the size check -- confirm that's safe. */
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		pthread_lock_global_np();
		P7 "\t\t\tpthread_lock_global_np() in %s",Fctname P;
		fflush(Vout);
	}
#endif /*USE_pthreads*/
	val->addr = chp->addr + siz;	/* New block gets remainder */
	val->size = chp->size - siz;
	chp->size = siz;
	val->flag.free = 1;		/* New block is marked as free */
	val->flag.alloc = 0;	/* and unallocated */
	val->flag.malloc = 0;	/* We can't return either to malloc */
	chp->flag.malloc = 0;
	val->next = chp->next;	/* Link the new one into the list */
	chp->next = val;
#if defined(USE_pthreads) && (USE_pthreads > 0)
	if (V_locking && use_pthreads) {
		P7 "\t\t\tpthread_unlock_global_np() in %s",Fctname P;
		fflush(Vout);
		pthread_unlock_global_np();
	}
#endif /*USE_pthreads*/
fail:
	FExit;
	return val;		/* The new remainder chunk, or null for failure */
}
