head     56.3;
access   paws bayes jws quist brad dew jwh;
symbols  ;
locks    ; strict;
comment  @ * @;


56.3
date     93.01.27.13.38.35;  author jwh;  state Exp;
branches ;
next     56.2;

56.2
date     93.01.27.12.13.50;  author jwh;  state Exp;
branches ;
next     56.1;

56.1
date     91.11.05.09.35.35;  author jwh;  state Exp;
branches ;
next     55.1;

55.1
date     91.08.25.10.14.09;  author jwh;  state Exp;
branches ;
next     54.1;

54.1
date     91.03.18.15.21.13;  author jwh;  state Exp;
branches ;
next     53.1;

53.1
date     91.03.11.19.22.34;  author jwh;  state Exp;
branches ;
next     52.1;

52.1
date     91.02.19.09.06.14;  author jwh;  state Exp;
branches ;
next     51.1;

51.1
date     91.01.30.16.05.22;  author jwh;  state Exp;
branches ;
next     50.1;

50.1
date     90.10.29.16.20.49;  author jwh;  state Exp;
branches ;
next     49.1;

49.1
date     90.08.14.14.05.07;  author jwh;  state Exp;
branches ;
next     48.1;

48.1
date     90.07.26.11.10.51;  author jwh;  state Exp;
branches ;
next     47.1;

47.1
date     90.05.14.10.49.34;  author dew;  state Exp;
branches ;
next     46.1;

46.1
date     90.05.07.08.37.08;  author jwh;  state Exp;
branches ;
next     45.1;

45.1
date     90.04.19.15.44.22;  author jwh;  state Exp;
branches ;
next     44.1;

44.1
date     90.04.01.22.02.12;  author jwh;  state Exp;
branches ;
next     43.1;

43.1
date     90.03.20.13.52.04;  author jwh;  state Exp;
branches ;
next     42.1;

42.1
date     90.01.23.17.37.30;  author jwh;  state Exp;
branches ;
next     41.1;

41.1
date     89.12.22.11.19.50;  author jwh;  state Exp;
branches ;
next     40.1;

40.1
date     89.09.29.11.42.33;  author jwh;  state Exp;
branches ;
next     39.1;

39.1
date     89.09.26.16.28.02;  author dew;  state Exp;
branches ;
next     38.1;

38.1
date     89.08.29.11.18.40;  author jwh;  state Exp;
branches ;
next     37.1;

37.1
date     89.05.12.11.32.05;  author dew;  state Exp;
branches ;
next     36.1;

36.1
date     89.02.06.10.11.02;  author dew;  state Exp;
branches ;
next     35.1;

35.1
date     89.02.02.13.22.56;  author dew;  state Exp;
branches ;
next     34.1;

34.1
date     89.01.23.15.58.08;  author jwh;  state Exp;
branches ;
next     33.1;

33.1
date     89.01.16.11.32.59;  author dew;  state Exp;
branches ;
next     32.1;

32.1
date     89.01.10.11.39.21;  author bayes;  state Exp;
branches ;
next     31.1;

31.1
date     88.12.14.18.00.21;  author bayes;  state Exp;
branches ;
next     30.1;

30.1
date     88.12.09.13.38.30;  author dew;  state Exp;
branches ;
next     29.1;

29.1
date     88.10.31.15.22.35;  author bayes;  state Exp;
branches ;
next     28.2;

28.2
date     88.10.11.16.59.36;  author bayes;  state Exp;
branches ;
next     28.1;

28.1
date     88.10.06.10.51.42;  author dew;  state Exp;
branches ;
next     27.1;

27.1
date     88.09.29.11.18.09;  author bayes;  state Exp;
branches ;
next     26.1;

26.1
date     88.09.28.12.37.05;  author bayes;  state Exp;
branches ;
next     25.1;

25.1
date     88.03.02.09.13.42;  author bayes;  state Exp;
branches ;
next     24.1;

24.1
date     87.08.31.09.26.46;  author jws;  state Exp;
branches ;
next     23.1;

23.1
date     87.08.26.10.02.47;  author bayes;  state Exp;
branches ;
next     22.1;

22.1
date     87.08.17.10.44.45;  author bayes;  state Exp;
branches ;
next     21.1;

21.1
date     87.08.12.13.31.12;  author bayes;  state Exp;
branches ;
next     20.2;

20.2
date     87.08.07.15.49.45;  author bayes;  state Exp;
branches ;
next     20.1;

20.1
date     87.07.30.10.43.01;  author bayes;  state Exp;
branches ;
next     19.3;

19.3
date     87.07.20.11.36.59;  author bayes;  state Exp;
branches ;
next     19.2;

19.2
date     87.06.18.10.43.20;  author bayes;  state Exp;
branches ;
next     19.1;

19.1
date     87.06.01.08.00.41;  author jws;  state Exp;
branches ;
next     18.1;

18.1
date     87.05.20.14.55.59;  author bayes;  state Exp;
branches ;
next     17.1;

17.1
date     87.04.30.10.12.10;  author jws;  state Exp;
branches ;
next     16.1;

16.1
date     87.04.26.15.26.00;  author jws;  state Exp;
branches ;
next     15.1;

15.1
date     87.04.13.08.54.57;  author jws;  state Exp;
branches ;
next     14.1;

14.1
date     87.04.01.14.53.30;  author jws;  state Exp;
branches ;
next     13.1;

13.1
date     87.02.28.18.13.38;  author jws;  state Exp;
branches ;
next     12.1;

12.1
date     87.02.02.13.01.44;  author jws;  state Exp;
branches ;
next     11.2;

11.2
date     87.01.29.14.34.47;  author jws;  state Exp;
branches ;
next     11.1;

11.1
date     87.01.19.09.27.22;  author jws;  state Exp;
branches ;
next     10.3;

10.3
date     87.01.13.10.24.06;  author jws;  state Exp;
branches ;
next     10.2;

10.2
date     87.01.13.10.20.15;  author jws;  state Exp;
branches ;
next     10.1;

10.1
date     86.12.24.10.34.19;  author jws;  state Exp;
branches ;
next     9.2;

9.2
date     86.12.16.11.30.45;  author jws;  state Exp;
branches ;
next     9.1;

9.1
date     86.12.12.14.00.43;  author bayes;  state Exp;
branches ;
next     8.4;

8.4
date     86.12.08.18.50.15;  author jws;  state Exp;
branches ;
next     8.3;

8.3
date     86.12.04.15.06.39;  author jws;  state Exp;
branches ;
next     8.2;

8.2
date     86.12.04.14.56.25;  author jws;  state Exp;
branches ;
next     8.1;

8.1
date     86.11.27.11.31.33;  author jws;  state Exp;
branches ;
next     7.1;

7.1
date     86.11.20.13.20.09;  author hal;  state Exp;
branches ;
next     6.1;

6.1
date     86.11.04.17.23.59;  author paws;  state Exp;
branches ;
next     5.1;

5.1
date     86.10.28.16.08.54;  author hal;  state Exp;
branches ;
next     4.4;

4.4
date     86.10.20.14.56.00;  author hal;  state Exp;
branches ;
next     4.3;

4.3
date     86.10.10.15.24.15;  author hal;  state Exp;
branches ;
next     4.2;

4.2
date     86.10.09.13.29.54;  author hal;  state Exp;
branches ;
next     4.1;

4.1
date     86.09.30.19.23.53;  author hal;  state Exp;
branches ;
next     3.2;

3.2
date     86.09.04.17.36.13;  author hal;  state Exp;
branches ;
next     3.1;

3.1
date     86.09.01.11.33.15;  author hal;  state Exp;
branches ;
next     2.11;

2.11
date     86.08.27.12.37.13;  author hal;  state Exp;
branches ;
next     2.10;

2.10
date     86.08.27.12.33.51;  author hal;  state Exp;
branches ;
next     2.9;

2.9
date     86.08.26.16.18.23;  author hal;  state Exp;
branches ;
next     2.8;

2.8
date     86.08.25.12.28.09;  author hal;  state Exp;
branches ;
next     2.7;

2.7
date     86.08.25.10.18.09;  author hal;  state Exp;
branches ;
next     2.6;

2.6
date     86.08.22.08.36.19;  author hal;  state Exp;
branches ;
next     2.5;

2.5
date     86.08.20.16.39.56;  author hal;  state Exp;
branches ;
next     2.4;

2.4
date     86.08.19.15.19.48;  author hal;  state Exp;
branches ;
next     2.3;

2.3
date     86.08.18.12.25.16;  author hal;  state Exp;
branches ;
next     2.2;

2.2
date     86.07.30.18.12.40;  author hal;  state Exp;
branches ;
next     2.1;

2.1
date     86.07.30.14.24.27;  author hal;  state Exp;
branches ;
next     1.21;

1.21
date     86.07.29.12.42.37;  author hal;  state Exp;
branches ;
next     1.20;

1.20
date     86.07.28.14.08.40;  author hal;  state Exp;
branches ;
next     1.19;

1.19
date     86.07.28.08.54.45;  author hal;  state Exp;
branches ;
next     1.18;

1.18
date     86.07.23.18.36.10;  author hal;  state Exp;
branches ;
next     1.17;

1.17
date     86.07.22.16.37.32;  author hal;  state Exp;
branches ;
next     1.16;

1.16
date     86.07.22.16.15.24;  author hal;  state Exp;
branches ;
next     1.15;

1.15
date     86.07.21.16.50.48;  author hal;  state Exp;
branches ;
next     1.14;

1.14
date     86.07.18.14.29.22;  author hal;  state Exp;
branches ;
next     1.13;

1.13
date     86.07.17.17.23.04;  author hal;  state Exp;
branches ;
next     1.12;

1.12
date     86.07.16.09.49.38;  author hal;  state Exp;
branches ;
next     1.11;

1.11
date     86.07.11.14.08.50;  author hal;  state Exp;
branches ;
next     1.10;

1.10
date     86.07.08.18.56.49;  author hal;  state Exp;
branches ;
next     1.9;

1.9
date     86.07.08.13.32.24;  author hal;  state Exp;
branches ;
next     1.8;

1.8
date     86.07.08.08.52.35;  author hal;  state Exp;
branches ;
next     1.7;

1.7
date     86.07.04.12.26.17;  author hal;  state Exp;
branches ;
next     1.6;

1.6
date     86.07.04.11.15.06;  author hal;  state Exp;
branches ;
next     1.5;

1.5
date     86.07.04.10.06.36;  author hal;  state Exp;
branches ;
next     1.4;

1.4
date     86.07.03.11.28.08;  author danm;  state Exp;
branches ;
next     1.3;

1.3
date     86.06.12.11.00.15;  author danm;  state Exp;
branches ;
next     1.2;

1.2
date     86.06.06.16.56.20;  author danm;  state Exp;
branches ;
next     1.1;

1.1
date     86.06.04.08.41.24;  author geli;  state tmp;
branches ;
next     ;


desc
@Base file for PWS 3.2 release.

@


56.3
log
@
pws2rcs automatic delta on Wed Jan 27 13:14:25 MST 1993
@
text
@$modcal$

$linenum 7000$
$lines 54$

$partial_eval on$

$range off$
$ovflcheck off$
$debug off$

{
{ HFS CACHE
{ handles caching for
{       superblocks
{       cgroups
{       inode
{       data (indir blocks, dir contents)
}


module hfscache;

$search 'hfstuff', 'hfscalc', 'hfsupport'$

import
    sysdevs,
    hfstuff,
    asm,
    hfscalc,
    hfsupport,
    sysglobals,
    iocomasm;


{
{ Rules of usage:
{ SET-UP:
{  1.  call init_cache first
{  2.  call init_hfs_unit for every hfs unit
{  3.  call configure_cache once step 2 is finished
{ NORMAL USE:
{  1.  call get_superblock before doing anything else with a
{       given unit.
{  2.  only one unit is active at a time.  get_superblock makes it
{       active.  All other calls use the active unit.
{  3.  call sync to flush all buffers to disc.  This is NOT done
{       automatically by hfsalloc, so must be done in the DAM/TM.
{       Sync also sets all use counts to 0 (to recover from escapes),
{       so call it only when all put_* have been done.
{  4.  for debugging, call check_cache when a transaction is finished.
{       It complains if any of the cache is still in use.
{  5.  call invalidate_unit to invalidate any in-core buffers.  This
{       MUST happen when the user swaps floppies in the same drive,
{       for example.
{  6.  get/put work as follows.  Call get_* to get *.  When you're
{       through with it, call put_* with release.  If you ever
{       write to *, call put_* with dirty or immediate.
{       dirty/immediate/release can be combined as desired in
{       a single put_* call.  There is also "invalid", which means that
{       the buffer contents are invalid.
{ INTERRUPTS:
{      Because caching is done in static heap space, the hfs driver is NOT
{       reentrant with respect to interrupts.  ie, if you call the AM, DAM,
{       or the hfsTM from within an ISR, you risk messing up any lower
{       level transactions currently happening, even if you've got your
{       own FIB!
{ MEDIA CHANGE:
{      Exactly the same as "SETUP" above.
}

{
{ How the cache is set up:
{  1.  Fixed-size block allocated by init_cache.  "hfs_cache_bytes" is size
{       in bytes of cache.  This size is user-configurable,
{       in the module hfs_user.
{  2.  init_hfs_unit looks at the beginning of the superblock and
{       calculates how big each superblock and cgroup really are.
{       It remembers the biggest.  Under PAWS, it should be called
{       by TABLE.
{  3.  configure_cache takes info from step 2 and doles out the space
{       allocated in step 1.  It is called by TABLE
{       under PAWS.  Configuration consists of deciding how many
{       of each buffer we will have, and setting up the buffer headers
{       accordingly.
}


export

const
    { size of cache block in bytes.  Must be power of 2, >= 1K }
    cache_blk_size = 1024;

type
    cache_blk_type = packed array[0..cache_blk_size-1] of char;
    cache_blk_ptr_type = ^cache_blk_type;
    cache_action_type = (release, dirty, immediate, invalid, stamping);
    cache_action_set = set of cache_action_type;

    { inode a little bigger in core }
    binode_type = packed record
	inode: inode_type;
	inumber: integer;
    end;
    binode_ptr_type = ^binode_type;

var
    current_super: super_block_ptr_type;


{
{ cache control
}
procedure init_cache;
procedure init_hfs_unit(unum: integer; force: boolean);
procedure configure_cache;
procedure sync;
procedure invalidate_unit(badunit: integer);

{
{ disc access for each item
}
function  get_superblock(unum: shortint): super_block_ptr_type;
procedure put_superblock(fs: super_block_ptr_type;
			 how: cache_action_set);

function  get_cgroup(groupnum: integer): cgroup_ptr_type;
procedure put_cgroup(cgp: cgroup_ptr_type;
		     how: cache_action_set);

function  get_inode(nodenum: integer): inode_ptr_type;
procedure put_inode(ip: inode_ptr_type;
		    how: cache_action_set);

function  get_datablk(fragnum: frag_type;
		      offset: integer): cache_blk_ptr_type;
function  get_edatablk(fragnum: frag_type;
		       offset: integer): cache_blk_ptr_type;
procedure put_datablk(dp: cache_blk_ptr_type;
		      how: cache_action_set);

{
{ miscellaneous
}
function inumber(iptr: inode_ptr_type): integer;
function itype(ip: inode_ptr_type): integer;
procedure nuke_unit(unit : integer);

implement

const
    { for enumerating item types -- must begin with 1 }
    I_SB = 1;
    I_CG = 2;
    I_INODE = 3;
    I_DATABLK = 4;
    NUM_ITEMS = 4;

    { for "empty" (no disc read) data blks, <> any item type }
    I_EDATABLK = -1;

type
    { buffer control record }
    buf_hdr_ptr_type = ^buf_hdr_type;
    buf_hdr_type = packed record
	is_dirty: boolean;
	item: 1..NUM_ITEMS;
	use_count: 0..255;
	unit: shortint;
	disc_addr: integer;
	age: integer {was ushort. SFB};
	max_size: ushort;
	size1, size2: shortint;
	next: buf_hdr_ptr_type;
	case integer of
	    1: (iptr: inode_ptr_type);
	    2: (fsptr: super_block_ptr_type);
	    3: (cgptr: cgroup_ptr_type);
	    4: (dptr: cache_blk_ptr_type);
	    5: (wptr: windowp);
    end;
    buf_hdr_array_type = array[1..maxint] of buf_hdr_type;
    buf_hdr_array_ptr_type = ^buf_hdr_array_type;

type
    item_info_type = array[1..NUM_ITEMS] of integer;

const
    { max number of each item in cache }
    MAX_SBS = 2;
    MAX_CGS = 20;
    MAX_INODES = 100;
    MAX_DATABLKS = 50;

    { number of each item in cache "unit" }
    UNIT_SBS = 1;
    UNIT_CGS = 3;
    UNIT_INODES = 10;
    UNIT_DATABLKS = 3;

    { can't survive with fewer items than this }
    MIN_SBS = 1;
    MIN_CGS = 1;
    MIN_INODES = 3;
    MIN_DATABLKS = 2;

    { minimum cache size }
    MIN_CACHE_BYTES = 10*1024;

    { default cache size }
    DEF_CACHE_BYTES = 15*1024;

    { illegal unit number }
    NO_UNIT = -1;

    { and another, which can be the same }
    ALL_UNITS = -1;

    bytes_per_ptr = 4;

var
    { address of buf hdrs for each item }
    buf_hdrs_array: array[1..NUM_ITEMS] of buf_hdr_array_ptr_type;

    { linked list of buf hdrs for each item }
    buf_list_array: array[1..NUM_ITEMS] of buf_hdr_ptr_type;

    { how many of each item actually present }
    buf_count: item_info_type;

    { how many items in a tiled unit }
    unit_count: item_info_type;

    { maximum, minimum count for each item }
    max_count: item_info_type;
    min_count: item_info_type;

    { size of each item }
    item_size: item_info_type;

    { size of each buffer -- item size plus a backptr }
    buf_size: item_info_type;

    { array of buffers }
    cache_space: anyptr;
    { size of the array }
    cache_bytes: integer;

    { pseudo-clock for buffer aging }
    pclock: integer {was ushort. SFB} ;

    { where current_super lives }
    current_unit: shortint;

    { has cache been properly initialized? }
    initialized: boolean;

{-------------------------------------------------------------------------}
{
{ Miscellaneous utility routines
}

{
{ actual_sb_size
{ calculates size of sb, not including csumm info, that
{ we read and write.  this info is in superblock, but always
{ rounded up to fragment, too coarse for our purposes.
{ sb size is the fixed part, plus 1 byte for each big block
{ in a cylinder cycle.
}
function actual_sb_size(fs: super_block_ptr_type): integer;
var
    sb_bytes: integer;
begin
    sb_bytes := sizeof(fs^) + howmany(fs^.spc * fs^.cpc, nspb(fs));
    actual_sb_size := roundup(sb_bytes, DISK_SECTOR);
end;

{
{ actual_cs_size
{ size of csumm info
{ cs size is one csum structure per cylinder group
}
function actual_cs_size(fs: super_block_ptr_type): integer;
var
    cs_bytes: integer;
begin
    cs_bytes := fs^.ncg * sizeof(csumm_type);
    actual_cs_size := roundup(cs_bytes, DISK_SECTOR);
end;

{
{ actual_cg_size
{ size of cg info
{ cg size is fixed part, plus 1 bit for each fragment in the
{ cylinder group.
}
function actual_cg_size(fs: super_block_ptr_type): integer;
var
    cg_bytes: integer;
begin
    cg_bytes := sizeof(cylinder_group_block_type) + howmany(fs^.fpg, NBBY);
    actual_cg_size := roundup(cg_bytes, DISK_SECTOR);
end;


{
{ assign_space
{ We have decided how many headers and buffers for each item can fit into
{ the given cache space.  Now we assign the headers and buffers for a single
{ item.  count is how
{ many we can afford.  size is the size of each buffer plus header.
{ cache_address is the address of the space we are allotted,
{ which we keep up to date.
{ The beginning of the space is taken for buffer headers,
{ and the end for buffers.  The hdrs are initialized, and the
{ hdrs and buffers are pointed to each other.
}
procedure assign_space(var cache_address: ipointer;
		       count, size: integer;
		      this_item: integer);
var
    i: integer;
    buf_hdr: buf_hdr_ptr_type;
begin
    { first get the hdr space }
    buf_hdrs_array[this_item] := buf_hdr_array_ptr_type(cache_address);
    buf_list_array[this_item] := buf_hdr_ptr_type(cache_address);
    buf_hdr := buf_hdr_ptr_type(cache_address);
    cache_address := addr(cache_address^, count*sizeof(buf_hdr_type));
    size := size - sizeof(buf_hdr_type);

    { then initialize each hdr-buffer pair }
    for i := 1 to count do begin
	with buf_hdr^ do begin
	    is_dirty := false;
	    item := this_item;
	    use_count := 0;
	    unit := NO_UNIT;
	    max_size := size - bytes_per_ptr;
	    dptr := addr(cache_address^, bytes_per_ptr);
	    if i = count then
		next := nil
	    else
		next := addr(buf_hdr^, sizeof(buf_hdr_type));
	end;
	{ back ptr from buffer to buf hdr }
	cache_address^ := integer(buf_hdr);
	cache_address := addr(cache_address^, size);
	buf_hdr := addr(buf_hdr^, sizeof(buf_hdr_type));
    end;
end;


{
{ allocate_extra
{ takes some extra cache space and allocates as much of it
{ as possible to the given item.  There is extra cache space
{ because the unit of allocation might not fit into the cache
{ space an integral number of times, or because some buf hdr
{ arrays (like sb) are kept deliberately small.
{ updates extra_bytes to show how much taken.
}
procedure allocate_extra(var extra_bytes: integer; item: integer);
var
    extra_bufs: integer;
begin
    extra_bufs := min(max_count[item] - buf_count[item],
		      extra_bytes div buf_size[item]);
    buf_count[item] := buf_count[item] + extra_bufs;
    extra_bytes := extra_bytes - (extra_bufs * buf_size[item]);
end;

{-------------------------------------------------------------------------}
{
{ buffer read and write routines
}

{
{ read_buf
{ reads a buffer from the disc
{ caller sets:
{       unit
{       disc_address
{       item
{       size1 (ignored for superblocks)
{ max_size shows the max buffer size available.
{ size1 shows how much we should read.
{ for superblocks, there is a small bootstrap problem,
{ because we don't know how much to read until we see it.
{ so we read the max size, then set size1 and size2 and
{ read the csumm info.
{ ioresult shows success or failure.
}
procedure read_buf(buf_hdr: buf_hdr_ptr_type);
label
    999;
var
    sector_buf: packed array[1..DISK_SECTOR] of char;
    sector_addr: integer;
    buf_iptr: inode_ptr_type;
    cs_size: integer;
    cs_mem_addr: csumm_array_ptr_type;
    i: integer;
begin
    with buf_hdr^ do
	case item of

	  I_INODE:
	    begin
		{ get address of sector holding this inode }
		sector_addr := rounddownp2(disc_addr, DISK_SECTOR);
		{ read in the sector }
		get_bytes(unit, DISK_SECTOR, sector_addr, addr(sector_buf));
		{ and copy the inode }
		buf_iptr := addr(sector_buf, disc_addr - sector_addr);
		moveleft(buf_iptr^, iptr^, size1);
	    end;

	  I_DATABLK, I_CG:
	    begin
		get_bytes(unit, size1, disc_addr, wptr);
		if (ioresult = ord(inoerror)) and (item = I_CG)
		and (cgptr^.magic <> CG_MAGIC) then begin
		    set_corrupt;
		    ioresult := ord(icorrupt);
		end;
	    end;

	  I_SB:
	    begin
		{ read the superblock }
		get_bytes(unit, min(SBSIZE, max_size), disc_addr, wptr);
		if fsptr^.magic <> FS_MAGIC then begin
		    ioresult := ord(inodirectory);
		    goto 999;
		end;
		if fsptr^.clean <> chr(FS_CLEAN) then
		    set_corrupt;
		size1 := actual_sb_size(fsptr);
		size2 := actual_cs_size(fsptr);
		{ read in the csumm info }
		get_bytes(unit, size2,
			  fragstobytes(fsptr, fsptr^.csaddr),
			  addr(wptr^, size1));
		{ install pointers to csumm info in superblock }
		cs_size := size2;
		cs_mem_addr := addr(wptr^, size1);
		i := 0;
		while cs_size > 0 do begin
		    fsptr^.csp[i] := cs_mem_addr;
		    cs_size := cs_size - fsptr^.bsize;
		    cs_mem_addr := addr(cs_mem_addr^, fsptr^.bsize);
		    i := i + 1;
		end;
	    end;

	 {unknown item in cache. this is DISASTER, and we must DIE!}
	  OTHERWISE begin ioresult:=ord(zdvrnoconfig); escape(-10); end;

	end;
999:
end;


{
{ write_buf
{ writes a buffer to disc
{ the size of the item is in the buffer header, as is
{ the disc address.  "size1" is the size of the item
{ currently there; if this is a superblock, then
{ "size2" gives the csumm info size.
}
procedure write_buf(buf_hdr: buf_hdr_ptr_type);
label
    999;
var
    sector_buf: packed array[1..DISK_SECTOR] of char;
    sector_addr: integer;
    buf_iptr: inode_ptr_type;
begin
    with buf_hdr^ do if unit <> NO_UNIT then begin
	case item of

	  I_INODE:
	    begin
		{ get address of sector holding this inode }
		sector_addr := rounddownp2(disc_addr, DISK_SECTOR);
		{ read in the sector }
		get_bytes(unit, DISK_SECTOR, sector_addr, addr(sector_buf));
		if ioresult <> ord(inoerror) then
		    goto 999;
		{ copy the inode }
		buf_iptr := addr(sector_buf, disc_addr - sector_addr);
		moveleft(iptr^, buf_iptr^, size1);
		{ and put the sector back }
		put_bytes(unit, DISK_SECTOR, sector_addr, addr(sector_buf));
	    end;

	  I_DATABLK, I_CG:
	    begin
		if item = I_CG then
		    cgptr^.time := sysgmttime;
		put_bytes(unit, size1, disc_addr, wptr);
	    end;

	  I_SB:
	    begin
		fsptr^.time := sysgmttime;
		put_bytes(unit, size1, disc_addr, wptr);
		if ioresult <> ord(inoerror) then
		    goto 999;
		put_bytes(unit, size2,
			  fragstobytes(fsptr, fsptr^.csaddr),
			  addr(wptr^, size1));
	    end;

	 {unknown item in cache. this is DISASTER, and we must DIE!}
	  OTHERWISE begin ioresult:=ord(zdvrnoconfig); escape(-10); end;

	end;
    end;
999:
end;



{---------------------------------------------------------------------------}
{
{ get and put buffers
}

{
{ block_in_cache
{ Returns given buffer if in cache.
{ If not in cache, returns
{       oldest unused buf hdr (if any)
{       an invalid buf hdr (if any)
{ buffers are linked in the next chain so that the most recently used
{ is first.
{ changed return type to buf_hdr_ptr_type from cache_blk_ptr_type, for
{ more generality and usefulness (see nuke_unit).
{ SFB
}
function block_in_cache(unit_wanted: integer;
			addr_wanted: integer;
			this_item: integer;
			var oldest_hdr: buf_hdr_ptr_type;
			var invalid_hdr: buf_hdr_ptr_type): buf_hdr_ptr_type;
label
    999;
var
    oldest_age: integer;
    buf_hdr, prev_buf_hdr: buf_hdr_ptr_type;
    i: integer;
begin

    if not initialized then begin
	ioresult := ord(zdvrnoconfig);
	escape(-10);
    end;

    block_in_cache := nil;

    oldest_hdr := nil;
    invalid_hdr := nil;

    buf_hdr := buf_list_array[this_item];
    prev_buf_hdr := nil;

    repeat
	with buf_hdr^ do begin
	    { found it? }
	    if (unit = unit_wanted) and (disc_addr = addr_wanted) then begin
		use_count := use_count + 1;
	       {block_in_cache := dptr; {SFB}
		block_in_cache := buf_hdr;      {SFB}
		{ maintain list in order of use }
		if prev_buf_hdr <> nil then begin
		    prev_buf_hdr^.next := next;
		    next := buf_list_array[this_item];
		    buf_list_array[this_item] := buf_hdr;
		end;
		goto 999;
	    end;
	    if invalid_hdr = nil then
		{ invalid? }
		if unit = NO_UNIT then
		    invalid_hdr := buf_hdr
		else
		{ oldest so far? }
		if use_count = 0 then
		    if (oldest_hdr = nil) or (age < oldest_age) then begin
			oldest_hdr := buf_hdr;
			oldest_age := age;
		    end;
	    prev_buf_hdr := buf_hdr;
	    buf_hdr := next;
	end;
    until buf_hdr = nil;
999:
end;

{ Procedure to really "clean up" after unexpected escape.
{ It is used whenever hfsdam gets to its main try-recover with
{ escapecode <> 0, or in get_buf, if a "foreign" (non dam target
{ unit) cache record causes escape while flushing. It tries to mark
{ the superblock dirty and flush, iff it is currently in cache, and
{ there is no error during flush. It will not try to load the
{ superblock if it's currently not there, as this is more complexity
{ and IO than we care for. It also removes cache records for that
{ unit from the cache, marks it is_corrupt in h_unitable, and
{ returns ioresult=ord(zcatchall), iff ioresult was 0 at entry.

{ Call it with the desired unum; it will figure out base_unum.
{
{ nuke_unit preserves escapecode, but always sets ioresult to something
{ non-zero.
{
{ SFB
}
procedure nuke_unit(unit : integer);
var
  in_cache_buf, oldest, invalid : buf_hdr_ptr_type;
  old_esccode, old_ior : integer;
begin
 unit := h_unitable^.tbl[unit].base_unum;
 old_ior := ioresult;
 old_esccode := escapecode;  {save so caller can use nuke_unit in
			      recover block}
 in_cache_buf := block_in_cache(unit, SBLOCK*DEV_BSIZE, I_SB, oldest,
				invalid);
 if in_cache_buf <> nil then
   try
    {mark superblock not_ok, and flush}
    in_cache_buf^.fsptr^.clean := chr(FS_NOTOK);
    write_buf(in_cache_buf);
   recover ; {ignore escapes, as we don't care if it fails}
 invalidate_unit(unit);
 h_unitable^.tbl[unit].fs_corrupt := true;
 sysescapecode := old_esccode;
 if old_ior = ord(inoerror) then
  ioresult:=ord(zcatchall)
 else
  ioresult:=old_ior;
end;



{
{ generic get routine
{ find buffer with given item and address
{ failing that, find invalid buffer (unit = NO_UNIT)
{ failing that, find oldest valid unused buffer
{ failing that, panic
{ read the disc if info not already in the cache.
{ exception: I_EDATABLK is for datablks that we shouldn't read.
}
function get_buf(unit_wanted: integer; addr_wanted: integer;
		 size, this_item: integer): cache_blk_ptr_type;
label
    999;
var
    do_read: boolean;
    buf_hdr: buf_hdr_ptr_type;
    oldest_hdr: buf_hdr_ptr_type;
    invalid_hdr: buf_hdr_ptr_type;
    in_cache_buf: buf_hdr_ptr_type;
    tmpioresult: integer;
begin
    get_buf := nil;
    {save caller's ioresult; restored at 999 when this call succeeds}
    tmpioresult := ioresult;
    ioresult := ord(inoerror);

    {I_EDATABLK is a datablk the caller will completely overwrite, so
     take a buffer but skip the disc read}
    if this_item = I_EDATABLK then begin
	this_item := I_DATABLK;
	do_read := false;
    end
    else
	do_read := true;

    in_cache_buf := block_in_cache(unit_wanted, addr_wanted, this_item,
				   oldest_hdr, invalid_hdr);

    if in_cache_buf <> nil then begin
	{ block found in cache }
       {get_buf := in_cache_buf;        {SFB}
	get_buf := in_cache_buf^.dptr;  {SFB}
	goto 999;
    end;

    { not in cache.  can we use an available buffer? }
    {prefer a free (invalid) buffer; otherwise evict the oldest unused one}
    if invalid_hdr <> nil then
	buf_hdr := invalid_hdr
    else
    if oldest_hdr <> nil then
	buf_hdr := oldest_hdr
    else begin
	{every buffer of this item type is currently in use}
	ioresult := ord(zdvrcachefull);
	goto 999;
    end;

    { now we have a buffer; flush it first if it holds dirty data }
    with buf_hdr^ do begin
	if is_dirty and (unit <> NO_UNIT) then
	  try   {make sure that any escape here: 1) tries to mark superblock
		 fs_notok on disc, 2) invalidates the cache for that unit,
		 3) marks corrupt in h_unitable, and 4) returns icorrupt in
		 ioresult iff the cache record was on same disc as dam
		 target disc. SFB}
	    write_buf(buf_hdr);
	  recover       {handle other media errors in-line here, and handle
			 errors on dam target unit in hfsdam cleanup code. SFB}
	   if unit = h_unitable^.tbl[unit_wanted].base_unum then
	     escape(escapecode) {let dam cleanup handle it. SFB}
	   else
	     begin
	       nuke_unit(unit);   {if not dam target unit that failed, then
				   continue with dam call. SFB}
	       ioresult := ord(inoerror);
	     end;
	{claim the buffer for the requested block}
	is_dirty := false;
	use_count := 1;
	unit := unit_wanted;
	disc_addr := addr_wanted;
	size1 := size;
    end;

    {The following bugfix (marked {SFB} {) protects the cache if a too-large
     read is requested. If the requested read is > max_size of the buffer,
     treat as non-corrupting error (escape(0)), and exit the DAM quietly with
     ioresult.
     Make sure to mark the buffer not in use, and invalid, but leave other
     info there as an aid to future debugging.
     This is related to the bug wherein CTABLE never gets floppies accounted
     for in the cache configuration. Only cgroups and superblocks can request
     too-large transfers, as inodes and dblocks are fixed in size.
     SFB
    }

    {NOTE: the "else" below pairs with "if size>max_size", NOT with
     "if do_read" -- when do_read is false no read is attempted at all}
     if do_read then
      with buf_hdr^ do                  {SFB}
       if size>max_size then            {SFB}
	begin                           {SFB}
	 use_count := 0;                {SFB}
	 unit := NO_UNIT;               {SFB}
	 ioresult := ord(zdvrnoconfig); {SFB}
	 escape(0);                     {SFB}
	end                             {SFB}
     else                               {SFB}
	begin
	try
	    read_buf(buf_hdr);
	recover
	    if escapecode <> -10 then
	     ioresult := ord(zcatchall); {Exercise cleanup code. SFB};
	{a failed read gives the buffer back before bailing out}
	if ioresult <> ord(inoerror) then begin
	    buf_hdr^.use_count := 0;
	    buf_hdr^.unit := NO_UNIT;
	    goto 999;
	end;
    end;

    get_buf := buf_hdr^.dptr;
999:
    {
    { ioresult during get_buf -> escape
    { ioresult when get_buf called -> just pass it on
    }
    if ioresult <> ord(inoerror) then
	escape(-10);
    ioresult := tmpioresult;
end;

{
{ generic put routine
{ buf_addr is the buffer
{ in front of every buffer is a pointer to its hdr
{ "how" tells what to do with it:
{       release -- finished with it
{       dirty -- will need writing
{       immediate -- immediate write_through
{       invalid -- buffer contents worthless, reuse buffer whenever needed
}
procedure put_buf(anyvar buf_addr: ipointer;
		  how: cache_action_set);
label
    999;
var
    buf_hdr: buf_hdr_ptr_type;
begin
    {putting back a nil buffer is legal and does nothing}
    if buf_addr = nil then
	goto 999;

    {the word immediately in front of every cache buffer is a back
     pointer to its buffer header (laid down by assign_space)}
    buf_addr := addr(buf_addr^, -bytes_per_ptr);
    buf_hdr := buf_hdr_ptr_type(buf_addr^);

    with buf_hdr^ do begin

	{ set the dirty bit? }
	if dirty in how then begin
	    is_dirty := true;
	end;

	{ flush the buffer? }
	if immediate in how then begin
	    try {mark corrupt for ALL escapes. SFB}
	      write_buf(buf_hdr);
	    recover
	      ioresult:=ord(icorrupt);
	    is_dirty := false;
	end;

	{ time stamping?  write only if dirty, and ignore errors }
	if stamping in how then
	    if is_dirty then begin
		try
		    write_buf(buf_hdr);
		recover
		    if escapecode<>-10 then     {SFB}
		     ioresult:=ord(icorrupt)
		    else
		     if ioresult = ord(zprotected) then
			ioresult := ord(inoerror);
		is_dirty := false;
	    end;

	if release in how then begin
	    {age is the pseudo-clock value at last release; it drives
	     oldest-buffer eviction in block_in_cache}
	    if use_count = 1 then begin {tick pclock only when use_count going
					 to 0. SFB}
		pclock := pclock + 1;
		age := pclock;
	    end;

	    if use_count > 0 then       {added to fix 9895A bug. Note that
	       if we have range check off, and let use_count (0..255) go
	       negative, the COMPILER has generated code that doesn't
	       strip off sign extend bits before putting field back in
	       record.  Item was getting set to 7, and is_dirty to True.
	       It's ok to release more than it gets; error handling strategy
	       at higher levels of code releases all cache records, even if
	       they haven't been got yet, or they're already released.
	       It's easier that way.
	       SFB}
	     use_count := use_count - 1;
	end;

	if invalid in how then
	    { don't save buffer contents }
	    unit := NO_UNIT;
    end;
999:
end;





{--------------------------------------------------------------------------}
{
{ cache setup and configuration
}

{
{ init_cache
{ determine the size of the cache, and allocate the space.
{ buffer headers are left alone until we see how big sbs and cgs are.
}
procedure init_cache;
type
    { layout of the optional user-supplied configuration record,
      published under the symbol HFS_USER_CACHE_INFO }
    urec = record
	user_cache_bytes: integer;
	simultaneous_hfs_discs: integer;
    end;
    urec_ptr = ^urec;
var
    user_ip: urec_ptr;  {unused local "i" removed}
    max_supers: integer;
begin
    init_support;

    { nil when the user supplied no cache configuration }
    user_ip := urec_ptr(value('HFS_USER_CACHE_INFO'));

    { allocate cache buffer space if not done already }
    if cache_space = nil then begin
	if user_ip = nil then
	    cache_bytes := DEF_CACHE_BYTES
	else
	    { never configure below the survivable minimum }
	    cache_bytes := max(MIN_CACHE_BYTES, user_ip^.user_cache_bytes);

	{ newbytes escapes if no memory }
	try
	    newbytes(cache_space, cache_bytes);
	    initialized := true;
	recover
	    ;   { on failure, "initialized" simply stays false }
    end;

    { the superblock buffer ceiling may be raised by the user: one
      superblock per simultaneously active HFS disc }
    if user_ip = nil then
	max_supers := MAX_SBS
    else
	max_supers := max(MIN_SBS, user_ip^.simultaneous_hfs_discs);

    { and init some variables }
    max_count[I_INODE]   := MAX_INODES;
    max_count[I_SB]      := max_supers;
    max_count[I_CG]      := MAX_CGS;
    max_count[I_DATABLK] := MAX_DATABLKS;

    min_count[I_INODE]   := MIN_INODES;
    min_count[I_SB]      := MIN_SBS;
    min_count[I_CG]      := MIN_CGS;
    min_count[I_DATABLK] := MIN_DATABLKS;

    unit_count[I_INODE]   := UNIT_INODES;
    unit_count[I_SB]      := UNIT_SBS;
    unit_count[I_CG]      := UNIT_CGS;
    unit_count[I_DATABLK] := UNIT_DATABLKS;

    item_size[I_INODE]   := sizeof(binode_type);
    item_size[I_DATABLK] := cache_blk_size;
    { sb/cg sizes are discovered per-disc by init_hfs_unit; 0 means
      "not yet known" (configure_cache supplies small defaults) }
    item_size[I_SB]      := 0;
    item_size[I_CG]      := 0;


    {
    { Configure the cache, in case TABLE never tries.
    }
    configure_cache;

end;


{
{ init_hfs_unit
{ look at the superblock and calculate the size of
{ the superblock, with cylinder summary info, and the
{ size of the cgroup, with freemap.  item_size[I_SB and I_CG]
{ has the maximum of these two sizes.
{ The cache isn't set up yet, but the buffer space is allocated,
{ so we just use the beginning of that space.
{ If it looks like an HFS unit, we call init_support_unit,
{ which will set "is_hfs_unit" in the h_unitable.
}
procedure init_hfs_unit(unum: integer; force: boolean);
label
    999;
var
    fs: super_block_ptr_type;
    base_unum: integer; {NOTE(review): appears unused in this procedure}
    is_hfs: boolean;
    f: fib;     {SFB}
begin
    is_hfs := false;

    if not initialized then begin
	ioresult := ord(zdvrnoconfig);
	goto 999;
    end;

    { read in fixed part of superblock }
    { the cache is not configured yet, so borrow the start of the raw
      buffer space as scratch for the superblock }
    fs := super_block_ptr_type(cache_space);
    fs^.magic := 0;

    {The following lines fix a bug wherein floppies were not influencing
     cache configuration properly. The superblock and cylinder group size
     were not being computed for floppies, since the following get_bytes would
     fail, so the superblock was not being read. The getbytes failed because
     the fpeof in tempfib was 0, giving ioresult = ieof. Fpeof was 0 because
     umaxbytes was 0 at this point, and unblockeddam (called by setuptempfib,
     called by set_unit) merely copies umaxbytes -> fpeof. Umaxbytes is 0
     for floppies at this point because CTABLE assign_floppy_pair passes in 0
     to tea_CS80_sv, which passes it to p_umaxbytes in tea, whereas the CTABLE
     { local hard discs } { section calls get_CS80_parms, and passes mb in to
     tea_CS80_mv.
     See CTABLE for comments on clearunit being used to set umaxbytes. The
     clearunit call in assign_and_clear_unit happens too late for the need
     described here.
     The bug symptom was the cache getting smashed when a floppy had a larger
     cgroup cache record size requirement than any disc interrogated during
     CTABLE (only hard discs were being interrogated). There is a related fix
     in get_buf, to refuse a too-large read_buffer request.
     SFB/LAF
    }

    ioresult := ord(inoerror);                                          {SFB}
    f.funit := unum;                                                    {SFB}
    call(unitable^[unum].tm, addr(f), clearunit, unum, 0, 0);           {SFB}

    unum := set_unit(unum);
    {first read may fail with zmediumchanged on a just-inserted medium;
     retry once in that case}
    try
	get_bytes(unum, sizeof(fs^), SBLOCK*DEV_BSIZE, windowp(fs));
    recover
	if (ioresult = ord(zmediumchanged))
	and not unitable^[unum].umediavalid then begin
	    try
		ioresult := ord(inoerror);
		get_bytes(unum, sizeof(fs^), SBLOCK*DEV_BSIZE, windowp(fs));
	    recover
		goto 999;
	end
	else    goto 999;

    { is it HFS?  sanity-check the superblock's self-describing fields }
    with fs^ do
    if (magic <> FS_MAGIC)
	or (fsize * frag <> bsize)
	or (minfree < 0)
	or (minfree > 100)
	or (sbsize < sizeof(fs^))
	or (cpg * ncg < ncyl) then
	    goto 999;

    with fs^ do
	{ size * fsize must fit in 31 bits (disk byte addressable) }
	if (size < 0) or (maxint div size < fsize) then
	    goto 999;

    is_hfs := true;

    { sb = sb_bytes + cs_bytes }
    { remember the LARGEST sb and cg sizes seen so far; configure_cache
      sizes the buffers from these }
    item_size[I_SB] := max(item_size[I_SB],
			   actual_sb_size(fs) + actual_cs_size(fs));
    item_size[I_CG] := max(item_size[I_CG],
			   actual_cg_size(fs));

999:
    { register the unit with support code if it is HFS (or the caller
      forces it); the second arg flags a superblock not marked clean }
    if is_hfs or force then
	init_support_unit(unum, is_hfs and (fs^.clean <> chr(FS_CLEAN)));
end;


{
{ configure_cache
{ Here we allocate the cache memory to the buffer headers.
{ The algorithm is as follows:
{ We have a replicable unit consisting of unit_count[i] copies
{ of item i.
{ We allocate this unit as many times as fits.
{ We may have to adjust by maximum counts on some items.
{ If this unit doesn't fit at all, then we take min_count[i] copies.
{ If even that won't fit, we panic.
{ Each buffer item is preceded by a pointer back to its buffer header.
{ This is not included in the various types, so we have
{ to add room for it here.
}
procedure configure_cache;
label
    999;
var
    unit_bytes: integer;
    factor: integer;
    half_extra, extra_bytes: integer;
    i: integer;
    cache_address: ipointer;
begin
    if not initialized then
	goto 999;

    {
    { supply defaults if never could look at the disc.
    { The defaults are small, because this only happens in the
    { case of floppies, where TABLE can force an assignment of
    { HFSDAM even when the disc isn't in the drive yet.
    }
    if item_size[I_SB] = 0 then begin
	item_size[I_SB] := 3*1024;
	item_size[I_CG] := 2*1024;
    end;

    { account for back pointer for each type, plus header }
    for i := 1 to NUM_ITEMS do
	buf_size[i] := item_size[i] + bytes_per_ptr + sizeof(buf_hdr_type);

    { how many bytes in the unit? }
    unit_bytes := 0;
    for i := 1 to NUM_ITEMS do
	unit_bytes := unit_bytes + (unit_count[i] * buf_size[i]);

    { how many times does it fit in the cache? }
    factor := cache_bytes div unit_bytes;
    if factor > 0 then
	{ unit can be replicated }
	for i := 1 to NUM_ITEMS do
	    buf_count[i] := min(factor * unit_count[i], max_count[i])
    else
	{ Not enough space to replicate unit }
	for i := 1 to NUM_ITEMS do
	    buf_count[i] := min_count[i];

    { calculate extra bytes }
    extra_bytes := cache_bytes;
    for i := 1 to NUM_ITEMS do
	extra_bytes := extra_bytes - (buf_count[i] * buf_size[i]);

    { if even the minimum configuration overflows the cache space
      (extra_bytes went negative), give up: mark the cache unusable }
    if extra_bytes < 0 then begin
	initialized := false;
	goto 999;
    end;

    { give half of the extra space to the datablks }
    half_extra := extra_bytes div 2;
    extra_bytes := half_extra;
    allocate_extra(half_extra, I_DATABLK);

    { the other half goes to the inodes }
    { plus whatever allocate_extra could not use for datablks }
    extra_bytes := extra_bytes + half_extra;
    allocate_extra(extra_bytes, I_INODE);

    { now allocate the cache space }
    cache_address := ipointer(cache_space);
    for i := 1 to NUM_ITEMS do begin
	assign_space(cache_address, buf_count[i], buf_size[i], i);
    end;

999:
    {
    { reinit enough vars so can run TABLE again (or for the first time)
    { (TABLE calls init_hfs_unit + configure_cache)
    }
    item_size[I_SB]      := 0;
    item_size[I_CG]      := 0;
end;


{
{ sync the cache by flushing all dirty buffers
}
procedure sync;
label
    999;
var
    i, j: integer;
    buf_hdr: buf_hdr_ptr_type;
    tmpioresult: integer;
begin
    {BUGFIX: save the caller's ioresult FIRST.  The original code jumped
     to 999 before assigning tmpioresult when "initialized" was false,
     and 999 then stored the UNINITIALIZED local back into ioresult.}
    tmpioresult := ioresult;
    if not initialized then
	goto 999;
    {walk every buffer of every item type: zero use counts (recovers
     from escapes that bypassed put_buf) and flush dirty buffers that
     belong to the currently active unit}
    for i := 1 to NUM_ITEMS do
	for j := 1 to buf_count[i] do begin
	    buf_hdr := addr(buf_hdrs_array[i]^[j]);
	    with buf_hdr^ do begin
		use_count := 0;
		{fix for cacher bug with 2 HFS units and LIBRARIAN. SFB 6-16-87}
		if is_dirty and (unit=current_unit) {SFB} then begin
		    try
			write_buf(buf_hdr);
		    recover begin
			{remember only the FIRST error, and drop all
			 cached data for the failing unit}
			if tmpioresult = ord(inoerror) then
			    tmpioresult := ioresult;
			invalidate_unit(unit);
		    end;
		    is_dirty := false;
		end;
	    end;
	end;
999:
    {hand back the saved (or first-error) ioresult}
    ioresult := tmpioresult;
end;

$if FALSE$
{
{ check_cache
{ debug routine
{ caller asserts that all buffers have been released.
}
{ Don't enable except for test. SFB
procedure check_cache;
label
    999;
var
    i, j: integer;
begin
    if not initialized then
	goto 999;
    for i := 1 to NUM_ITEMS do
	for j := 1 to buf_count[i] do
	    if buf_hdrs_array[i]^[j].use_count <> 0 then
		writeln('BUFFER STILL IN USE, type ', i:1, ' # ', j:1);
999:
end;
}

{
{ print the cache statistics
}
procedure cache_stats;
label
    999;
var
    i: integer;
begin
    if not initialized then
	goto 999;
    { one report line per item type: buffer count, lazy writes (L),
      forced flushes (F), lazy percentage, hits (H), misses (M),
      and hit rate.  Debug only -- compiled out under $if FALSE$. }
    for i := 1 to NUM_ITEMS do begin
	if i = I_INODE then
	    write('inodes  ')
	else
	if i = I_SB then
	    write('supers  ')
	else
	if i = I_CG then
	    write('cgroups ')
	else
	if i = I_DATABLK then
	    write('data    ');
	write('(', buf_count[i]:2, ' bufs) ');
	write(cache_lazy[i]:4, ' L, ');
	write(cache_flush[i]:4, ' F, ');
	if cache_lazy[i] + cache_flush[i] = 0 then
	    write('L/F+L  0%, ')
	else
	    write('L/F+L ', ((cache_lazy[i]*100)
		div (cache_lazy[i] + cache_flush[i])):2, '%, ');
	write(cache_hit[i]:4, ' H, ');
	write(cache_miss[i]:4, ' M, ');
	if cache_hit[i] + cache_miss[i] = 0 then
	    writeln('hit rate  0%')
	else
	writeln('hit rate ', ((cache_hit[i]*100)
		div (cache_hit[i] + cache_miss[i])):2, '%');
    end;
    { counters reset after every report }
    for i := 1 to NUM_ITEMS do begin
	cache_hit[i] := 0;
	cache_miss[i] := 0;
	cache_lazy[i] := 0;
	cache_flush[i] := 0;
    end;

999:
end;
$end$

{
{ invalidate_unit
{ invalidate (no flush) all buffers on this unit.
{ if this unit is ALL_UNITS, then we invalidate on every unit.
{ if BASE_UNIT, then use current_unit.
{ (NOTE(review): no explicit BASE_UNIT case appears in the body below --
{ any unit <> ALL_UNITS is simply mapped through base_unum; confirm
{ this sentence is not stale.)
}
procedure invalidate_unit(badunit: integer);
var
    i, j: integer;
begin
    { Nothing to do until the cache has been configured. }
    if initialized then begin
	{ All cache records are keyed by base unit number, so map the
	  argument through the unit table (unless invalidating all). }
	if badunit <> ALL_UNITS then
	    badunit := h_unitable^.tbl[badunit].base_unum;
	{ Walk every buffer of every item type and free those that
	  belong to the doomed unit: no flush, contents are dropped. }
	for i := 1 to NUM_ITEMS do
	    for j := 1 to buf_count[i] do
		with buf_hdrs_array[i]^[j] do
		    if (badunit = ALL_UNITS) or (unit = badunit) then begin
			unit := NO_UNIT;
			is_dirty := false;
			use_count := 0;
		    end;
    end;
end;


{----------------------------------------------------------------}
{
{ EXPORTED DATA ACCESS ROUTINES
}

{
{ get a superblock
{ We check the disc with every get_superblock to see if it's
{ still OK.  If not, we invalidate the unit, and return nil.
}
function  get_superblock(unum: shortint): super_block_ptr_type;
var
    fs: super_block_ptr_type;
begin
    unum := set_unit(unum);
    {any escape out of get_buf (media error, corrupt sb, cache full)
     becomes a nil result here.  Size 0 is fine: read_buf computes the
     real superblock size itself for I_SB.}
    try
      fs := super_block_ptr_type(get_buf(unum,
				       SBLOCK*DEV_BSIZE,
				       0,
				       I_SB));
    recover
	fs := nil;

    if (fs = nil) then begin
	invalidate_unit(unum);
	medium_gone;
    end;
    {make this the active unit: nearly every other cache call uses
     current_unit/current_super implicitly}
    current_unit := unum;
    current_super := fs;
    get_superblock := fs;
end;


{
{ put superblock back
}
procedure put_superblock(fs: super_block_ptr_type;
			 how: cache_action_set);
begin
    { A superblock whose fmod flag was set by the allocator must be
      marked dirty so put_buf schedules the write; reset the flag once
      we have taken note of it. }
    if fs <> nil then
	if fs^.fmod = FS_MODIFIED then begin
	    fs^.fmod := FS_NOT_MODIFIED;
	    how := how + [dirty];
	end;
    put_buf(fs, how);
end;


{
{ get a cgroup
}
function  get_cgroup(groupnum: integer): cgroup_ptr_type;
var
    cg_addr, cg_size: integer;
begin
    { Locate and size the cylinder group from the active superblock,
      then fetch it through the generic cache get. }
    cg_addr := cgroup_start(current_super, groupnum);
    cg_size := actual_cg_size(current_super);
    get_cgroup :=
	cgroup_ptr_type(get_buf(current_unit, cg_addr, cg_size, I_CG));
end;


{
{ put cgroup back
}
procedure put_cgroup(cgp: cgroup_ptr_type;
		     how: cache_action_set);
begin
    { thin wrapper: release/dirty/flush a cgroup via the generic put }
    put_buf(cgp, how);
end;


{
{ get an inode
}
function  get_inode(nodenum: integer): inode_ptr_type;
var
    ip: inode_ptr_type;
begin
    get_inode := nil;   {ensure no random garbage. iptrs are usually
			 instantiated from the stack. SFB}
    {range-check against the active superblock: valid inode numbers
     are 1..ncg*ipg; a bad number escapes with znosuchblk}
    with current_super^ do
	if (nodenum < 1) or (nodenum > ncg*ipg) then begin
	    ioresult := ord(znosuchblk);
	    escape(-10);
	end;
    ip := inode_ptr_type(get_buf(current_unit,
				 inode_start(current_super, nodenum),
				 sizeof(inode_type),
				 I_INODE));
    {record the inode number in the in-core (binode) copy; inumber()
     reads it back later}
    if ip <> nil then
	binode_ptr_type(ip)^.inumber := nodenum;
    get_inode := ip;
end;


{
{ put inode back
}
procedure put_inode(ip: inode_ptr_type;
		    how: cache_action_set);
begin
    { thin wrapper: release/dirty/flush an inode via the generic put }
    put_buf(ip, how);
end;


{
{ get_data1
{ subroutine of get_datablk and get_edatablk
{ we want the cache block that contains the byte addressed
{ by offset, where this is the offset within the given frag.
}
function  get_data1(fragnum: frag_type;
		    offset: integer;
		    item: integer): cache_blk_ptr_type;
begin
    { Convert frag number + byte offset to an absolute disc byte
      address, round it DOWN to a cache-block boundary, and fetch that
      whole block through the generic get. }
    get_data1 :=
	cache_blk_ptr_type(
	    get_buf(current_unit,
		    rounddownp2(fragstobytes(current_super, fragnum) + offset,
				cache_blk_size),
		    cache_blk_size,
		    item));
end;


{
{ get a datablk
}
function  get_datablk(fragnum: frag_type;
		      offset: integer): cache_blk_ptr_type;
begin
    { datablk that will be read from disc if it is not cached }
    get_datablk := get_data1(fragnum, offset, I_DATABLK);
end;


{
{ get a datablk, but don't read the disc if it isn't in
{ the cache, because we're about to overwrite it anyway.
}
function  get_edatablk(fragnum: frag_type;
		       offset: integer): cache_blk_ptr_type;
begin
    { "empty" datablk: caller will overwrite the whole block, so
      I_EDATABLK makes get_buf skip the disc read }
    get_edatablk := get_data1(fragnum, offset, I_EDATABLK);
end;


{
{ put datablk back
}
procedure put_datablk(dp: cache_blk_ptr_type;
		      how: cache_action_set);
begin
    { thin wrapper: release/dirty/flush a datablk via the generic put }
    put_buf(dp, how);
end;


{
{ Miscellany
}

{
{ Given an inode ptr, return the inode number.
}
function inumber(iptr: inode_ptr_type): integer;
begin
    { the inode number lives alongside the inode in the in-core
      binode record, stored there by get_inode }
    inumber := binode_ptr_type(iptr)^.inumber;
end;

{
{ return the inode type (IFREG, IFDIR, etc).
}
function itype(ip: inode_ptr_type): integer;
begin
    { mask the mode word with the file-format field IFMT }
    itype := binand(ip^.mode, IFMT);
end;

end.


@


56.2
log
@
pws2rcs automatic delta on Wed Jan 27 11:57:27 MST 1993
@
text
@d1 1446
@


56.1
log
@Automatic bump of revision number for PWS version 3.25
@
text
@a0 1446
$modcal$

$linenum 7000$
$lines 54$

$partial_eval on$

$range off$
$ovflcheck off$
$debug off$

{
{ HFS CACHE
{ handles caching for
{       superblocks
{       cgroups
{       inode
{       data (indir blocks, dir contents)
}


module hfscache;

$search 'hfstuff', 'hfscalc', 'hfsupport'$

import
    sysdevs,
    hfstuff,
    asm,
    hfscalc,
    hfsupport,
    sysglobals,
    iocomasm;


{
{ Rules of usage:
{ SET-UP:
{  1.  call init_cache first
{  2.  call init_hfs_unit for every hfs unit
{  3.  call configure_cache once step 2 is finished
{ NORMAL USE:
{  1.  call get_superblock before doing anything else with a
{       given unit.
{  2.  only one unit is active at a time.  get_superblock makes it
{       active.  All other calls use the active unit.
{  3.  call sync to flush all buffers to disc.  This is NOT done
{       automatically by hfsalloc, so must be done in the DAM/TM.
{       Sync also sets all use counts to 0 (to recover from escapes),
{       so call it only when all put_* have been done.
{  4.  for debugging, call check_cache when a transaction is finished.
{       It complains if any of the cache is still in use.
{  5.  call invalidate_unit to invalidate any in-core buffers.  This
{       MUST happen when the user swaps floppies in the same drive,
{       for example.
{  6.  get/put work as follows.  Call get_* to get *.  When you're
{       through with it, call put_* with release.  If you ever
{       write to *, call put_* with dirty or immediate.
{       dirty/immediate/release can be combined as desired in
{       a single put_* call.  There is also "invalid", which means that
{       the buffer contents are invalid.
{ INTERRUPTS:
{      Because caching is done in static heap space, the hfs driver is NOT
{       reentrant with respect to interrupts.  ie, if you call the AM, DAM,
{       or the hfsTM from within an ISR, you risk messing up any lower
{       level transactions currently happening, even if you've got your
{       own FIB!
{ MEDIA CHANGE:
{      Exactly the same as "SETUP" above.
}

{
{ How the cache is set up:
{  1.  Fixed-size block allocated by init_cache.  "hfs_cache_bytes" is size
{       in bytes of cache.  This size is user-configurable,
{       in the module hfs_user.
{  2.  init_hfs_unit looks at the beginning of the superblock and
{       calculates how big each superblock and cgroup really are.
{       It remembers the biggest.  Under PAWS, it should be called
{       by TABLE.
{  3.  configure_cache takes info from step 2 and doles out the space
{       allocated in step 1.  It is called by TABLE
{       under PAWS.  Configuration consists of deciding how many
{       of each buffer we will have, and setting up the buffer headers
{       accordingly.
}


export

const
    { size of cache block in bytes.  Must be power of 2, >= 1K }
    cache_blk_size = 1024;

type
    cache_blk_type = packed array[0..cache_blk_size-1] of char;
    cache_blk_ptr_type = ^cache_blk_type;
    cache_action_type = (release, dirty, immediate, invalid, stamping);
    cache_action_set = set of cache_action_type;

    { inode a little bigger in core }
    binode_type = packed record
	inode: inode_type;
	inumber: integer;
    end;
    binode_ptr_type = ^binode_type;

var
    current_super: super_block_ptr_type;


{
{ cache control
}
procedure init_cache;
procedure init_hfs_unit(unum: integer; force: boolean);
procedure configure_cache;
procedure sync;
procedure invalidate_unit(badunit: integer);

{
{ disc access for each item
}
function  get_superblock(unum: shortint): super_block_ptr_type;
procedure put_superblock(fs: super_block_ptr_type;
			 how: cache_action_set);

function  get_cgroup(groupnum: integer): cgroup_ptr_type;
procedure put_cgroup(cgp: cgroup_ptr_type;
		     how: cache_action_set);

function  get_inode(nodenum: integer): inode_ptr_type;
procedure put_inode(ip: inode_ptr_type;
		    how: cache_action_set);

function  get_datablk(fragnum: frag_type;
		      offset: integer): cache_blk_ptr_type;
function  get_edatablk(fragnum: frag_type;
		       offset: integer): cache_blk_ptr_type;
procedure put_datablk(dp: cache_blk_ptr_type;
		      how: cache_action_set);

{
{ miscellaneous
}
function inumber(iptr: inode_ptr_type): integer;
function itype(ip: inode_ptr_type): integer;
procedure nuke_unit(unit : integer);

implement

const
    { for enumerating item types -- must begin with 1 }
    I_SB = 1;
    I_CG = 2;
    I_INODE = 3;
    I_DATABLK = 4;
    NUM_ITEMS = 4;

    { for "empty" (no disc read) data blks, <> any item type }
    I_EDATABLK = -1;

type
    { buffer control record }
    buf_hdr_ptr_type = ^buf_hdr_type;
    buf_hdr_type = packed record
	is_dirty: boolean;
	item: 1..NUM_ITEMS;
	use_count: 0..255;
	unit: shortint;
	disc_addr: integer;
	age: integer {was ushort. SFB};
	max_size: ushort;
	size1, size2: shortint;
	next: buf_hdr_ptr_type;
	case integer of
	    1: (iptr: inode_ptr_type);
	    2: (fsptr: super_block_ptr_type);
	    3: (cgptr: cgroup_ptr_type);
	    4: (dptr: cache_blk_ptr_type);
	    5: (wptr: windowp);
    end;
    buf_hdr_array_type = array[1..maxint] of buf_hdr_type;
    buf_hdr_array_ptr_type = ^buf_hdr_array_type;

type
    item_info_type = array[1..NUM_ITEMS] of integer;

const
    { max number of each item in cache }
    MAX_SBS = 2;
    MAX_CGS = 20;
    MAX_INODES = 100;
    MAX_DATABLKS = 50;

    { number of each item in cache "unit" }
    UNIT_SBS = 1;
    UNIT_CGS = 3;
    UNIT_INODES = 10;
    UNIT_DATABLKS = 3;

    { can't survive with fewer items than this }
    MIN_SBS = 1;
    MIN_CGS = 1;
    MIN_INODES = 3;
    MIN_DATABLKS = 2;

    { minimum cache size }
    MIN_CACHE_BYTES = 10*1024;

    { default cache size }
    DEF_CACHE_BYTES = 15*1024;

    { illegal unit number }
    NO_UNIT = -1;

    { and another, which can be the same }
    ALL_UNITS = -1;

    bytes_per_ptr = 4;

var
    { address of buf hdrs for each item }
    buf_hdrs_array: array[1..NUM_ITEMS] of buf_hdr_array_ptr_type;

    { linked list of buf hdrs for each item }
    buf_list_array: array[1..NUM_ITEMS] of buf_hdr_ptr_type;

    { how many of each item actually present }
    buf_count: item_info_type;

    { how many items in a tiled unit }
    unit_count: item_info_type;

    { maximum, minimum count for each item }
    max_count: item_info_type;
    min_count: item_info_type;

    { size of each item }
    item_size: item_info_type;

    { size of each buffer -- item size plus a backptr }
    buf_size: item_info_type;

    { array of buffers }
    cache_space: anyptr;
    { size of the array }
    cache_bytes: integer;

    { pseudo-clock for buffer aging }
    pclock: integer {was ushort. SFB} ;

    { where current_super lives }
    current_unit: shortint;

    { has cache been properly initialized? }
    initialized: boolean;

{-------------------------------------------------------------------------}
{
{ Miscellaneous utility routines
}

{
{ actual_sb_size
{ calculates size of sb, not including csumm info, that
{ we read and write.  this info is in superblock, but always
{ rounded up to fragment, too coarse for our purposes.
{ sb size is the fixed part, plus 1 byte for each big block
{ in a cylinder cycle.
}
function actual_sb_size(fs: super_block_ptr_type): integer;
var
    sb_bytes: integer;
begin
    sb_bytes := sizeof(fs^) + howmany(fs^.spc * fs^.cpc, nspb(fs));
    actual_sb_size := roundup(sb_bytes, DISK_SECTOR);
end;

{
{ actual_cs_size
{ size of csumm info
{ cs size is one csum structure per cylinder group
}
function actual_cs_size(fs: super_block_ptr_type): integer;
var
    cs_bytes: integer;
begin
    cs_bytes := fs^.ncg * sizeof(csumm_type);
    actual_cs_size := roundup(cs_bytes, DISK_SECTOR);
end;

{
{ actual_cg_size
{ size of cg info
{ cg size is fixed part, plus 1 bit for each fragment in the
{ cylinder group.
}
function actual_cg_size(fs: super_block_ptr_type): integer;
var
    cg_bytes: integer;
begin
    cg_bytes := sizeof(cylinder_group_block_type) + howmany(fs^.fpg, NBBY);
    actual_cg_size := roundup(cg_bytes, DISK_SECTOR);
end;


{
{ assign_space
{ We have decided how many headers and buffers for each item can fit into
{ the given cache space.  Now we assign the headers and buffers for a single
{ item.  count is how
{ many we can afford.  size is the size of each buffer plus header.
{ cache_address is the address of the space we are allotted,
{ which we keep up to date.
{ The beginning of the space is taken for buffer headers,
{ and the end for buffers.  The hdrs are initialized, and the
{ hdrs and buffers are pointed to each other.
}
procedure assign_space(var cache_address: ipointer;
		       count, size: integer;
		      this_item: integer);
var
    i: integer;
    buf_hdr: buf_hdr_ptr_type;
begin
    { first get the hdr space }
    buf_hdrs_array[this_item] := buf_hdr_array_ptr_type(cache_address);
    buf_list_array[this_item] := buf_hdr_ptr_type(cache_address);
    buf_hdr := buf_hdr_ptr_type(cache_address);
    cache_address := addr(cache_address^, count*sizeof(buf_hdr_type));
    size := size - sizeof(buf_hdr_type);

    { then initialize each hdr-buffer pair }
    for i := 1 to count do begin
	with buf_hdr^ do begin
	    is_dirty := false;
	    item := this_item;
	    use_count := 0;
	    unit := NO_UNIT;
	    max_size := size - bytes_per_ptr;
	    dptr := addr(cache_address^, bytes_per_ptr);
	    if i = count then
		next := nil
	    else
		next := addr(buf_hdr^, sizeof(buf_hdr_type));
	end;
	{ back ptr from buffer to buf hdr }
	cache_address^ := integer(buf_hdr);
	cache_address := addr(cache_address^, size);
	buf_hdr := addr(buf_hdr^, sizeof(buf_hdr_type));
    end;
end;


{
{ allocate_extra
{ takes some extra cache space and allocates as much of it
{ as possible to the given item.  There is extra cache space
{ because the unit of allocation might not fit into the cache
{ space an integral number of times, or because some buf hdr
{ arrays (like sb) are kept deliberately small.
{ updates extra_bytes to show how much taken.
}
procedure allocate_extra(var extra_bytes: integer; item: integer);
var
    extra_bufs: integer;
begin
    extra_bufs := min(max_count[item] - buf_count[item],
		      extra_bytes div buf_size[item]);
    buf_count[item] := buf_count[item] + extra_bufs;
    extra_bytes := extra_bytes - (extra_bufs * buf_size[item]);
end;

{-------------------------------------------------------------------------}
{
{ buffer read and write routines
}

{
{ read_buf
{ reads a buffer from the disc
{ caller sets:
{       unit
{       disc_address
{       item
{       size1 (ignored for superblocks)
{ max_size shows the max buffer size available.
{ size1 shows how much we should read.
{ for superblocks, there is a small bootstrap problem,
{ because we don't know how much to read until we see it.
{ so we read the max size, then set size1 and size2 and
{ read the csumm info.
{ ioresult shows success or failure.
}
procedure read_buf(buf_hdr: buf_hdr_ptr_type);
label
    999;
var
    sector_buf: packed array[1..DISK_SECTOR] of char;
    sector_addr: integer;
    buf_iptr: inode_ptr_type;
    cs_size: integer;
    cs_mem_addr: csumm_array_ptr_type;
    i: integer;
begin
    with buf_hdr^ do
	case item of

	  I_INODE:
	    begin
		{ get address of sector holding this inode }
		sector_addr := rounddownp2(disc_addr, DISK_SECTOR);
		{ read in the sector }
		get_bytes(unit, DISK_SECTOR, sector_addr, addr(sector_buf));
		{ and copy the inode }
		buf_iptr := addr(sector_buf, disc_addr - sector_addr);
		moveleft(buf_iptr^, iptr^, size1);
	    end;

	  I_DATABLK, I_CG:
	    begin
		get_bytes(unit, size1, disc_addr, wptr);
		if (ioresult = ord(inoerror)) and (item = I_CG)
		and (cgptr^.magic <> CG_MAGIC) then begin
		    set_corrupt;
		    ioresult := ord(icorrupt);
		end;
	    end;

	  I_SB:
	    begin
		{ read the superblock }
		get_bytes(unit, min(SBSIZE, max_size), disc_addr, wptr);
		if fsptr^.magic <> FS_MAGIC then begin
		    ioresult := ord(inodirectory);
		    goto 999;
		end;
		if fsptr^.clean <> chr(FS_CLEAN) then
		    set_corrupt;
		size1 := actual_sb_size(fsptr);
		size2 := actual_cs_size(fsptr);
		{ read in the csumm info }
		get_bytes(unit, size2,
			  fragstobytes(fsptr, fsptr^.csaddr),
			  addr(wptr^, size1));
		{ install pointers to csumm info in superblock }
		cs_size := size2;
		cs_mem_addr := addr(wptr^, size1);
		i := 0;
		while cs_size > 0 do begin
		    fsptr^.csp[i] := cs_mem_addr;
		    cs_size := cs_size - fsptr^.bsize;
		    cs_mem_addr := addr(cs_mem_addr^, fsptr^.bsize);
		    i := i + 1;
		end;
	    end;

	 {unknown item in cache. this is DISASTER, and we must DIE!}
	  OTHERWISE begin ioresult:=ord(zdvrnoconfig); escape(-10); end;

	end;
999:
end;


{
{ write_buf
{ writes a buffer to disc
{ the size of the item is in the buffer header, as is
{ the disc address.  "size1" is the size of the item
{ currently there; if this is a superblock, then
{ "size2" gives the csumm info size.
}
procedure write_buf(buf_hdr: buf_hdr_ptr_type);
label
    999;
var
    sector_buf: packed array[1..DISK_SECTOR] of char;
    sector_addr: integer;
    buf_iptr: inode_ptr_type;
begin
    with buf_hdr^ do if unit <> NO_UNIT then begin
	case item of

	  I_INODE:
	    begin
		{ get address of sector holding this inode }
		sector_addr := rounddownp2(disc_addr, DISK_SECTOR);
		{ read in the sector }
		get_bytes(unit, DISK_SECTOR, sector_addr, addr(sector_buf));
		if ioresult <> ord(inoerror) then
		    goto 999;
		{ copy the inode }
		buf_iptr := addr(sector_buf, disc_addr - sector_addr);
		moveleft(iptr^, buf_iptr^, size1);
		{ and put the sector back }
		put_bytes(unit, DISK_SECTOR, sector_addr, addr(sector_buf));
	    end;

	  I_DATABLK, I_CG:
	    begin
		if item = I_CG then
		    cgptr^.time := sysgmttime;
		put_bytes(unit, size1, disc_addr, wptr);
	    end;

	  I_SB:
	    begin
		fsptr^.time := sysgmttime;
		put_bytes(unit, size1, disc_addr, wptr);
		if ioresult <> ord(inoerror) then
		    goto 999;
		put_bytes(unit, size2,
			  fragstobytes(fsptr, fsptr^.csaddr),
			  addr(wptr^, size1));
	    end;

	 {unknown item in cache. this is DISASTER, and we must DIE!}
	  OTHERWISE begin ioresult:=ord(zdvrnoconfig); escape(-10); end;

	end;
    end;
999:
end;



{---------------------------------------------------------------------------}
{
{ get and put buffers
}

{
{ block_in_cache
{ Returns given buffer if in cache.
{ If not in cache, returns
{       oldest unused buf hdr (if any)
{       an invalid buf hdr (if any)
{ buffers are linked in the next chain so that the most recently used
{ is first.
{ changed return type to buf_hdr_ptr_type from cache_blk_ptr_type, for
{ more generality and usefulness (see nuke_unit).
{ SFB
}
function block_in_cache(unit_wanted: integer;
			addr_wanted: integer;
			this_item: integer;
			var oldest_hdr: buf_hdr_ptr_type;
			var invalid_hdr: buf_hdr_ptr_type): buf_hdr_ptr_type;
label
    999;
var
    oldest_age: integer;
    buf_hdr, prev_buf_hdr: buf_hdr_ptr_type;
    i: integer;
begin

    { cache memory never got allocated: fatal configuration error }
    if not initialized then begin
	ioresult := ord(zdvrnoconfig);
	escape(-10);
    end;

    block_in_cache := nil;

    oldest_hdr := nil;
    invalid_hdr := nil;

    { each item type has its own chain, kept most-recently-used first }
    buf_hdr := buf_list_array[this_item];
    prev_buf_hdr := nil;

    repeat
	with buf_hdr^ do begin
	    { found it? }
	    if (unit = unit_wanted) and (disc_addr = addr_wanted) then begin
		use_count := use_count + 1;
	       {block_in_cache := dptr; {SFB}
		block_in_cache := buf_hdr;      {SFB}
		{ maintain list in order of use }
		{ unlink and move to the chain head, unless already there }
		if prev_buf_hdr <> nil then begin
		    prev_buf_hdr^.next := next;
		    next := buf_list_array[this_item];
		    buf_list_array[this_item] := buf_hdr;
		end;
		goto 999;
	    end;
	    { while searching, remember fallback buffers for the caller:
	      an invalid buffer is preferred; failing that, the oldest
	      (smallest age) valid buffer not currently in use }
	    if invalid_hdr = nil then
		{ invalid? }
		if unit = NO_UNIT then
		    invalid_hdr := buf_hdr
		else
		{ oldest so far? }
		if use_count = 0 then
		    if (oldest_hdr = nil) or (age < oldest_age) then begin
			oldest_hdr := buf_hdr;
			oldest_age := age;
		    end;
	    prev_buf_hdr := buf_hdr;
	    buf_hdr := next;
	end;
    until buf_hdr = nil;
999:
end;

{ Procedure to really "clean up" after unexpected escape.
{ It is used whenever hfsdam gets to its main try-recover with
{ escapecode <> 0, or in get_buf, if a "foreign" (non dam target
{ unit) cache record causes escape while flushing. It tries to mark
{ the superblock dirty and flush, iff it is currently in cache, and
{ there is no error during flush. It will not try to load the
{ superblock if it's currently not there, as this is more complexity
{ and IO than we care for. It also removes cache records for that
{ unit from the cache, marks it is_corrupt in h_unitable, and
{ returns ioresult=ord(zcatchall), iff ioresult was 0 at entry.

{ Call it with the desired unum; it will figure out base_unum.
{
{ nuke_unit preserves escapecode, but always sets ioresult to something
{ non-zero.
{
{ SFB
}
procedure nuke_unit(unit : integer);
var
  in_cache_buf, oldest, invalid : buf_hdr_ptr_type;
  old_esccode, old_ior : integer;
begin
 { all cache records are kept under the base unit number }
 unit := h_unitable^.tbl[unit].base_unum;
 old_ior := ioresult;
 old_esccode := escapecode;  {save so caller can use nuke_unit in
			      recover block}
 { only touch the superblock if it is already in the cache: we do
   not want the complexity/IO of loading it here just to mark it }
 in_cache_buf := block_in_cache(unit, SBLOCK*DEV_BSIZE, I_SB, oldest,
				invalid);
 if in_cache_buf <> nil then
   try
    {mark superblock not_ok, and flush}
    in_cache_buf^.fsptr^.clean := chr(FS_NOTOK);
    write_buf(in_cache_buf);
   recover ; {ignore escapes, as we don't care if it fails}
 { drop every cache record for this unit and mark it corrupt }
 invalidate_unit(unit);
 h_unitable^.tbl[unit].fs_corrupt := true;
 { restore the saved escapecode (NOTE(review): presumably escapecode
   reads sysescapecode -- confirm), but always leave ioresult nonzero }
 sysescapecode := old_esccode;
 if old_ior = ord(inoerror) then
  ioresult:=ord(zcatchall)
 else
  ioresult:=old_ior;
end;



{
{ generic get routine
{ find buffer with given item and address
{ failing that, find invalid buffer (unit = NO_UNIT)
{ failing that, find oldest valid unused buffer
{ failing that, panic
{ read the disc if info not already in the cache.
{ exception: I_EDATABLK is for datablks that we shouldn't read.
}
function get_buf(unit_wanted: integer; addr_wanted: integer;
		 size, this_item: integer): cache_blk_ptr_type;
label
    999;
var
    do_read: boolean;
    buf_hdr: buf_hdr_ptr_type;
    oldest_hdr: buf_hdr_ptr_type;
    invalid_hdr: buf_hdr_ptr_type;
    in_cache_buf: buf_hdr_ptr_type;
    tmpioresult: integer;
begin
    get_buf := nil;
    { preserve the caller's ioresult; any error of our own escapes
      at label 999 }
    tmpioresult := ioresult;
    ioresult := ord(inoerror);

    { I_EDATABLK is an ordinary datablk whose disc contents are about
      to be overwritten, so the read is skipped }
    if this_item = I_EDATABLK then begin
	this_item := I_DATABLK;
	do_read := false;
    end
    else
	do_read := true;

    in_cache_buf := block_in_cache(unit_wanted, addr_wanted, this_item,
				   oldest_hdr, invalid_hdr);

    if in_cache_buf <> nil then begin
	{ block found in cache }
       {get_buf := in_cache_buf;        {SFB}
	get_buf := in_cache_buf^.dptr;  {SFB}
	goto 999;
    end;

    { not in cache.  can we use an available buffer? }
    if invalid_hdr <> nil then
	buf_hdr := invalid_hdr
    else
    if oldest_hdr <> nil then
	buf_hdr := oldest_hdr
    else begin
	ioresult := ord(zdvrcachefull);
	goto 999;
    end;

    { now we have a buffer }
    with buf_hdr^ do begin
	{ evicting a dirty buffer: flush it to disc first }
	if is_dirty and (unit <> NO_UNIT) then
	  try   {make sure that any escape here: 1) tries to mark superblock
		 fs_notok on disc, 2) invalidates the cache for that unit,
		 3) marks corrupt in h_unitable, and 4) returns icorrupt in
		 ioresult iff the cache record was on same disc as dam
		 target disc. SFB}
	    write_buf(buf_hdr);
	  recover       {handle other media errors in-line here, and handle
			 errors on dam target unit in hfsdam cleanup code. SFB}
	   if unit = h_unitable^.tbl[unit_wanted].base_unum then
	     escape(escapecode) {let dam cleanup handle it. SFB}
	   else
	     begin
	       nuke_unit(unit);   {if not dam target unit that failed, then
				   continue with dam call. SFB}
	       ioresult := ord(inoerror);
	     end;
	{ claim the buffer for the requested block }
	is_dirty := false;
	use_count := 1;
	unit := unit_wanted;
	disc_addr := addr_wanted;
	size1 := size;
    end;

    {The following bugfix (marked {SFB} {) protects the cache if a too-large
     read is requested. If the requested read is > max_size of the buffer,
     treat as non-corrupting error (escape(0)), and exit the DAM quietly with
     ioresult.
     Make sure to mark the buffer not in use, and invalid, but leave other
     info there as an aid to future debugging.
     This is related to the bug wherein CTABLE never gets floppies accounted
     for in the cache configuration. Only cgroups and superblocks can request
     too-large transfers, as inodes and dblocks are fixed in size.
     SFB
    }

     { note: the "else" below pairs with "if size>max_size", so the
       actual read happens only for a sanely-sized request }
     if do_read then
      with buf_hdr^ do                  {SFB}
       if size>max_size then            {SFB}
	begin                           {SFB}
	 use_count := 0;                {SFB}
	 unit := NO_UNIT;               {SFB}
	 ioresult := ord(zdvrnoconfig); {SFB}
	 escape(0);                     {SFB}
	end                             {SFB}
     else                               {SFB}
	begin
	try
	    read_buf(buf_hdr);
	recover
	    if escapecode <> -10 then
	     ioresult := ord(zcatchall); {Exercise cleanup code. SFB};
	{ read failed: release and invalidate the buffer }
	if ioresult <> ord(inoerror) then begin
	    buf_hdr^.use_count := 0;
	    buf_hdr^.unit := NO_UNIT;
	    goto 999;
	end;
    end;

    get_buf := buf_hdr^.dptr;
999:
    {
    { ioresult during get_buf -> escape
    { ioresult when get_buf called -> just pass it on
    }
    if ioresult <> ord(inoerror) then
	escape(-10);
    ioresult := tmpioresult;
end;

{
{ generic put routine
{ buf_addr is the buffer
{ in front of every buffer is a pointer to its hdr
{ "how" tells what to do with it:
{       release -- finished with it
{       dirty -- will need writing
{       immediate -- immediate write_through
{       invalid -- buffer contents worthless, reuse buffer whenever needed
}
procedure put_buf(anyvar buf_addr: ipointer;
		  how: cache_action_set);
label
    999;
var
    buf_hdr: buf_hdr_ptr_type;
begin
    { releasing a nil buffer is a no-op (recovery code blindly
      releases everything, got or not) }
    if buf_addr = nil then
	goto 999;

    { the header pointer is stored immediately in front of the buffer }
    buf_addr := addr(buf_addr^, -bytes_per_ptr);
    buf_hdr := buf_hdr_ptr_type(buf_addr^);

    with buf_hdr^ do begin

	{ set the dirty bit? }
	if dirty in how then begin
	    is_dirty := true;
	end;

	{ flush the buffer? }
	if immediate in how then begin
	    try {mark corrupt for ALL escapes. SFB}
	      write_buf(buf_hdr);
	    recover
	      ioresult:=ord(icorrupt);
	    is_dirty := false;
	end;

	{ time stamping?  write only if dirty, and ignore errors }
	if stamping in how then
	    if is_dirty then begin
		try
		    write_buf(buf_hdr);
		recover
		    if escapecode<>-10 then     {SFB}
		     ioresult:=ord(icorrupt)
		    else
		     { a write-protected disc is not an error here }
		     if ioresult = ord(zprotected) then
			ioresult := ord(inoerror);
		is_dirty := false;
	    end;

	if release in how then begin
	    if use_count = 1 then begin {tick pclock only when use_count going
					 to 0. SFB}
		pclock := pclock + 1;
		age := pclock;
	    end;

	    if use_count > 0 then       {added to fix 9895A bug. Note that
	       if we have range check off, and let use_count (0..255) go
	       negative, the COMPILER has generated code that doesn't
	       strip off sign extend bits before putting field back in
	       record.  Item was getting set to 7, and is_dirty to True.
	       It's ok to release more than it gets; error handling strategy
	       at higher levels of code releases all cache records, even if
	       they haven't been got yet, or they're already released.
	       It's easier that way.
	       SFB}
	     use_count := use_count - 1;
	end;

	if invalid in how then
	    { don't save buffer contents }
	    unit := NO_UNIT;
    end;
999:
end;





{--------------------------------------------------------------------------}
{
{ cache setup and configuration
}

{
{ init_cache
{ determine the size of the cache, and allocate the space.
{ buffer headers are left alone until we see how big sbs and cgs are.
}
procedure init_cache;
type
    { layout of the user-exported HFS_USER_CACHE_INFO record }
    urec = record
	user_cache_bytes: integer;
	simultaneous_hfs_discs: integer;
    end;
    urec_ptr = ^urec;
var
    i: integer;
    user_ip: urec_ptr;
    max_supers: integer;
begin
    init_support;

    { nil when the user never exported cache configuration }
    user_ip := urec_ptr(value('HFS_USER_CACHE_INFO'));

    { allocate cache buffer space if not done already }
    if cache_space = nil then begin
	if user_ip = nil then
	    cache_bytes := DEF_CACHE_BYTES
	else
	    cache_bytes := max(MIN_CACHE_BYTES, user_ip^.user_cache_bytes);

	{ newbytes escapes if no memory }
	try
	    newbytes(cache_space, cache_bytes);
	    initialized := true;
	recover
	    ;
    end;

    { superblock buffer limit follows the number of discs the user
      expects to have mounted at once }
    if user_ip = nil then
	max_supers := MAX_SBS
    else
	max_supers := max(MIN_SBS, user_ip^.simultaneous_hfs_discs);

    { and init some variables }
    max_count[I_INODE]   := MAX_INODES;
    max_count[I_SB]      := max_supers;
    max_count[I_CG]      := MAX_CGS;
    max_count[I_DATABLK] := MAX_DATABLKS;

    min_count[I_INODE]   := MIN_INODES;
    min_count[I_SB]      := MIN_SBS;
    min_count[I_CG]      := MIN_CGS;
    min_count[I_DATABLK] := MIN_DATABLKS;

    unit_count[I_INODE]   := UNIT_INODES;
    unit_count[I_SB]      := UNIT_SBS;
    unit_count[I_CG]      := UNIT_CGS;
    unit_count[I_DATABLK] := UNIT_DATABLKS;

    { sb and cg buffer sizes are disc-dependent; 0 means "not yet
      known" and is filled in by init_hfs_unit (configure_cache
      supplies small defaults if it never is) }
    item_size[I_INODE]   := sizeof(binode_type);
    item_size[I_DATABLK] := cache_blk_size;
    item_size[I_SB]      := 0;
    item_size[I_CG]      := 0;


    {
    { Configure the cache, in case TABLE never tries.
    }
    configure_cache;

end;


{
{ init_hfs_unit
{ look at the superblock and calculate the size of
{ the superblock, with cylinder summary info, and the
{ size of the cgroup, with freemap.  item_size[I_SB and I_CG]
{ has the maximum of these two sizes.
{ The cache isn't set up yet, but the buffer space is allocated,
{ so we just use the beginning of that space.
{ If it looks like an HFS unit, we call init_support_unit,
{ which will set "is_hfs_unit" in the h_unitable.
}
procedure init_hfs_unit(unum: integer; force: boolean);
label
    999;
var
    fs: super_block_ptr_type;
    base_unum: integer;
    is_hfs: boolean;
    f: fib;     {SFB}
begin
    is_hfs := false;

    if not initialized then begin
	ioresult := ord(zdvrnoconfig);
	goto 999;
    end;

    { read in fixed part of superblock }
    { the cache is not configured yet, so borrow the start of the
      raw buffer space as scratch for the superblock read }
    fs := super_block_ptr_type(cache_space);
    fs^.magic := 0;

    {The following lines fix a bug wherein floppies were not influencing
     cache configuration properly. The superblock and cylinder group size
     were not being computed for floppies, since the following get_bytes would
     fail, so the superblock was not being read. The getbytes failed because
     the fpeof in tempfib was 0, giving ioresult = ieof. Fpeof was 0 because
     umaxbytes was 0 at this point, and unblockeddam (called by setuptempfib,
     called by set_unit) merely copies umaxbytes -> fpeof. Umaxbytes is 0
     for floppies at this point because CTABLE assign_floppy_pair passes in 0
     to tea_CS80_sv, which passes it to p_umaxbytes in tea, whereas the CTABLE
     "local hard discs" section calls get_CS80_parms, and passes mb in to
     tea_CS80_mv.
     See CTABLE for comments on clearunit being used to set umaxbytes. The
     clearunit call in assign_and_clear_unit happens too late for the need
     described here.
     The bug symptom was the cache getting smashed when a floppy had a larger
     cgroup cache record size requirement than any disc interrogated during
     CTABLE (only hard discs were being interrogated). There is a related fix
     in get_buf, to refuse a too-large read_buffer request.
     SFB/LAF
    }

    ioresult := ord(inoerror);                                          {SFB}
    f.funit := unum;                                                    {SFB}
    call(unitable^[unum].tm, addr(f), clearunit, unum, 0, 0);           {SFB}

    unum := set_unit(unum);
    try
	get_bytes(unum, sizeof(fs^), SBLOCK*DEV_BSIZE, windowp(fs));
    recover
	{ medium-changed on a unit with no valid medium just means
	  "first touch": retry the read once }
	if (ioresult = ord(zmediumchanged))
	and not unitable^[unum].umediavalid then begin
	    try
		ioresult := ord(inoerror);
		get_bytes(unum, sizeof(fs^), SBLOCK*DEV_BSIZE, windowp(fs));
	    recover
		goto 999;
	end
	else    goto 999;

    { is it HFS?  sanity-check the superblock fields }
    with fs^ do
    if (magic <> FS_MAGIC)
	or (fsize * frag <> bsize)
	or (minfree < 0)
	or (minfree > 100)
	or (sbsize < sizeof(fs^))
	or (cpg * ncg < ncyl) then
	    goto 999;

    with fs^ do
	{ size * fsize must fit in 31 bits (disk byte addressable) }
	if (size < 0) or (maxint div size < fsize) then
	    goto 999;

    is_hfs := true;

    { remember the largest sb/cg buffer requirement seen so far;
      configure_cache sizes the cache from these maxima }
    { sb = sb_bytes + cs_bytes }
    item_size[I_SB] := max(item_size[I_SB],
			   actual_sb_size(fs) + actual_cs_size(fs));
    item_size[I_CG] := max(item_size[I_CG],
			   actual_cg_size(fs));

999:
    { register with support code; second arg flags a unit that was
      not unmounted cleanly }
    if is_hfs or force then
	init_support_unit(unum, is_hfs and (fs^.clean <> chr(FS_CLEAN)));
end;


{
{ configure_cache
{ Here we allocate the cache memory to the buffer headers.
{ The algorithm is as follows:
{ We have a replicable unit consisting of unit_count[i] copies
{ of item i.
{ We allocate this unit as many times as fits.
{ We may have to adjust by maximum counts on some items.
{ If this unit doesn't fit at all, then we take min_count[i] copies.
{ If even that won't fit, we panic.
{ Each buffer item is preceded by a pointer back to its buffer header.
{ This is not included in the various types, so we have
{ to add room for it here.
}
procedure configure_cache;
label
    999;
var
    unit_bytes: integer;
    factor: integer;
    half_extra, extra_bytes: integer;
    i: integer;
    cache_address: ipointer;
begin
    if not initialized then
	goto 999;

    {
    { supply defaults if never could look at the disc.
    { The defaults are small, because this only happens in the
    { case of floppies, where TABLE can force an assignment of
    { HFSDAM even when the disc isn't in the drive yet.
    }
    if item_size[I_SB] = 0 then begin
	item_size[I_SB] := 3*1024;
	item_size[I_CG] := 2*1024;
    end;

    { account for back pointer for each type, plus header }
    for i := 1 to NUM_ITEMS do
	buf_size[i] := item_size[i] + bytes_per_ptr + sizeof(buf_hdr_type);

    { how many bytes in the unit? }
    unit_bytes := 0;
    for i := 1 to NUM_ITEMS do
	unit_bytes := unit_bytes + (unit_count[i] * buf_size[i]);

    { how many times does it fit in the cache? }
    factor := cache_bytes div unit_bytes;
    if factor > 0 then
	{ unit can be replicated }
	for i := 1 to NUM_ITEMS do
	    buf_count[i] := min(factor * unit_count[i], max_count[i])
    else
	{ Not enough space to replicate unit }
	for i := 1 to NUM_ITEMS do
	    buf_count[i] := min_count[i];

    { calculate extra bytes }
    extra_bytes := cache_bytes;
    for i := 1 to NUM_ITEMS do
	extra_bytes := extra_bytes - (buf_count[i] * buf_size[i]);

    { if none, panic }
    { (negative means even the minimum configuration does not fit;
      the cache is marked unusable) }
    if extra_bytes < 0 then begin
	initialized := false;
	goto 999;
    end;

    { give half of the extra space to the datablks }
    { NOTE(review): presumably allocate_extra returns its unused
      residue in the var parameter -- confirm against its definition }
    half_extra := extra_bytes div 2;
    extra_bytes := half_extra;
    allocate_extra(half_extra, I_DATABLK);

    { the other half goes to the inodes }
    extra_bytes := extra_bytes + half_extra;
    allocate_extra(extra_bytes, I_INODE);

    { now allocate the cache space }
    cache_address := ipointer(cache_space);
    for i := 1 to NUM_ITEMS do begin
	assign_space(cache_address, buf_count[i], buf_size[i], i);
    end;

999:
    {
    { reinit enough vars so can run TABLE again (or for the first time)
    { (TABLE calls init_hfs_unit + configure_cache)
    }
    item_size[I_SB]      := 0;
    item_size[I_CG]      := 0;
end;


{
{ sync the cache by flushing all dirty buffers
}
procedure sync;
label
    999;
var
    i, j: integer;
    buf_hdr: buf_hdr_ptr_type;
    tmpioresult: integer;
begin
    if not initialized then
	goto 999;
    { preserve the caller's ioresult; report the first flush error
      in its place }
    tmpioresult := ioresult;
    for i := 1 to NUM_ITEMS do
	for j := 1 to buf_count[i] do begin
	    buf_hdr := addr(buf_hdrs_array[i]^[j]);
	    with buf_hdr^ do begin
		{ NOTE(review): use_count is cleared unconditionally,
		  so sync assumes no buffer is still in use -- confirm }
		use_count := 0;
		{fix for cacher bug with 2 HFS units and LIBRARIAN. SFB 6-16-87}
		if is_dirty and (unit=current_unit) {SFB} then begin
		    try
			write_buf(buf_hdr);
		    recover begin
			{ remember the first error, then drop all
			  cached state for the failing unit }
			if tmpioresult = ord(inoerror) then
			    tmpioresult := ioresult;
			invalidate_unit(unit);
		    end;
		    is_dirty := false;
		end;
	    end;
	end;
999:
    ioresult := tmpioresult;
end;

$if FALSE$
{
{ check_cache
{ debug routine
{ caller asserts that all buffers have been released.
}
{ Don't enable except for test. SFB
procedure check_cache;
label
    999;
var
    i, j: integer;
begin
    if not initialized then
	goto 999;
    for i := 1 to NUM_ITEMS do
	for j := 1 to buf_count[i] do
	    if buf_hdrs_array[i]^[j].use_count <> 0 then
		writeln('BUFFER STILL IN USE, type ', i:1, ' # ', j:1);
999:
end;
}

{
{ print the cache statistics
}
procedure cache_stats;
label
    999;
var
    i: integer;
begin
    if not initialized then
	goto 999;
    { for each item type print: buffer count, lazy (L) and flush (F)
      write counts with the lazy fraction, then hit/miss counts with
      the hit rate.  Guard both percentages against divide-by-zero }
    for i := 1 to NUM_ITEMS do begin
	if i = I_INODE then
	    write('inodes  ')
	else
	if i = I_SB then
	    write('supers  ')
	else
	if i = I_CG then
	    write('cgroups ')
	else
	if i = I_DATABLK then
	    write('data    ');
	write('(', buf_count[i]:2, ' bufs) ');
	write(cache_lazy[i]:4, ' L, ');
	write(cache_flush[i]:4, ' F, ');
	if cache_lazy[i] + cache_flush[i] = 0 then
	    write('L/F+L  0%, ')
	else
	    write('L/F+L ', ((cache_lazy[i]*100)
		div (cache_lazy[i] + cache_flush[i])):2, '%, ');
	write(cache_hit[i]:4, ' H, ');
	write(cache_miss[i]:4, ' M, ');
	if cache_hit[i] + cache_miss[i] = 0 then
	    writeln('hit rate  0%')
	else
	writeln('hit rate ', ((cache_hit[i]*100)
		div (cache_hit[i] + cache_miss[i])):2, '%');
    end;
    { zero the counters so the next report starts fresh }
    for i := 1 to NUM_ITEMS do begin
	cache_hit[i] := 0;
	cache_miss[i] := 0;
	cache_lazy[i] := 0;
	cache_flush[i] := 0;
    end;

999:
end;
$end$

{
{ invalidate_unit
{ invalidate (no flush) all buffers on this unit.
{ if this unit is ALL_UNITS, then we invalidate on every unit.
{ otherwise, badunit is first mapped to its base unit (base_unum).
}
procedure invalidate_unit(badunit: integer);
label
    999;
var
    i, j: integer;
begin
    if not initialized then
	goto 999;
    { force use of base unit }
    if badunit <> ALL_UNITS then
	badunit := h_unitable^.tbl[badunit].base_unum;
    { drop matching buffers without flushing: dirty contents are lost }
    for i := 1 to NUM_ITEMS do
	for j := 1 to buf_count[i] do
	    with buf_hdrs_array[i]^[j] do
		if (badunit = ALL_UNITS) or (badunit = unit) then begin
		    unit := NO_UNIT;
		    is_dirty := false;
		    use_count := 0;
		end;
999:
end;


{----------------------------------------------------------------}
{
{ EXPORTED DATA ACCESS ROUTINES
}

{
{ get a superblock
{ We check the disc with every get_superblock to see if it's
{ still OK.  If not, we invalidate the unit, and return nil.
{ Side effects: sets current_unit and current_super, which the
{ other get/put routines rely on.
}
function  get_superblock(unum: shortint): super_block_ptr_type;
var
    fs: super_block_ptr_type;
begin
    { map to the base unit and make it current at the driver level }
    unum := set_unit(unum);
    try
      fs := super_block_ptr_type(get_buf(unum,
				       SBLOCK*DEV_BSIZE,
				       0,
				       I_SB));
    recover
	fs := nil;

    { superblock unreadable: drop all cached state for the unit and
      report the medium gone }
    if (fs = nil) then begin
	invalidate_unit(unum);
	medium_gone;
    end;
    current_unit := unum;
    current_super := fs;
    get_superblock := fs;
end;


{
{ put superblock back
{ If the filesystem was modified (fmod flag set in the superblock),
{ force the dirty action on so the block gets written out, and
{ clear the flag.  "how" is otherwise passed through to put_buf.
}
procedure put_superblock(fs: super_block_ptr_type;
			 how: cache_action_set);
begin
    { nested ifs rather than a single "and": fs must not be
      dereferenced when nil, and plain "and" is only safe for that
      when partial evaluation is in effect }
    if fs <> nil then
	if fs^.fmod = FS_MODIFIED then begin
	    how := how + [dirty];
	    fs^.fmod := FS_NOT_MODIFIED;
	end;
    put_buf(fs, how);
end;


{
{ get a cgroup
{ uses current_unit/current_super set by get_superblock; the
{ buffer size is the actual (disc-dependent) cgroup size.
}
function  get_cgroup(groupnum: integer): cgroup_ptr_type;
begin
    get_cgroup :=
	cgroup_ptr_type(get_buf(current_unit,
				cgroup_start(current_super, groupnum),
				actual_cg_size(current_super),
				I_CG));
end;


{
{ put cgroup back
{ "how" tells what to do with the buffer (see put_buf).
}
procedure put_cgroup(cgp: cgroup_ptr_type;
		     how: cache_action_set);
begin
    put_buf(cgp, how);
end;


{
{ get an inode
{ validates nodenum against the filesystem's inode range and
{ escapes with znosuchblk if out of range.  On success the buffered
{ binode is stamped with its own number (read back by inumber).
}
function  get_inode(nodenum: integer): inode_ptr_type;
var
    ip: inode_ptr_type;
begin
    get_inode := nil;   {ensure no random garbage. iptrs are usually
			 instantiated from the stack. SFB}
    { valid inode numbers run from 1 to cgroups * inodes-per-group }
    with current_super^ do
	if (nodenum < 1) or (nodenum > ncg*ipg) then begin
	    ioresult := ord(znosuchblk);
	    escape(-10);
	end;
    ip := inode_ptr_type(get_buf(current_unit,
				 inode_start(current_super, nodenum),
				 sizeof(inode_type),
				 I_INODE));
    if ip <> nil then
	binode_ptr_type(ip)^.inumber := nodenum;
    get_inode := ip;
end;


{
{ put inode back
{ "how" tells what to do with the buffer (see put_buf).
}
procedure put_inode(ip: inode_ptr_type;
		    how: cache_action_set);
begin
    put_buf(ip, how);
end;


{
{ get_data1
{ subroutine of get_datablk and get_edatablk
{ we want the cache block that contains the byte addressed
{ by offset, where this is the offset within the given frag.
{ "item" selects I_DATABLK (read from disc) or I_EDATABLK (don't).
}
function  get_data1(fragnum: frag_type;
		    offset: integer;
		    item: integer): cache_blk_ptr_type;
var
    daddr: integer;
begin
    { round the byte address down to a cache-block boundary }
    daddr := rounddownp2(fragstobytes(current_super, fragnum) + offset,
			 cache_blk_size);
    get_data1 :=
	cache_blk_ptr_type(get_buf(current_unit,
				   daddr,
				   cache_blk_size,
				   item));
end;


{
{ get a datablk
{ reads the containing cache block from disc if not cached.
}
function  get_datablk(fragnum: frag_type;
		      offset: integer): cache_blk_ptr_type;
begin
    get_datablk := get_data1(fragnum, offset, I_DATABLK);
end;


{
{ get a datablk, but don't read the disc if it isn't in
{ the cache, because we're about to overwrite it anyway.
}
function  get_edatablk(fragnum: frag_type;
		       offset: integer): cache_blk_ptr_type;
begin
    get_edatablk := get_data1(fragnum, offset, I_EDATABLK);
end;


{
{ put datablk back
{ "how" tells what to do with the buffer (see put_buf).
}
procedure put_datablk(dp: cache_blk_ptr_type;
		      how: cache_action_set);
begin
    put_buf(dp, how);
end;


{
{ Miscellany
}

{
{ Given an inode ptr, return the inode number.
{ The number was stamped into the buffered binode by get_inode.
}
function inumber(iptr: inode_ptr_type): integer;
begin
    inumber := binode_ptr_type(iptr)^.inumber;
end;

{
{ return the inode type (IFREG, IFDIR, etc).
{ i.e. the mode field masked with IFMT.
}
function itype(ip: inode_ptr_type): integer;
begin
    itype := binand(ip^.mode, IFMT);
end;

end.


@


55.1
log
@Automatic bump of revision number for PWS version 3.25A
@
text
@@


54.1
log
@Automatic bump of revision number for PWS version 3.24
@
text
@@


53.1
log
@Automatic bump of revision number for PWS version 3.24B
@
text
@@


52.1
log
@Automatic bump of revision number for PWS version 3.24A
@
text
@@


51.1
log
@Automatic bump of revision number for PWS version 3.24d
@
text
@@


50.1
log
@Automatic bump of revision number for PWS version 3.23c
@
text
@@


49.1
log
@Automatic bump of revision number for PWS version 3.24b
@
text
@@


48.1
log
@Automatic bump of revision number for PWS version 3.24a
@
text
@@


47.1
log
@Automatic bump of revision number for PWS version 3.23
@
text
@@


46.1
log
@Automatic bump of revision number for PWS version 3.23
@
text
@@


45.1
log
@Automatic bump of revision number for PWS version 3.23C
@
text
@@


44.1
log
@Automatic bump of revision number for PWS version 3.23B
@
text
@@


43.1
log
@Automatic bump of revision number for PWS version 3.23aA
@
text
@@


42.1
log
@Automatic bump of revision number for PWS version 3.23e
@
text
@@


41.1
log
@Automatic bump of revision number for PWS version 3.23d
@
text
@@


40.1
log
@Automatic bump of revision number for PWS version 3.23c
@
text
@@


39.1
log
@Automatic bump of revision number for PWS version 3.23b
@
text
@@


38.1
log
@Automatic bump of revision number for PWS version 3.23a
@
text
@@


37.1
log
@Automatic bump of revision number for PWS version 3.3a
@
text
@@


36.1
log
@Automatic bump of revision number for PWS version 3.22
@
text
@@


35.1
log
@Automatic bump of revision number for PWS version 3.22
@
text
@@


34.1
log
@Automatic bump of revision number for PWS version 3.22
@
text
@@


33.1
log
@Automatic bump of revision number for PWS version 3.22D
@
text
@@


32.1
log
@Automatic bump of revision number for PWS version 3.22C
@
text
@@


31.1
log
@Automatic bump of revision number for PWS version 3.22B
@
text
@@


30.1
log
@Automatic bump of revision number for PWS version 3.22A
@
text
@@


29.1
log
@Automatic bump of revision number for PWS version 3.22b
@
text
@@


28.2
log
@I put in changes which try to fix the cache corruption bug encountered
with floppies with odd HFS parameters. The code is recovered from work
Larry Fenske and I did summer of '87, but is not desk-checked, due to
difficulties in building the .CODE. Look for comments containing {SFB}
or SFB/LAF. Two areas changed. Scott Bayes.
@
text
@@


28.1
log
@Automatic bump of revision number for PWS version 3.3b
@
text
@d730 23
a752 1
    if do_read then begin
d954 1
d966 26
@


27.1
log
@Automatic bump of revision number for PWS version 3.3a
@
text
@@


26.1
log
@Automatic bump of revision number for PWS version 3.3 Synch
@
text
@@


25.1
log
@Automatic bump of revision number for PWS version 3.2Y
@
text
@@


24.1
log
@Automatic bump of revision number for PWS version 3.2
@
text
@@


23.1
log
@Automatic bump of revision number for PWS version 3.2P
@
text
@@


22.1
log
@Automatic bump of revision number for PWS version 3.2N
@
text
@@


21.1
log
@Automatic bump of revision number for PWS version 3.2M
@
text
@@


20.2
log
@Fixes for cc.CODE bug. Actually for making it robust when running
with too little memavail. Made sure get_inode returns nil or ^buffer.
Added otherwise in read/write_buf case stmts. Redid use_count
decrementing. Added nuke_unit, and calls to it. Fixed up recover
blocks for escapecode <> -10, to ensure ioresult is set. Changed age
and pclock to integers. Note that $stackcheck is ON.
@
text
@@


20.1
log
@Automatic bump of revision number for PWS version 3.2L
@
text
@a90 5
$if 1=0$
var
    intrace, outtrace: boolean;
$end$

a118 4
$if 1=0$
procedure check_cache;
procedure cache_stats;
$end$
d148 1
a152 2
    debug = false;

d172 1
a172 1
	age: ushort;
d251 1
a251 1
    pclock: ushort;
a252 8
$if debug$
    { cache statistics }
    cache_hit: item_info_type;
    cache_miss: item_info_type;
    cache_lazy: item_info_type;
    cache_flush: item_info_type;
$end$

a374 1

a406 4
$if debug$
    if intrace then
	report('READ DISK');
$end$
d459 3
a487 4
$if debug$
		if outtrace then
		    reportn('WRITE inode ',  binode_ptr_type(iptr)^.inumber);
$end$
a502 8
$if debug$
		if outtrace then
		    if item = I_CG then
			reportn('WRITE cg ', cgptr^.cgx)
		    else
			reportn('WRITE datablk ',
				disc_addr div current_super^.fsize);
$end$
a509 4
$if debug$
		if outtrace then
		    report('WRITE superblock');
$end$
d519 3
a522 3
$if debug$
	cache_flush[item] := cache_flush[item] + 1;
$end$
d542 3
d550 1
a550 1
			var invalid_hdr: buf_hdr_ptr_type): cache_blk_ptr_type;
d577 2
a578 7
$if debug$
		{ debugging check }
		if (use_count > 1) and xprintmesg then
		    writeln('USE COUNT: ', use_count:1,'  ITEM ',this_item:1);
		cache_hit[this_item] := cache_hit[this_item] + 1;
$end$
		block_in_cache := dptr;
d605 10
d616 32
d649 2
d669 1
a669 1
    in_cache_buf: cache_blk_ptr_type;
d688 2
a689 1
	get_buf := in_cache_buf;
d707 5
d713 10
d734 2
a735 1
	    ;
a740 3
$if debug$
	cache_miss[this_item] := cache_miss[this_item] + 1;
$end$
a781 4
$if debug$
	    if not (immediate in how) then
		cache_lazy[item] := cache_lazy[item] + 1;
$end$
d786 4
a789 1
	    write_buf(buf_hdr);
d799 4
a802 1
		    if ioresult = ord(zprotected) then
d808 6
a813 5
$if debug$
	    { debugging check }
	    if use_count = 0 then
		writeln('TOO MANY BUFFER RELEASES');
$end$
d819 4
a824 4
	    if use_count = 0 then begin
		pclock := pclock + 1;
		age := pclock;
	    end;
a904 8
$if debug$
    for i := 1 to NUM_ITEMS do begin
	cache_hit[i] := 0;
	cache_miss[i] := 0;
	cache_lazy[i] := 0;
	cache_flush[i] := 0;
    end;
$end$
a1066 5
$if debug$
	if xprintmesg then
	    writeln('item ', i:1, ': ', buf_count[i]:1,
		' buffers, size ', buf_size[i]:1);
$end$
a1068 3
$if debug$
    if xprintmesg then for i := 1 to 600000 do ;
$end$
d1116 1
a1116 1
$if debug$
d1122 1
d1137 1
a1138 1

a1200 3
$if debug$
    xreportn('INVALIDATE UNIT ', badunit);
$end$
a1231 4
$if debug$
    if intrace then
	report('get_superblock');
$end$
a1241 3
$if debug$
	xreportn('nil superblock, iores', ioresult);
$end$
a1269 4
$if debug$
    if intrace then
	reportn('get_cgroup ', groupnum);
$end$
d1295 2
a1296 4
$if debug$
    if intrace then
	reportn('get_inode ', nodenum);
$end$
a1349 4
$if debug$
    if intrace then
	reportn('get_datablk ', fragnum + offset div current_super^.fsize);
$end$
a1360 4
$if debug$
    if intrace then
	reportn('get_edatablk ', fragnum + offset div current_super^.fsize);
$end$
@


19.3
log
@Added fix to keep use_count <- -1 from corrupting fields is_dirty and
item (setting them to T and 7 resp.). This bug caused multiple
releases to corrupt discs sometimes. Multiple releases are acceptable,
as during recover, we often blindly release ALL buffers, whether laready
released or not.
@
text
@@


19.2
log
@Fix to disallow sync to flush dirty records for units other than
current_unit. Related to hfstm no-sync of dirty records optimization
@
text
@d791 7
a797 1
	    use_count := use_count - 1;
@


19.1
log
@Automatic bump of revision number for PWS version 3.2K
@
text
@d1086 2
a1087 1
		if is_dirty then begin
@


18.1
log
@Automatic bump of revision number for PWS version 3.2J
@
text
@@


17.1
log
@Automatic bump of revision number for PWS version 3.2I+
@
text
@@


16.1
log
@Automatic bump of revision number for PWS version 3.2I
@
text
@@


15.1
log
@Automatic bump of revision number for PWS version 3.2H
@
text
@@


14.1
log
@Automatic bump of revision number for PWS version 3.2G
@
text
@@


13.1
log
@Automatic bump of revision number for PWS version 3.2F
@
text
@@


12.1
log
@Automatic bump of revision number for PWS version 3.2E
@
text
@@


11.2
log
@Fix for FSDat00812 -- 9133H microfloppy buffers confused.
@
text
@@


11.1
log
@Automatic bump of revision number for PWS version 3.2D
@
text
@d1193 1
a1193 1
	badunit := current_unit;
@


10.3
log
@Re-fix for FSDat00573. Always look at user's value of simultaneous_hfs_units
whether or not cache has been allocated.
@
text
@@


10.2
log
@Fix for FSDat00852
@
text
@a831 1
    max_supers := MAX_SBS;
d833 2
a836 1
	user_ip := urec_ptr(value('HFS_USER_CACHE_INFO'));
d839 1
a839 1
	else begin
a840 2
	    max_supers := max(MIN_SBS, user_ip^.simultaneous_hfs_discs);
	end;
d849 5
@


10.1
log
@Automatic bump of revision number for PWS version 3.2C
@
text
@d1249 1
a1249 1
    if (fs <> nil) and (release in how) and (fs^.fmod = FS_MODIFIED) then begin
@


9.2
log
@Fix FSDat00573 -- Use user's value for simultaneous_hfs_units
@
text
@@


9.1
log
@Automatic bump of revision number for PWS version 3.2B
@
text
@d829 1
d832 1
d839 1
a839 1
	else
d841 2
d854 1
a854 1
    max_count[I_SB]      := MAX_SBS;
@


8.4
log
@FSDat00638 -- init_hfs retries read based on umediavalid. This fixes
bug in which turning a disk on and off could make it look like LIF
after running TABLE
@
text
@@


8.3
log
@Fixes FSDat00563. Put time stamp in superblock whenever it is written
to disk.
@
text
@d919 9
a927 1
	goto 999;
@


8.2
log
@Fixes FSDat00653
read_buf accepts superblock if fs_clean is bad, but still sets
corrupt bit in h_unitable
@
text
@d546 1
@


8.1
log
@Automatic bump of revision number for PWS version 3.2A
@
text
@d462 1
a462 1
		if fsptr^.clean <> chr(FS_CLEAN) then begin
a463 3
		    ioresult := ord(icorrupt);
		    goto 999;
		end;
@


7.1
log
@Automatic bump of revision number for PWS version 3.2l
@
text
@@


6.1
log
@Automatic bump of revision number for PWS version 3.2k
@
text
@@


5.1
log
@Automatic bump of revision number for PWS version 3.2j
@
text
@@


4.4
log
@Use value() to lookup hfs_user exported values.
@
text
@@


4.3
log
@Don't recognize as HFS if too big (disk byte addresses
must fit in integer).
@
text
@d24 1
a24 1
$search 'hfstuff', 'hfscalc', 'hfsupport', 'hfs_user'$
d33 1
a33 2
    iocomasm,
    hfs_user;
d221 3
d822 6
d830 1
d836 6
a841 1
	cache_bytes := max(MIN_CACHE_BYTES, user_cache_bytes);
@


4.2
log
@If read superblock and magic number bad, set ioresult to
inodirectory (was icorrupt).
@
text
@d919 5
@


4.1
log
@Automatic bump of revision number for PWS version 3.2i
@
text
@d456 5
a460 2
		if (fsptr^.magic <> FS_MAGIC)
		or (fsptr^.clean <> chr(FS_CLEAN)) then begin
@


3.2
log
@Turn off debugging stuff with $if debug$ (now off).
A little cleanup.
Get user_cache_bytes by importing hfs_user.
@
text
@@


3.1
log
@Automatic bump of revision number for PWS version 3.2h
@
text
@a0 1
$linenum 7000$
a1 1
$partial_eval$
d3 9
a20 3
$range off$
$ovflcheck off$
$debug off$
d22 1
d24 1
a24 1
$search 'hfstuff', 'hfscalc', 'support'$
a25 2
module hfscache;

d33 2
a34 1
    iocomasm;
d50 2
d75 3
a77 3
{  1.  Fixed-size block allocated by init_cache.  "cache_bytes" is size
{       in bytes of cache.  This size is now built-in.  It will be
{       changed so it is user-configurable.
d81 1
a81 1
{       by TABLE, probably.
d83 1
a83 1
{       allocated in step 1.  It is probably also called by TABLE
d92 1
d95 1
d125 1
d128 1
d161 2
a162 2
type
    integer_ptr_type = ^integer;
a163 1
const
a218 3
    { size of entire cache in bytes.  SHOULD COME FROM USER. }
    user_cache_bytes = 20*1024;

d261 1
d267 1
d336 1
a336 1
procedure assign_space(var cache_address: integer_ptr_type;
d424 1
d427 1
d506 1
d509 1
d525 1
d532 1
d540 1
d543 1
d553 1
d555 1
d607 1
d612 1
d715 1
d717 1
d741 1
a741 1
procedure put_buf(anyvar buf_addr: integer_ptr_type;
d759 1
d762 1
d783 1
d787 1
d854 1
d861 1
d952 1
a952 1
    cache_address: integer_ptr_type;
d1009 1
a1009 1
    cache_address := integer_ptr_type(cache_space);
d1011 1
d1015 1
d1018 1
d1020 1
d1067 1
a1067 1

d1137 1
a1138 1

d1151 1
d1153 1
d1185 1
d1188 1
d1199 1
d1201 1
d1230 1
d1233 1
d1259 1
d1262 1
d1316 1
d1319 1
d1331 1
d1334 1
d1371 1
a1371 11
{
{ CHANGES STILL NEEDED:
{   1.  get user_cache_bytes from user
{   2.  grand tuning
{   3.  err recovery
{ NEW release 3
{       markuser in init_cache
{       init_cache calls configure_cache
{       configure_cache reinits
{       "reuse" added to cache actions
}
@


2.11
log
@If stamping, don't reset ioresult to inoerror after write escape
unless ioresult was zprotected.
@
text
@@


2.10
log
@Add put_* param "stamping", true when
flushing out time stamps.  Object -- prevent write errors
on read-only disks.
@
text
@d753 2
a754 1
		    ioresult := ord(inoerror);
@


2.9
log
@Keep linked list of buffers, most recently used first.
We have a list for each buffer type.
@
text
@d96 1
a96 1
    cache_action_type = (release, dirty, immediate, invalid);
d746 10
@


2.8
log
@Accidentally deleted "end." in last delta.
@
text
@d167 1
a168 1
	age: integer;
d174 2
a175 1
	max_size: integer;
d177 1
a184 1
    buf_hdr_ptr_type = ^buf_hdr_type;
d228 3
d253 1
a253 1
    pclock: integer;
d285 1
a285 1
    actual_sb_size := roundup(sb_bytes, DISC_SECTOR);
d298 1
a298 1
    actual_cs_size := roundup(cs_bytes, DISC_SECTOR);
d312 1
a312 1
    actual_cg_size := roundup(cg_bytes, DISC_SECTOR);
d337 1
d351 4
d409 1
a409 1
    sector_buf: packed array[1..DISC_SECTOR] of char;
d416 2
d424 1
a424 1
		sector_addr := rounddownp2(disc_addr, DISC_SECTOR);
d426 1
a426 1
		get_bytes(unit, DISC_SECTOR, sector_addr, addr(sector_buf));
d487 1
a487 1
    sector_buf: packed array[1..DISC_SECTOR] of char;
d497 1
a497 1
		    writeln('write inode ',  binode_ptr_type(iptr)^.inumber:1);
d499 1
a499 1
		sector_addr := rounddownp2(disc_addr, DISC_SECTOR);
d501 1
a501 1
		get_bytes(unit, DISC_SECTOR, sector_addr, addr(sector_buf));
d508 1
a508 1
		put_bytes(unit, DISC_SECTOR, sector_addr, addr(sector_buf));
d515 1
a515 1
			writeln('write cg ', cgptr^.cgx:1)
d517 2
a518 1
			writeln('write datablk ', disc_addr:1);
d527 1
a527 1
		    writeln('write superblock');
d555 2
d567 1
a567 2
    buf_array: buf_hdr_array_ptr_type;
    buf_hdr: buf_hdr_ptr_type;
a577 1
    buf_array := buf_hdrs_array[this_item];
d580 5
a584 2
    for i := 1 to buf_count[this_item] do begin
	buf_hdr := addr(buf_array^[i]);
d593 7
a599 1
		block_in_cache := buf_hdr^.dptr;
d613 2
d616 1
a616 1
    end;
d1057 1
a1057 2
    if printmesg then
     for i := 1 to NUM_ITEMS do begin
d1085 7
d1141 1
a1141 1
	writeln('get_superblock');
d1182 1
a1182 1
	writeln('get_cgroup ', groupnum:1);
d1209 1
a1209 1
	writeln('get_inode ', nodenum:1);
d1264 1
a1264 1
	writeln('get_datablk ', fragnum);
d1277 1
a1277 1
	writeln('get_edatablk ', fragnum);
a1289 13

$if 1=0$
{-------------------------------------------------------------------------}
{
{ obsolescent routines called by older code
}
function  get_data     (blocknum : integer; devblocknum : shortint) :
			dev_block_ptr_type ;
begin
    get_data := dev_block_ptr_type(get_datablk(blocknum,
					       devblocknum*DEV_BSIZE));
end;
$end$
@


2.7
log
@get_superblock no longer calls check_disk_status;
this is too slow and is now done only in getvolumename.
cache size is now 20K.
Bug in debugging check for too many buffer releases.
@
text
@d1296 1
@


2.6
log
@in put_buf, don't check for use_count < 0 after decrement.
Reason: use_count is 0..255.  Instead, check for 0 before decrement.
@
text
@a13 2
{
{ enable when done
d16 1
a16 1
}
a17 1
$debug on$                 {debug OFF when finished}
d109 1
d210 1
a210 1
    user_cache_bytes = 10*1024;
d1114 6
a1119 13
    report('after set_unit');
    check_disc_status;
    reportn('after check disc status, iores', ioresult);
    if ioresult = ord(inoerror) then begin
	try
	  fs := super_block_ptr_type(get_buf(unum,
					   SBLOCK*DEV_BSIZE,
					   0,
					   I_SB));
	recover
	    fs := nil;
    end
    else
d1123 1
a1123 1
	reportn('nil superblock, iores', ioresult);
a1295 1
end.
@


2.5
log
@pass arg to init_support_unit telling whether FS is corrupt.
True only when superblock is OK, but fs_clean is bad.
@
text
@d727 3
a734 3
	    { debugging check }
	    if use_count < 0 then
		writeln('TOO MANY BUFFER RELEASES');
@


2.4
log
@Change a few xreports to reports.  Debugging printout changes only.
@
text
@d866 1
a866 1
	init_support_unit(unum);
@


2.3
log
@try/recover around read_buf in get_buf so can reset buffer hdr
if read_buf fails by escaping.
@
text
@d1116 1
a1116 1
    xreport('after set_unit');
d1118 1
a1118 1
    xreportn('after check disc status, iores', ioresult);
@


2.2
log
@reset_prefix -> medium_gone
@
text
@d665 4
a668 1
	read_buf(buf_hdr);
@


2.1
log
@Auto bump rev number to 2.1 for sys 3.2e.
@
text
@d1131 1
a1131 1
	reset_prefix(fs);
@


1.21
log
@Call set_corrupt if sb or cg not OK.
@
text
@@


1.20
log
@Add "force" 2nd parameter to init_hfs_unit
for use by TABLE with floppies that may not be in drive.
@
text
@d427 2
a428 1
		and (cgptr^.magic <> CG_MAGIC) then
d430 1
d439 1
@


1.19
log
@replace writelns with xreports.
Some changes to allow TABLE to run reasonably.
@
text
@d116 1
a116 1
procedure init_hfs_unit(unum: integer);
d816 1
a816 1
procedure init_hfs_unit(unum: integer);
d859 1
a859 1
    if is_hfs {or force} then
@


1.18
log
@get_* used to escape on ioresult.  Now it only escapes if the
ioresult happened during the get_*, otherwise it just gives
back the reqeuested item.
@
text
@d116 1
a116 1
function  init_hfs_unit(unum : shortint): boolean;
d121 1
a121 1
procedure invalidate_unit(badunit: shortint);
d262 2
a263 2
    { has cache been properly configured? }
    configure_error: boolean;
d556 2
a557 1
    if configure_error then begin
d574 1
a574 1
		if (use_count > 1) then
d756 2
a757 1
    configure_error := false;
d764 1
d766 1
a766 1
	    configure_error := true;
a798 4
    { These numbers suffice for all HP discs as long as the file
    { systems are made with the default number of cylinders per group.
    { If TABLE later calls init_hfs_unit and configure_cache, the
    { information about superblock and cgroup size is recalculated.
a799 2
    item_size[I_SB] := 4*1024;
    item_size[I_CG] := 2*1024;
d813 2
a814 1
{ returns whether we think it's an HFS unit
d816 1
a816 1
function init_hfs_unit(unum: shortint): boolean;
d822 1
d824 1
a824 1
    init_hfs_unit := false;
d826 1
a826 1
    if configure_error then begin
d835 3
a837 2
    get_bytes(unum, sizeof(fs^), SBLOCK*DEV_BSIZE, windowp(fs));
    if ioresult <> ord(inoerror) then
d850 2
a857 4
    init_support_unit(unum);

    init_hfs_unit := true;

d859 2
d888 1
a888 1
    if configure_error then
d891 11
d929 1
a929 1
	configure_error := true;
d945 2
a946 1
	writeln('item ', i:1, ': ', buf_count[i]:1,
d950 1
a950 1
    for i := 1 to 600000 do ;
d973 1
a973 1
    if configure_error then
d1009 1
a1009 1
    if configure_error then
d1028 1
a1028 1
    if configure_error then
d1069 1
a1069 1
procedure invalidate_unit(badunit: shortint);
d1075 2
a1076 2
i := ioresult; writeln('INVALIDATE UNIT ', badunit); ioresult := i;
    if configure_error then
d1110 1
d1112 1
d1126 1
@


1.17
log
@Remove reference to io_to_file, now dead.
@
text
@d616 1
d619 2
a634 2
{if this_item = I_DATABLK then
reportn('get_datablk in cache', addr_wanted);}
d672 4
d678 1
d823 1
d938 1
a938 1
    for i := 1 to 1000000 do ;
d1063 1
a1063 1
writeln('INVALIDATE UNIT ', badunit);
@


1.16
log
@init_cache_unit -> init_hfs_unit.
call init_support_unit in init_hfs_unit.
add input and output tracing for debugging.
@
text
@a787 4
$if io_to_file$
    writeln('IO TO FILE');
$end$

@


1.15
log
@Add writelnb for when unit is invalidated (debug).
@
text
@d40 1
a40 1
{  2.  call init_cache_unit for every hfs unit
d75 1
a75 1
{  2.  init_cache_unit looks at the beginning of the superblock and
d89 3
d116 1
a116 1
function  init_cache_unit(unum : shortint): boolean;
a145 7
{ routines for compatibility with older code.
{ These will presumably disappear.
}
function  get_data     (blocknum : integer; devblocknum : shortint) :
			dev_block_ptr_type ;

{
d484 2
d501 5
d513 2
d796 1
a796 1
    { If TABLE later calls init_cache_unit and configure_cache, the
d807 1
a807 1
{ init_cache_unit
d816 1
a816 1
function init_cache_unit(unum: shortint): boolean;
d822 1
a822 1
    init_cache_unit := false;
a843 3
	{
	or (ipg * frag <> fpg * inopb)
	}
d853 1
a853 1
    init_cache_unit := true;
d855 2
d940 1
a940 1
    { (TABLE calls init_cache_unit + configure_cache)
d1092 2
d1137 2
a1138 1
    {write('get_cgroup ', groupnum:1, '  ');}
d1164 2
a1204 1
    {writeln('get datablk ', daddr:1, '  ');}
d1219 2
d1232 2
d1247 1
d1258 1
@


1.14
log
@Cehck inodenum in get_inode.
Tweak cacher params to get more inodes.
get_superblock now calls check_disc_status in all cases.
@
text
@d1056 1
@


1.13
log
@Use sysgmttime instead of homegrown routine.  Also,
don't stamp inode chg time when writing inode out.
.,
@
text
@d162 3
a164 3
    I_INODE = 1;
    I_SB = 2;
    I_CG = 3;
d200 1
a200 1
    MAX_INODES = 30;
d206 2
a207 2
    UNIT_INODES = 5;
    UNIT_DATABLKS = 8;
a437 5
		{
		writeln('read_buf getsuper, disc_addr ', disc_addr:1,
			'  size ',min(SBSIZE, max_size):1,'  unit ',unit:1);
		writeln('data at buffer ',dptr^[0]:1, dptr^[1]:1);
		}
a438 6
		{
		writeln('read_buf getsuper, success');
		writeln('data at buffer ',dptr^[0]:1, dptr^[1]:1);
		writeln('FS_MAGIC ',fsptr^.magic:1,
			'   FS_CLEAN ',ord(fsptr^.clean):1);
		}
d877 1
a877 1
    extra_bytes: integer;
d915 4
a918 2
    { give the extra bytes to the datablk area }
    allocate_extra(extra_bytes, I_DATABLK);
d920 2
a921 1
    { throw the scraps to the inodes }
d926 6
a931 4
    for i := 1 to NUM_ITEMS do
	begin
	 assign_space(cache_address, buf_count[i], buf_size[i], i);
	end;
d1080 2
a1081 8
{ We check the disc with every get_superblock to see if the
{ medium has changed.  If so, we invalidate the cache and try
{ again.  This procedure happens slightly differently in two
{ cases:
{ case 1: superblock in cache
{       call check_disc_changed
{ case 2: superblock not in cache
{       call get_super_bytes
a1083 2
label
    999;
a1085 1
    dummy1, dummy2: buf_hdr_ptr_type;
d1088 12
a1099 5
    fs := super_block_ptr_type(block_in_cache(unum,
					      SBLOCK*DEV_BSIZE,
					      I_SB,
					      dummy1,
					      dummy2));
a1100 16
    if fs <> nil then begin
       check_disc_changed;
       if ioresult <> ord(inoerror) then
	   fs := nil;
    end
    else begin
       try
	 fs := super_block_ptr_type(get_buf(unum,
					  SBLOCK*DEV_BSIZE,
					  0,
					  I_SB));
       recover
	   fs := nil;
    end;
999:
    {reportn('get_sb gets superblock', ord(fs));}
d1156 5
@


1.12
log
@Routines inumber, itype now here (for use by hfsalloc and hfsdam).
@
text
@d27 1
a498 1
		iptr^.ctime := hfs_time;
d515 1
a515 1
		    cgptr^.time := hfs_time;
@


1.11
log
@changed actions for put_*: reuse deleted, invalid added.
@
text
@d148 5
d1265 19
@


1.10
log
@fix get_edatablk bug that broke makedirectory
@
text
@d57 2
a58 2
{       a single put_* call.  There is also "reuse", which means that
{       the buffer contents are not worth saving.
d95 1
a95 1
    cache_action_type = (release, dirty, immediate, reuse);
d633 2
d684 1
a684 1
{       reuse -- buffer contents worthless, reuse buffer whenever needed
d725 1
a725 1
	if reuse in how then
d727 1
a727 1
	    age := 0;
a750 1

d1119 1
a1119 1
    reportn('get_sb gets superblock', ord(fs));
d1209 1
a1209 1
    {write('get datablk ', daddr:1, '  ');}
@


1.9
log
@remove get_super_bytes, simplify get_superblock so that
it doesn't retry through get_super_bytes.
@
text
@a508 1
		reportn('datablk size',size1);
d620 7
a635 7
    if this_item = I_EDATABLK then begin
	this_item := I_DATABLK;
	do_read := false;
    end
    else
	do_read := true;

a1060 1
    reportn('invalidate unit ', badunit);
d1118 1
@


1.8
log
@Recover from disc that's popped: get_super_bytes, block_in_cache added;
get_superblock now does a retry if mediumchanged.  sync resets use
counts, in case called after an escape, also invalidates bad unit.
@
text
@d437 1
a437 3
		get_super_bytes(unit, min(SBSIZE, max_size), disc_addr, wptr);
		if ioresult <> ord(inoerror) then
		    goto 999;
a454 2
		if ioresult <> ord(inoerror) then
		    goto 999;
a749 1
    kill_bufs := invalidate_unit;
d1062 1
a1097 1
    medium_changed := false;
a1105 1
       report('sb in cache');
d1107 11
a1117 12
       reportn('AFTER TOUCH, IORES ', ioresult);
       if ioresult = ord(inoerror) then
	 goto 999;
       { invalidate unit, which removes super from cache }
       invalidate_unit(unum);
       if ioresult <> ord(zmediumchanged) then begin
	 fs := nil;
	 goto 999;
       end;
       ioresult := ord(inoerror);
       medium_changed := true;
       { fall through to "not in cache" case }
a1118 8
    try
      report('sb not in cache');
      fs := super_block_ptr_type(get_buf(unum,
				       SBLOCK*DEV_BSIZE,
				       0,
				       I_SB));
    recover
	fs := nil;
d1120 4
a1123 5
    reportn('medium changed: ', ord(medium_changed));
    if medium_changed or (fs = nil) then
	reset_prefix(fs)
    else
	set_root_uvid(fs);
@


1.7
log
@Remove export decls for release_*, flush_*.
@
text
@d437 1
a437 1
		get_bytes(unit, min(SBSIZE, max_size), disc_addr, wptr);
d543 5
a547 7
{ generic get routine
{ find buffer with given item and address
{ failing that, find invalid buffer (unit = NO_UNIT)
{ failing that, find oldest valid unused buffer
{ failing that, panic
{ read the disc if info not already in the cache.
{ exception: I_EDATABLK is for datablks that we shouldn't read.
d549 5
a553 2
function get_buf(unit_wanted: integer; addr_wanted: integer;
		 size, this_item: integer): cache_blk_ptr_type;
d557 1
a557 1
    do_read: boolean;
a560 3
    oldest_hdr: buf_hdr_ptr_type;
    oldest_age: integer;
    invalid_hdr: buf_hdr_ptr_type;
a561 1
    get_buf := nil;
d564 1
a564 1
	goto 999;
d567 1
a567 6
    if this_item = I_EDATABLK then begin
	this_item := I_DATABLK;
	do_read := false;
    end
    else
	do_read := true;
d582 1
a582 1
		get_buf := buf_hdr^.dptr;
d598 2
d601 40
d754 1
d838 1
a838 1
    set_unit(unum);
d958 1
d962 1
d966 2
a967 1
	    with buf_hdr^ do
d969 7
a975 1
		    write_buf(buf_hdr);
d978 1
d981 1
d1054 1
d1064 3
d1086 8
d1096 2
d1100 1
d1102 27
a1128 2
    set_unit(unum);
    fs := super_block_ptr_type(get_buf(unum,
d1132 6
a1137 4
    if fs <> nil then begin
	current_unit := unum;
	current_super := fs;
    end
d1139 3
a1141 2
	current_unit := NO_UNIT;

@


1.6
log
@Remove obsolete release_* and flush_* calls.
@
text
@a144 9
procedure flush_superblock   (ptr   : super_block_ptr_type);
procedure release_superblock (ptr   : super_block_ptr_type);

procedure flush_cgroup   (ptr      : cgroup_ptr_type);
procedure release_cgroup (ptr      : cgroup_ptr_type);

procedure flush_inode   (ptr      : inode_ptr_type);
procedure release_inode (ptr      : inode_ptr_type);

a146 2
procedure flush_data   (ptr      : dev_block_ptr_type);
procedure release_data (ptr      : dev_block_ptr_type);
@


1.5
log
@Allow nil ptrs in put_*.  Escape in get_* on error.  New ioresults
for no room in cache, not configured properly.
@
text
@a1189 32
procedure flush_superblock   (ptr   : super_block_ptr_type);
begin
    put_superblock(ptr, [dirty,immediate]);
end;

procedure release_superblock (ptr   : super_block_ptr_type);
begin
    put_superblock(ptr, [release]);
end;

procedure flush_cgroup   (ptr      : cgroup_ptr_type);
begin
    put_cgroup(ptr, [dirty,immediate]);
end;

procedure release_cgroup (ptr      : cgroup_ptr_type);
begin
    put_cgroup(ptr, [release]);
end;


procedure flush_inode   (ptr      : inode_ptr_type);
begin
    put_inode(ptr, [dirty,immediate]);
end;

procedure release_inode (ptr      : inode_ptr_type);
begin
    put_inode(ptr, [release]);
end;


a1194 12
end;



procedure flush_data   (ptr      : dev_block_ptr_type);
begin
    put_datablk(cache_blk_ptr_type(ptr), [dirty,immediate]);
end;

procedure release_data (ptr      : dev_block_ptr_type);
begin
    put_datablk(cache_blk_ptr_type(ptr), [release]);
@


1.4
log
@diffs from 3.2c build
@
text
@a87 4
{type
    newioerr = (IOERRcorrupt, IOERRnospace, IOERRtoobig,
		IOERRnoinodes, IOERRinvalid, IOERRnotempty);}

d271 2
d405 1
d435 2
a436 1
		if (item = I_CG) and (cgptr^.magic <> CG_MAGIC) then
d449 1
a449 1
		if ioresult <> 0 then
d468 1
a468 1
		if ioresult <> 0 then
d513 1
a513 1
		if ioresult <> 0 then
d533 1
a533 1
		if ioresult <> 0 then
d576 2
a577 2

    if unit_wanted = NO_UNIT then
d579 1
d625 1
a625 2
	{ debugging printout }
	writeln('OUT OF BUFFERS, TYPE ', this_item:1);
d642 1
a642 2
	if ioresult <> 0 then begin
	    writeln('nil for get_buf after I/O fail');
a644 1
	    get_buf := nil;
a650 3
    {
    writeln('get_buf returning ', integer(buf_hdr^.dptr));
    }
d652 2
d668 2
d673 3
d709 1
d730 2
d736 4
a739 4
	newbytes(cache_space, cache_bytes);

$if not io_to_file$
$end$
d806 5
d816 1
a816 1
    if ioresult <> 0 then
d868 3
d898 1
a898 1
	writeln('NO SPACE IN CACHE');
d929 2
d935 2
d946 1
d956 2
d961 2
d966 2
a967 2
		if printmesg then
		  writeln('BUFFER STILL IN USE, type ', i:1, ' # ', j:1);
d975 2
d980 2
d1011 1
d1021 2
d1026 2
d1036 1
d1074 1
a1074 1
    if (release in how) and (fs^.fmod = FS_MODIFIED) then begin
a1255 3



@


1.3
log
@chnages from second hfsdam from bayes received 7 June
@
text
@d88 1
a88 1
type
d90 1
a90 1
		IOERRnoinodes, IOERRinvalid, IOERRnotempty);
d437 1
a437 1
		    ioresult := ord(IOERRcorrupt);
d459 1
a459 1
		    ioresult := ord(IOERRcorrupt);
@


1.2
log
@corrected cbit import to iocomasm import
@
text
@d524 1
@


1.1
log
@Initial revision
@
text
@d22 1
a22 1
$search 'hfstuff', 'cbit', 'hfscalc', 'support'$
a28 1
    cbit,
@
