FOSSology  4.4.0
Open Source License Compliance by Open Source Software
buckets.c
Go to the documentation of this file.
1 /*
2  SPDX-FileCopyrightText: © 2010-2014 Hewlett-Packard Development Company, L.P.
3 
4  SPDX-License-Identifier: GPL-2.0-only
5 */
6 
54 #include "buckets.h"
55 
int debug = 0;   /* verbosity level; copied from the -v option count in main() */

/* Build/version banner printed by the -V option.
   COMMIT_HASH_S / VERSION_S are injected by the build system when available. */
#ifdef COMMIT_HASH_S
char BuildVersion[]="buckets build version: " VERSION_S " r(" COMMIT_HASH_S ").\n";
#else
char BuildVersion[]="buckets build version: NULL.\n";
#endif
70 
71 /****************************************************/
/**
 * \brief Bucket agent entry point.
 *
 * Connects to the FOSSology scheduler and database, parses the command
 * line, then processes either exactly one upload (command-line mode: -p/-n
 * plus -t/-u) or a stream of requests of the form "bppk=123, upk=987" read
 * from the scheduler (default stdin mode).  For each request it:
 *   - locates the head of the uploadtree,
 *   - finds the most recent nomos agent run (license data source),
 *   - skips uploads already processed (unless -r "rerun" is given),
 *   - initializes the bucket definitions for the pool,
 *   - records an audit row in bucket_ars,
 *   - walks the tree assigning buckets, then closes the audit row.
 *
 * \param argc argument count
 * \param argv agent arguments; see Usage() for the option list
 * \return 0 on success, negative on fatal error
 */
int main(int argc, char **argv)
{
  char *agentDesc = "Bucket agent";
  int cmdopt;
  int verbose = 0;
  int ReadFromStdin = 1;        /* default: scheduler (stdin) mode */
  int head_uploadtree_pk = 0;   /* root of the uploadtree to scan */
  PGconn *pgConn;
  PGresult *topresult;
  PGresult *result;
  char sqlbuf[512];
  char *Delims = ",= \t\n\r";   /* delimiters for scheduler input parsing */
  char *token, *saveptr;
  int agent_pk = 0;
  int nomos_agent_pk = 0;       /* nomos run whose license data we bucket */
  int bucketpool_pk = 0;
  int ars_pk = 0;               /* bucket_ars audit record key */
  int readnum = 0;              /* main-loop iteration counter */
  int rv;
  int hasPrules;                /* nonzero if any rule applies to packages only */
  int user_pk = 0;
  char *bucketpool_name;
  char *COMMIT_HASH;
  char *VERSION;
  char agent_rev[myBUFSIZ];
  int rerun = 0;                /* -r: reprocess even if already scanned */


// int *bucketList;
  pbucketdef_t bucketDefArray = 0;
  pbucketdef_t tmpbucketDefArray = 0;
  /* NOTE(review): 'uploadtree', 'cacheroot' and 'uploadtree_tablename' are
     declared on lines not visible in this view (file scope or omitted
     declarations) -- confirm against the full source. */
  uploadtree.upload_fk = 0;

  /* connect to the scheduler */
  fo_scheduler_connect(&argc, argv, &pgConn);
  user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */

  /* command line options */
  while ((cmdopt = getopt(argc, argv, "rin:p:t:u:vc:hV")) != -1)
  {
    switch (cmdopt)
    {
      case 'i': /* "Initialize": connect (creating db objects) then exit */
        PQfinish(pgConn);
        exit(0);
      case 'n': /* bucketpool_name */
        ReadFromStdin = 0;
        bucketpool_name = optarg;
        /* find the highest rev active bucketpool_pk;
           a -p already given takes precedence */
        if (!bucketpool_pk)
        {
          bucketpool_pk = getBucketpool_pk(pgConn, bucketpool_name);
          if (!bucketpool_pk)
            printf("%s is not an active bucketpool name.\n", bucketpool_name);
        }
        break;
      case 'p': /* bucketpool_pk */
        ReadFromStdin = 0;
        bucketpool_pk = atoi(optarg);
        /* validate bucketpool_pk */
        sprintf(sqlbuf, "select bucketpool_pk from bucketpool where bucketpool_pk=%d and active='Y'", bucketpool_pk);
        bucketpool_pk = validate_pk(pgConn, sqlbuf);
        if (!bucketpool_pk)
          printf("%d is not an active bucketpool_pk.\n", atoi(optarg));
        break;
      case 't': /* uploadtree_pk */
        ReadFromStdin = 0;
        if (uploadtree.upload_fk) break;  /* -u already determined the tree root */
        head_uploadtree_pk = atoi(optarg);
        /* validate uploadtree_pk (comment in original said bucketpool_pk) */
        sprintf(sqlbuf, "select uploadtree_pk from uploadtree where uploadtree_pk=%d", head_uploadtree_pk);
        head_uploadtree_pk = validate_pk(pgConn, sqlbuf);
        if (!head_uploadtree_pk)
          printf("%d is not an active uploadtree_pk.\n", atoi(optarg));
        break;
      case 'u': /* upload_pk */
        ReadFromStdin = 0;
        if (!head_uploadtree_pk)  /* a -t already given takes precedence */
        {
          uploadtree.upload_fk = atoi(optarg);
          /* validate upload_pk and get uploadtree_pk */
          sprintf(sqlbuf, "select upload_pk from upload where upload_pk=%d", uploadtree.upload_fk);
          uploadtree.upload_fk = validate_pk(pgConn, sqlbuf);
          if (!uploadtree.upload_fk)
            printf("%d is not an valid upload_pk.\n", atoi(optarg));
          else
          {
            /* root of the uploadtree has a null parent */
            sprintf(sqlbuf, "select uploadtree_pk from uploadtree where upload_fk=%d and parent is null", uploadtree.upload_fk);
            head_uploadtree_pk = validate_pk(pgConn, sqlbuf);
          }
        }
        break;
      case 'v': /* verbose output for debugging */
        verbose++;
        break;
      case 'c': break; /* handled by fo_scheduler_connect() */
      case 'r': /* rerun: process even if a prior bucket_ars record exists */
        rerun = 1;
        break;
      case 'V': /* print version info */
        printf("%s", BuildVersion);
        PQfinish(pgConn);
        exit(0);
      default:
        Usage(argv[0]);
        PQfinish(pgConn);
        exit(-1);
    }
  }
  debug = verbose;

  /*** validate command line ***/
  if (!bucketpool_pk && !ReadFromStdin)
  {
    printf("FATAL: You must specify an active bucketpool.\n");
    Usage(argv[0]);
    exit(-1);
  }
  if (!head_uploadtree_pk && !ReadFromStdin)
  {
    printf("FATAL: You must specify a valid uploadtree_pk or upload_pk.\n");
    Usage(argv[0]);
    exit(-1);
  }

  /* get agent pk
   * Note, if GetAgentKey fails, this process will exit.
   */
  COMMIT_HASH = fo_sysconfig("buckets", "COMMIT_HASH");
  VERSION = fo_sysconfig("buckets", "VERSION");
  sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH);
  agent_pk = fo_GetAgentKey(pgConn, basename(argv[0]), uploadtree.upload_fk, agent_rev, agentDesc);

  /*** Initialize the license_ref table cache ***/
  /* Build the license ref cache to hold 2<<11 (4096) licenses.
     This MUST be a power of 2.
     (The original comment said "2**11 (2048)", but 2<<11 is 4096.)
   */
  cacheroot.maxnodes = 2<<11;
  cacheroot.nodes = calloc(cacheroot.maxnodes, sizeof(cachenode_t));
  if (!lrcache_init(pgConn, &cacheroot))
  {
    printf("FATAL: Bucket agent could not allocate license_ref table cache.\n");
    exit(1);
  }


  /* main processing loop: one pass per scheduler request (stdin mode),
     or exactly one pass (command-line mode) */
  while(++readnum)
  {
    uploadtree.upload_fk = 0;
    if (ReadFromStdin)
    {
      bucketpool_pk = 0;

      /* Read the bucketpool_pk and upload_pk from stdin.
       * Format looks like 'bppk=123, upk=987'
       */
      if (!fo_scheduler_next()) break;  /* scheduler says no more work */

      token = strtok_r(fo_scheduler_current(), Delims, &saveptr);
      while (token && (!uploadtree.upload_fk || !bucketpool_pk))
      {
        if (strcmp(token, "bppk") == 0)
        {
          bucketpool_pk = atoi(strtok_r(NULL, Delims, &saveptr));
        }
        else
        if (strcmp(token, "upk") == 0)
        {
          uploadtree.upload_fk = atoi(strtok_r(NULL, Delims, &saveptr));
        }
        token = strtok_r(NULL, Delims, &saveptr);
      }

      /* Check Permissions */
      if (GetUploadPerm(pgConn, uploadtree.upload_fk, user_pk) < PERM_WRITE)
      {
        LOG_ERROR("You have no update permissions on upload %d", uploadtree.upload_fk);
        continue;
      }

      /* From the upload_pk, get the head of the uploadtree, pfile_pk and ufile_name */
      sprintf(sqlbuf, "select uploadtree_pk, pfile_fk, ufile_name, ufile_mode,lft,rgt from uploadtree \
        where upload_fk='%d' and parent is null limit 1", uploadtree.upload_fk);
      topresult = PQexec(pgConn, sqlbuf);
      if (fo_checkPQresult(pgConn, topresult, sqlbuf, agentDesc, __LINE__)) return -1;
      if (PQntuples(topresult) == 0)
      {
        printf("ERROR: %s.%s missing upload_pk %d.\nsql: %s",
          __FILE__, agentDesc, uploadtree.upload_fk, sqlbuf);
        PQclear(topresult);
        continue;
      }
      head_uploadtree_pk = atol(PQgetvalue(topresult, 0, 0));
      uploadtree.uploadtree_pk = head_uploadtree_pk;
      /* NOTE(review): no-op self-assignment; upload_fk was already parsed above */
      uploadtree.upload_fk = uploadtree.upload_fk;
      uploadtree.pfile_fk = atol(PQgetvalue(topresult, 0, 1));
      /* NOTE(review): ufile_name is strdup'd on every pass but only freed on
         the error path below -- looks like a per-iteration leak in stdin
         mode; confirm against the full source. */
      uploadtree.ufile_name = strdup(PQgetvalue(topresult, 0, 2));
      uploadtree.ufile_mode = atoi(PQgetvalue(topresult, 0, 3));
      uploadtree.lft = atoi(PQgetvalue(topresult, 0, 4));
      uploadtree.rgt = atoi(PQgetvalue(topresult, 0, 5));
      PQclear(topresult);
    } /* end ReadFromStdin */
    else
    {
      /* Only one input to process if from command line, so terminate if it's been done */
      if (readnum > 1) break;

      /* not reading from stdin
       * Get the pfile, and ufile_name for head_uploadtree_pk
       */
      sprintf(sqlbuf, "select pfile_fk, ufile_name, ufile_mode,lft,rgt, upload_fk from uploadtree where uploadtree_pk=%d", head_uploadtree_pk);
      topresult = PQexec(pgConn, sqlbuf);
      if (fo_checkPQresult(pgConn, topresult, sqlbuf, agentDesc, __LINE__))
      {
        free(uploadtree.ufile_name);
        return -1;
      }
      if (PQntuples(topresult) == 0)
      {
        printf("FATAL: %s.%s missing root uploadtree_pk %d\n",
          __FILE__, agentDesc, head_uploadtree_pk);
        PQclear(topresult);
        continue;
      }
      uploadtree.uploadtree_pk = head_uploadtree_pk;
      uploadtree.pfile_fk = atol(PQgetvalue(topresult, 0, 0));
      uploadtree.ufile_name = strdup(PQgetvalue(topresult, 0, 1));
      uploadtree.ufile_mode = atoi(PQgetvalue(topresult, 0, 2));
      uploadtree.lft = atoi(PQgetvalue(topresult, 0, 3));
      uploadtree.rgt = atoi(PQgetvalue(topresult, 0, 4));
      uploadtree.upload_fk = atoi(PQgetvalue(topresult, 0, 5));
      PQclear(topresult);
    }

    /* Find the most recent nomos data for this upload.  That's what we want to use
       to process the buckets.
     */
    nomos_agent_pk = LatestNomosAgent(pgConn, uploadtree.upload_fk);
    if (nomos_agent_pk == 0)
    {
      printf("WARNING: Bucket agent called on treeitem (%d), but the latest nomos agent hasn't created any license data for this tree.\n",
        head_uploadtree_pk);
      continue;
    }

    /* at this point we know:
     * bucketpool_pk, bucket agent_pk, nomos agent_pk, upload_pk,
     * pfile_pk, and head_uploadtree_pk (the uploadtree_pk of the head tree to scan)
     */

    /* Has the upload already been processed?  If so, we are done.
       Don't even bother to create a bucket_ars entry.
     */
    switch (UploadProcessed(pgConn, agent_pk, nomos_agent_pk, uploadtree.pfile_fk, head_uploadtree_pk, uploadtree.upload_fk, bucketpool_pk))
    {
      case 1: /* upload has already been processed */
        if (1 == rerun) break;  /* -r forces reprocessing */
        printf("LOG: Duplicate request for bucket agent to process upload_pk: %d, uploadtree_pk: %d, bucketpool_pk: %d, bucket agent_pk: %d, nomos agent_pk: %d, pfile_pk: %d ignored.\n",
          uploadtree.upload_fk, head_uploadtree_pk, bucketpool_pk, agent_pk, nomos_agent_pk, uploadtree.pfile_fk);
        continue;
      case -1: /* SQL error, UploadProcessed() wrote error message */
        continue;
      case 0: /* upload has not been processed */
        break;
    }

    /*** Initialize the Bucket Definition List bucketDefArray ***/
    bucketDefArray = initBuckets(pgConn, bucketpool_pk, &cacheroot);
    if (bucketDefArray == 0)
    {
      printf("FATAL: %s.%d Bucket definition for pool %d could not be initialized.\n",
        __FILE__, __LINE__, bucketpool_pk);
      exit(-2);
    }
    bucketDefArray->nomos_agent_pk = nomos_agent_pk;
    bucketDefArray->bucket_agent_pk = agent_pk;

    /* Find the correct uploadtree table name */
    /* NOTE(review): the assignment of uploadtree_tablename (presumably
       GetUploadtreeTableName(pgConn, uploadtree.upload_fk)) is not visible
       in this view -- confirm it precedes this check in the full source. */
    if (!(uploadtree_tablename))
    {
      LOG_FATAL("buckets passed invalid upload, upload_pk = %d", uploadtree.upload_fk);
      return(-110);
    }

    /* set uploadtree_tablename in all the bucket definition structs;
       the array is terminated by an entry with bucket_pk == 0 */
    for (tmpbucketDefArray = bucketDefArray; tmpbucketDefArray->bucket_pk; tmpbucketDefArray++)
    {
      tmpbucketDefArray->uploadtree_tablename = uploadtree_tablename;
    }

    /* loop through rules (bucket defs) to see if there are any package only rules */
    hasPrules = 0;
    for (tmpbucketDefArray = bucketDefArray; tmpbucketDefArray->bucket_pk; tmpbucketDefArray++)
      if (tmpbucketDefArray->applies_to == 'p')
      {
        hasPrules = 1;
        break;
      }

    /*** END initializing bucketDefArray ***/

    /*** Initialize DEB_SOURCE and DEB_BINARY (mimetype keys used by the walker) ***/
    sprintf(sqlbuf, "select mimetype_pk from mimetype where mimetype_name='application/x-debian-package'");
    result = PQexec(pgConn, sqlbuf);
    if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
    if (PQntuples(result) == 0)
    {
      printf("FATAL: (%s.%d) Missing application/x-debian-package mimetype.\n",__FILE__,__LINE__);
      return -1;
    }
    DEB_BINARY = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);

    sprintf(sqlbuf, "select mimetype_pk from mimetype where mimetype_name='application/x-debian-source'");
    result = PQexec(pgConn, sqlbuf);
    if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
    if (PQntuples(result) == 0)
    {
      printf("FATAL: (%s.%d) Missing application/x-debian-source mimetype.\n",__FILE__,__LINE__);
      return -1;
    }
    DEB_SOURCE = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);
    /*** END Initialize DEB_SOURCE and DEB_BINARY ***/

    /*** Record analysis start in bucket_ars, the bucket audit trail. ***/
    if (0 == rerun) { // do not have any bucket scan on this upload
      snprintf(sqlbuf, sizeof(sqlbuf),
        "insert into bucket_ars (agent_fk, upload_fk, ars_success, nomosagent_fk, bucketpool_fk) values(%d,%d,'%s',%d,%d)",
        agent_pk, uploadtree.upload_fk, "false", nomos_agent_pk, bucketpool_pk);
      if (debug)
        printf("%s(%d): %s\n", __FILE__, __LINE__, sqlbuf);

      result = PQexec(pgConn, sqlbuf);
      if (fo_checkPQcommand(pgConn, result, sqlbuf, __FILE__ ,__LINE__)) return -1;
      PQclear(result);

      /* retrieve the ars_pk of the newly inserted record */
      sprintf(sqlbuf, "select ars_pk from bucket_ars where agent_fk='%d' and upload_fk='%d' and ars_success='%s' and nomosagent_fk='%d' \
        and bucketpool_fk='%d' and ars_endtime is null \
        order by ars_starttime desc limit 1",
        agent_pk, uploadtree.upload_fk, "false", nomos_agent_pk, bucketpool_pk);
      result = PQexec(pgConn, sqlbuf);
      if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
      if (PQntuples(result) == 0)
      {
        printf("FATAL: (%s.%d) Missing bucket_ars record.\n%s\n",__FILE__,__LINE__,sqlbuf);
        return -1;
      }
      ars_pk = atol(PQgetvalue(result, 0, 0));
      PQclear(result);
    }
    /*** END bucket_ars insert ***/

    if (debug) printf("%s sql: %s\n",__FILE__, sqlbuf);

    /* process the tree for buckets
       Do this as a single transaction, therefore this agent must be
       run as a single thread.  This will prevent the scheduler from
       consuming excess time (this is a fast agent), and allow this
       process to update bucket_ars.
     */
    rv = walkTree(pgConn, bucketDefArray, agent_pk, head_uploadtree_pk, 0,
      hasPrules);
    /* if no errors and top level is a container, process the container */
    if ((!rv) && (IsContainer(uploadtree.ufile_mode)))
    {
      rv = processFile(pgConn, bucketDefArray, &uploadtree, agent_pk, hasPrules);
    }

    /* Record analysis end in bucket_ars, the bucket audit trail. */
    if (0 == rerun && ars_pk)
    {
      if (rv)
        snprintf(sqlbuf, sizeof(sqlbuf),
          "update bucket_ars set ars_endtime=now(), ars_success=false where ars_pk='%d'",
          ars_pk);
      else
        snprintf(sqlbuf, sizeof(sqlbuf),
          "update bucket_ars set ars_endtime=now(), ars_success=true where ars_pk='%d'",
          ars_pk);

      if (debug)
        printf("%s(%d): %s\n", __FILE__, __LINE__, sqlbuf);

      result = PQexec(pgConn, sqlbuf);
      if (fo_checkPQcommand(pgConn, result, sqlbuf, __FILE__ ,__LINE__)) return -1;
      PQclear(result);
    }
  } /* end of main processing loop */

  /* NOTE(review): license-cache teardown (lrcache_free) and
     fo_scheduler_disconnect() are not visible in this view but appear to
     belong here before exit -- confirm against the full source. */
  free(bucketDefArray);

  PQfinish(pgConn);
  return (0);
}
PGconn * pgConn
Database connection.
Definition: adj2nest.c:86
char * uploadtree_tablename
upload.uploadtree_tablename
Definition: adj2nest.c:100
int agent_pk
Definition: agent.h:74
char BuildVersion[]
Definition: buckets.c:68
int main(int argc, char **argv)
Definition: buckets.c:72
int DEB_SOURCE
Definition: buckets.c:60
int DEB_BINARY
Definition: buckets.c:62
int debug
Definition: buckets.c:57
int verbose
The verbose flag for the cli.
Definition: fo_cli.c:38
Usage()
Print Usage statement.
Definition: fo_dbcheck.php:63
FUNCTION int LatestNomosAgent(PGconn *pgConn, int upload_pk)
Get the latest nomos agent_pk that has data for this this uploadtree.
Definition: inits.c:603
FUNCTION int getBucketpool_pk(PGconn *pgConn, char *bucketpool_name)
Get a bucketpool_pk based on the bucketpool_name.
Definition: inits.c:23
FUNCTION pbucketdef_t initBuckets(PGconn *pgConn, int bucketpool_pk, cacheroot_t *pcroot)
Initialize the bucket definition list. If an error occured, write the error to stdout.
Definition: inits.c:52
FUNCTION char * GetUploadtreeTableName(PGconn *pgConn, int upload_pk)
Get the uploadtree table name for this upload_pk If upload_pk does not exist, return "uploadtree".
Definition: libfossagent.c:414
FUNCTION int GetUploadPerm(PGconn *pgConn, long UploadPk, int user_pk)
Get users permission to this upload.
Definition: libfossagent.c:378
FUNCTION int fo_GetAgentKey(PGconn *pgConn, const char *agent_name, long Upload_pk, const char *rev, const char *agent_desc)
Get the latest enabled agent key (agent_pk) from the database.
Definition: libfossagent.c:158
int fo_checkPQresult(PGconn *pgConn, PGresult *result, char *sql, char *FileID, int LineNumb)
Check the result status of a postgres SELECT.
Definition: libfossdb.c:170
int fo_checkPQcommand(PGconn *pgConn, PGresult *result, char *sql, char *FileID, int LineNumb)
Check the result status of a postgres commands (not select) If an error occured, write the error to s...
Definition: libfossdb.c:204
#define PERM_WRITE
Read-Write permission.
Definition: libfossology.h:33
void fo_scheduler_disconnect(int retcode)
Disconnect the scheduler connection.
char * fo_sysconfig(const char *sectionname, const char *variablename)
gets a system configuration variable from the configuration data.
int fo_scheduler_userID()
Gets the id of the user that created the job that the agent is running.
char * fo_scheduler_current()
Get the last read string from the scheduler.
char * fo_scheduler_next()
Get the next data to process from the scheduler.
void fo_scheduler_connect(int *argc, char **argv, PGconn **db_conn)
Establish a connection between an agent and the scheduler.
FUNCTION void lrcache_free(cacheroot_t *pcroot)
Free the hash table.
Definition: liccache.c:72
FUNCTION int lrcache_init(PGconn *pgConn, cacheroot_t *pcroot)
Build a cache the license ref db table.
Definition: liccache.c:174
int nomos_agent_pk
Definition: buckets.h:70
int bucket_agent_pk
Definition: buckets.h:71
char applies_to
Definition: buckets.h:69
int maxnodes
No. of nodes in the list.
Definition: liccache.h:42
cachenode_t * nodes
Array of nodes.
Definition: liccache.h:43
Contains information required by uploadtree elements.
Definition: adj2nest.c:93
FUNCTION int UploadProcessed(PGconn *pgConn, int bucketagent_pk, int nomosagent_pk, int pfile_pk, int uploadtree_pk, int upload_pk, int bucketpool_pk)
Has this upload already been bucket processed? This function checks buckets_ars to see if the upload ...
Definition: validate.c:204
FUNCTION int validate_pk(PGconn *pgConn, char *sql)
Verify a primary key exists.
Definition: validate.c:84
FUNCTION int walkTree(PGconn *pgConn, pbucketdef_t bucketDefArray, int agent_pk, int uploadtree_pk, int skipProcessedCheck, int hasPrules)
This function does a recursive depth first walk through a file tree (uploadtree).
Definition: walk.c:34
FUNCTION int processFile(PGconn *pgConn, pbucketdef_t bucketDefArray, puploadtree_t puploadtree, int agent_pk, int hasPrules)
Process a file.
Definition: walk.c:167