path: root/nlpsolver/ThirdParty/EvolutionarySolver/src/net/adaptivebox/deps/DEPSAgent.java
blob: a67fe77e12c010da679a2e725dd3290b808ac361
package net.adaptivebox.deps;

/**
 * Description: An agent that hybridizes differential evolution (DE) and
 * particle swarm optimization (PSO) operators (DEPSO).
 *
 * @ Author        Create/Modi     Note
 * Xiaofeng Xie    Jun 10, 2004
 * Xiaofeng Xie    Jul 01, 2008
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * Please acknowledge the author(s) if you use this code in any way.
 *
 * @version 1.0
 * @Since MAOS1.0
 *
 * @References:
 * [1] Zhang W J, Xie X F. DEPSO: hybrid particle swarm with differential
 *     evolution operator. IEEE International Conference on Systems, Man &
 *     Cybernetics, Washington D C, USA, 2003: 3816-3821
 * [2] Xie X F, Zhang W J. SWAF: swarm algorithm framework for numerical
 *     optimization. Genetic and Evolutionary Computation Conference (GECCO),
 *     Seattle, WA, USA, 2004: 238-250
 *     -> an agent perspective
 */

import net.adaptivebox.deps.behavior.*;
import net.adaptivebox.goodness.IGoodnessCompareEngine;
import net.adaptivebox.knowledge.*;
import net.adaptivebox.problem.*;
import net.adaptivebox.space.*;

public class DEPSAgent implements ILibEngine {

  //Describes the problem to be solved
  private ProblemEncoder problemEncoder;
  //Compares the goodness of search points (forms the goodness landscape)
  private IGoodnessCompareEngine qualityComparator;

  //stores the trial point generated in the current learning cycle
  private SearchPoint trailPoint;

  //the generate-and-test behavior selected for the current learning cycle
  private AbsGTBehavior selectGTBehavior;

  //The referenced social library
  private Library socialLib;
  //own memory: stores the point generated in an older learning cycle
  private BasicPoint pold_t;
  //own memory: stores the point generated in the last learning cycle
  private BasicPoint pcurrent_t;
  //own memory: stores the personal best point
  private SearchPoint pbest_t;

  //Generate-and-test Behaviors
  private DEGTBehavior deGTBehavior;
  private PSGTBehavior psGTBehavior;
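  //probability of selecting the DE behavior in a learning cycle;
  //otherwise the PS behavior is used (see getGTBehavior)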
  public double switchP = 0.5;

  public void setLibrary(Library lib) {
    socialLib = lib;
    deGTBehavior.setLibrary(socialLib);
    psGTBehavior.setLibrary(socialLib);
  }

  public void setProblemEncoder(ProblemEncoder encoder) {
    problemEncoder = encoder;
    trailPoint = problemEncoder.getFreshSearchPoint();
    pold_t = problemEncoder.getFreshSearchPoint();
    pcurrent_t = problemEncoder.getFreshSearchPoint();
  }

  public void setSpecComparator(IGoodnessCompareEngine comparer) {
    qualityComparator = comparer;
  }

  public void setPbest(SearchPoint pbest) {
    pbest_t = pbest;
  }

  private AbsGTBehavior getGTBehavior() {
    //selects the DE behavior with probability switchP, the PS behavior otherwise
    if (Math.random() < switchP) {
      return deGTBehavior;
    } else {
      return psGTBehavior;
    }
  }

  public void setGTBehavior(AbsGTBehavior gtBehavior) {
    if (gtBehavior instanceof DEGTBehavior) {
      deGTBehavior = (DEGTBehavior) gtBehavior;
      deGTBehavior.setPbest(pbest_t);
    } else if (gtBehavior instanceof PSGTBehavior) {
      psGTBehavior = (PSGTBehavior) gtBehavior;
      psGTBehavior.setMemPoints(pbest_t, pcurrent_t, pold_t);
    }
  }

  public void generatePoint() {
    //generates a new trial point in the search space, based on the agent's
    //own memory and the referenced library
    selectGTBehavior = this.getGTBehavior();
    selectGTBehavior.generateBehavior(trailPoint, problemEncoder);
    //evaluates the trial point to obtain its goodness information
    problemEncoder.evaluate(trailPoint);
  }

  public void learn() {
    //tests the generated trial point against the agent's memory;
    //must be called after generatePoint()
    selectGTBehavior.testBehavior(trailPoint, qualityComparator);
  }

  public SearchPoint getMGState() {
    //returns the most recently generated trial point
    return trailPoint;
  }
}
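
/*
 * Usage sketch (hypothetical; not part of the original file): a minimal
 * driving loop showing the wiring order the setters above imply. It assumes
 * the caller supplies a concrete ProblemEncoder, a populated Library, and an
 * IGoodnessCompareEngine, and that DEGTBehavior and PSGTBehavior can be
 * created with no-arg constructors.
 */
class DEPSAgentUsageSketch {
  static SearchPoint run(ProblemEncoder encoder, Library lib,
      IGoodnessCompareEngine comparator, int learningCycles) {
    DEPSAgent agent = new DEPSAgent();
    agent.setProblemEncoder(encoder);              //allocates the trial and memory points
    agent.setPbest(encoder.getFreshSearchPoint()); //must precede the setGTBehavior calls
    agent.setGTBehavior(new DEGTBehavior());       //DE branch, wired to pbest
    agent.setGTBehavior(new PSGTBehavior());       //PS branch, wired to the memory points
    agent.setLibrary(lib);                         //requires both behaviors to be set
    agent.setSpecComparator(comparator);
    for (int t = 0; t < learningCycles; t++) {
      agent.generatePoint();                       //propose and evaluate a trial point
      agent.learn();                               //test it against the agent's memory
    }
    return agent.getMGState();                     //the most recently generated trial point
  }
}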